prompt | completion | api
---|---|---
stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90
#!/usr/bin/env python
"""Calculate regionprops of segments.
"""
import sys
import argparse
# conda install cython
# conda install pytest
# conda install pandas
# pip install ~/workspace/scikit-image/ # scikit-image==0.16.dev0
import os
import re
import glob
import pickle
import numpy as np
import pandas as pd
from scipy import ndimage as ndi
from scipy.spatial import distance
from stapl3d import Image, LabelImage, wmeMPI
from stapl3d.channels import get_bias_field_block
from skimage.measure import regionprops, regionprops_table
from skimage.morphology import binary_dilation
from skimage.segmentation import find_boundaries
from stapl3d.segmentation.segment import extract_segments
def main(argv):
"""Calculate regionprops of segments."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--seg_paths',
nargs='*',
help='paths to label volumes (xyz)',
)
parser.add_argument(
'--seg_names',
nargs='*',
help='names for (sub)segmentations',
)
parser.add_argument(
'--data_path',
nargs='*',
help='paths to data channels',
)
parser.add_argument(
'--data_names',
nargs='*',
help='names for channels',
)
parser.add_argument(
'--aux_data_path',
help='path to auxiliary data file (zyxc)',
)
parser.add_argument(
'--downsample_factors',
nargs='*',
type=int,
default=[],
help='the downsample factors applied to the aux_data_path image'
)
parser.add_argument(
'--csv_path',
default='',
help='path to output csv file',
)
parser.add_argument(
'-s', '--blocksize',
required=True,
nargs='*',
type=int,
default=[],
help='size of the datablock'
)
parser.add_argument(
'-m', '--blockmargin',
nargs='*',
type=int,
default=[],
help='the datablock overlap used'
)
parser.add_argument(
'--blockrange',
nargs=2,
type=int,
default=[],
help='a range of blocks to process'
)
parser.add_argument(
'--channels',
nargs='*',
type=int,
default=[],
help='a list of channel indices to extract intensity features for'
)
parser.add_argument(
'-f', '--filter_borderlabels',
action='store_true',
help='filter out labels that touch the block borders'
)
parser.add_argument(
'--min_labelsize',
type=int,
default=0,
help='minimum labelsize in voxels',
)
parser.add_argument(
'--split_features',
action='store_true',
help='compute features separately per (sub)segmentation'
)
parser.add_argument(
'--fset_morph',
default=['label'],
help='morphology feature set',
)
parser.add_argument(
'--fset_intens',
default=['mean_intensity'],
help='intensity feature set',
)
parser.add_argument(
'--fset_addit',
default=['com_z', 'com_y', 'com_x'],
help='auxiliary feature set',
)
args = parser.parse_args()
export_regionprops(
args.seg_paths,
args.seg_names,
args.data_path,
args.data_names,
args.aux_data_path,
args.downsample_factors,
args.csv_path,
args.blocksize,
args.blockmargin,
args.blockrange,
args.channels,
args.filter_borderlabels,
args.min_labelsize,
args.split_features,
args.fset_morph,
args.fset_intens,
args.fset_addit,
)
def export_regionprops(
seg_paths,
seg_names=['full', 'memb', 'nucl'],
data_paths=[],
data_names=[],
aux_data_path=[],
downsample_factors=[1, 1, 1],
outputstem='',
blocksize=[],
blockmargin=[],
blockrange=[],
channels=[],
filter_borderlabels=False,
min_labelsize=0,
split_features=False,
fset_morph=['label'],
fset_intens=['mean_intensity'],
fset_addit=['com_z', 'com_y', 'com_x'],
):
# load the segments: ['full'] or ['full', 'memb', 'nucl']
label_ims = {}
pfs = seg_names[:len(seg_paths)]
for pf, seg_path in zip(pfs, seg_paths):
im = LabelImage(seg_path, permission='r')
im.load(load_data=False)
label_ims[pf] = im
comps = label_ims['full'].split_path()
# prepare parallel processing
mpi_label = wmeMPI(usempi=False)
blocksize = blocksize or label_ims['full'].dims
mpi_label.set_blocks(label_ims['full'], blocksize, blockmargin, blockrange)
mpi_label.scatter_series()
# load the data
data_ims = {}
mpi_data = wmeMPI(usempi=False)
for i, data_path in enumerate(data_paths):
pf = 'im{:02d}'.format(i)
data = Image(data_path, permission='r')
data.load(load_data=False)
ch_idx = data.axlab.index('c')
# FIXME channels for multiple data_paths
chs = channels or [ch for ch in range(data.dims[ch_idx])]
names = [data_names.pop(0) for _ in range(len(chs))]
data_ims[pf] = {'im': data, 'ch': chs, 'names': names}
""" TODO
try:
mpi_data.blocks = [
{'id': split_filename(comps['file'])[0]['postfix'],
'slices': dset_name2slices(comps['file'], axlab=data.axlab, shape=data.dims),
'path': '',},
]
except:
"""
mpi_data.set_blocks(data, blocksize, blockmargin, blockrange)
border_labelset = set([])
# if filter_borderlabels:
# outstem = outputstem or label_ims['full'].split_path()['base']
# outstem += '_dataset'
# border_labelset |= filter_borders(label_ims['full'], outstem)
dfs = []
for i in mpi_label.series:
print('processing block {:03d} with id: {}'.format(i, mpi_label.blocks[i]['id']))
dfs.append(process_block(
mpi_label.blocks[i],
mpi_data.blocks[i],
label_ims,
split_features,
data_ims,
min_labelsize,
channels,
filter_borderlabels,
fset_morph,
fset_intens,
fset_addit,
border_labelset,
outputstem,
aux_data_path,
downsample_factors,
)
)
return dfs
def process_block(
block_label,
block_data,
label_ims,
split_features,
data_ims,
min_labelsize,
channels,
filter_borderlabels=False,
fset_morph=['label'],
fset_intens=['mean_intensity'],
fset_addit=['com_z', 'com_y', 'com_x'],
border_labelset=set([]),
outputstem='',
aux_data_path='',
downsample_factors=[1, 1, 1],
):
morph, intens, add = get_feature_set(fset_morph, fset_intens, fset_addit)
all_regions = {}
for pf, label_im in label_ims.items():
label_im.slices = block_label['slices'][:3]
all_regions[pf] = label_im.slice_dataset().astype('int')
all_data = {}
for dpf, datadict in data_ims.items():
data = datadict['im']
data.slices = block_data['slices']
for ch, name in zip(datadict['ch'], datadict['names']):
data.slices[data.axlab.index('c')] = slice(ch, ch + 1, 1)
ch_data = data.slice_dataset()
all_data[name] = ch_data
outstem = outputstem or label_ims['full'].split_path()['base']
outstem += '_{}'.format(block_label['id'])
if filter_borderlabels:
border_labelset |= filter_borders(label_ims['full'], outstem)
if min_labelsize:
all_regions = filter_size(all_regions, min_labelsize, outstem)
for pf, regions in all_regions.items():
try:
rpt = regionprops_table(regions, properties=morph)
except IndexError:
print('IndexError on MORPH {}: empty labelset'.format(block_label['id']))
df = get_empty_dataframe(morph, add, intens, channels)
except ValueError:
print('ValueError on MORPH {}'.format(block_label['id']))
df = get_empty_dataframe(morph, add, intens, channels)
else:
df = pd.DataFrame(rpt)
origin = [block_data['slices'][i].start for i in [0, 1, 2]] # in full dataset voxels
df = add_features(df, aux_data_path, origin, downsample_factors)
for cpf, ch_data in all_data.items():
df_int = get_intensity_df_data(regions, intens, ch_data, cpf)
df = pd.concat([df, df_int], axis=1)
outstem = outputstem or label_im.split_path()['base']
outstem += '_{}'.format(block_label['id'])
csv_path = "{}_features_{}.csv".format(outstem, pf)
df.to_csv(csv_path)
# TODO: close images
return df
def filter_borders(label_im, outstem):
labelset = find_border_labels(label_im)
strpat = 'found {:12d} border labels in {}'
print(strpat.format(len(labelset), outstem))
write_labelset(labelset, outstem, pf='borderlabels')
return labelset
def filter_size(all_regions, min_labelsize, outstem=''):
pf = 'nucl' if 'nucl' in all_regions.keys() else 'full'
rp = regionprops(all_regions[pf])
small_labels = [prop.label for prop in rp if prop.area < min_labelsize]
strpat = 'found {:12d} small labels in {}'
print(strpat.format(len(small_labels), outstem))
write_labelset(set(small_labels), outstem, pf='smalllabels')
maxlabel = np.amax(all_regions['full'])
fw = np.zeros(maxlabel + 1, dtype='bool')
fw[small_labels] = True
sl_mask = np.array(fw)[all_regions['full']]
for pf in all_regions.keys():
all_regions[pf][sl_mask] = 0
return all_regions
def write_labelset(labelset, outstem, pf):
ppath = "{}_{}.pickle".format(outstem, pf)
with open(ppath, 'wb') as f:
pickle.dump(labelset, f)
def get_nuclearmask(block_label):
maskfile_compound = False # FIXME: as argument
if maskfile_compound:
maskpath_sauvola = os.path.join(datadir, '{}_bfc_nucl-dapi_mask_sauvola.ims'.format(dataset))
mask_sauvola_im = MaskImage(maskpath_sauvola, permission='r')
mask_sauvola_im.load(load_data=False)
mask_sauvola_im.slices[:3] = block_label['slices'][:3]
mask_sauvola_im.slices[3] = slice(0, 1, None)
labelfile_blocks = False # FIXME: as argument
from stapl3d import MaskImage
#datadir = '/hpc/pmc_rios/Kidney/190910_rl57_fungi_16bit_25x_125um_corr-stitching'
datadir = 'G:\\mkleinnijenhuis\\PMCdata\\Kidney\\190910_rl57_fungi_16bit_25x_125um_corr-stitching'
dataset = '190910_rl57_fungi_16bit_25x_125um_corr-stitching'
if labelfile_blocks:
block_label['id'] = '02496-03904_12736-14144_00000-00106'
#block_label['id'] = '03776-05184_12736-14144_00000-00106'
blockdir = os.path.join(datadir, 'blocks_1280')
maskpath_sauvola = os.path.join(blockdir, '{}_{}.h5/nucl/dapi_mask_sauvola'.format(dataset, block_label['id']))
mask_sauvola_im = MaskImage(maskpath_sauvola, permission='r')
mask_sauvola_im.load(load_data=False)
maskpath_absmin = os.path.join(blockdir, '{}_{}.h5/nucl/dapi_mask_absmin'.format(dataset, block_label['id']))
mask_absmin_im = MaskImage(maskpath_absmin, permission='r')
mask_absmin_im.load(load_data=False)
elif maskfile_compound:
maskpath_sauvola = os.path.join(datadir, '{}_bfc_nucl-dapi_mask_sauvola.ims'.format(dataset))
mask_sauvola_im = MaskImage(maskpath_sauvola, permission='r')
mask_sauvola_im.load(load_data=False)
mask_sauvola_im.slices[:3] = block_label['slices'][:3]
mask_sauvola_im.slices[3] = slice(0, 1, None)
maskpath_absmin = os.path.join(datadir, '{}_bfc_nucl-dapi_mask_absmin.ims'.format(dataset))
mask_absmin_im = MaskImage(maskpath_absmin, permission='r')
mask_absmin_im.load(load_data=False)
mask_absmin_im.slices[:3] = block_label['slices'][:3]
mask_absmin_im.slices[3] = slice(0, 1, None)
mask_sauvola = mask_sauvola_im.slice_dataset().astype('bool')
mask_absmin = mask_absmin_im.slice_dataset().astype('bool')
mask = mask_absmin & mask_sauvola
return mask
def add_features(df, image_in='', origin=[0, 0, 0], dsfacs=[1, 16, 16]):
if 'centroid-0' in df.columns:
cens = ['centroid-{}'.format(i) for i in [0, 1, 2]]
coms = ['com_{}'.format(d) for d in 'zyx']
df[coms] = df[cens] + origin
if image_in:
dt_im = Image(image_in, permission='r')
dt_im.load(load_data=False)
data = dt_im.slice_dataset()
dt_im.close()
ds_centroid = np.array(df[coms] / dsfacs, dtype='int')
ds_centroid = [data[p[0], p[1], p[2]] for p in ds_centroid]
df['dist_to_edge'] = np.array(ds_centroid)
if 'inertia_tensor_eigvals-0' in df.columns:
ites = ['inertia_tensor_eigvals-{}'.format(i) for i in [0, 1, 2]]
eigvals = np.clip(np.array(df[ites]), 0, np.inf)
df['fractional_anisotropy'] = fractional_anisotropy(eigvals)
df['major_axis_length'] = get_ellips_axis_lengths(eigvals[:, 0])
df['minor_axis_length'] = get_ellips_axis_lengths(eigvals[:, -1])
# TODO: range, variance, ...
return df
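# The helpers fractional_anisotropy() and get_ellips_axis_lengths() used above are not
# defined in this excerpt. Minimal sketches, assuming the standard FA formula and a
# simple 4*sqrt(eigenvalue) axis-length convention (assumptions, not the original code):
def fractional_anisotropy(eigvals):
    """Fractional anisotropy per row of an (N, 3) array of inertia-tensor eigenvalues."""
    eigvals = np.asarray(eigvals, dtype='float')
    mean = eigvals.mean(axis=1, keepdims=True)
    num = np.sqrt(np.sum((eigvals - mean) ** 2, axis=1))
    den = np.sqrt(np.sum(eigvals ** 2, axis=1))
    return np.sqrt(1.5) * num / np.maximum(den, np.finfo(float).eps)
def get_ellips_axis_lengths(eigvals):
    """Ellipsoid axis length from an inertia-tensor eigenvalue (assumed scaling)."""
    return 4 * np.sqrt(np.asarray(eigvals, dtype='float'))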
def get_intensity_df_data(regions, rp_props_int, ch_data, cpf):
try:
rpt = regionprops_table(regions, ch_data, properties=rp_props_int)
except ValueError:
print('got ValueError on INT {}'.format(cpf))
cols = ['{}_{}'.format(cpf, col)
for col in get_column_names(rp_props_int)]
df_int = pd.DataFrame(columns=cols)
else:
df_int = pd.DataFrame(rpt)
df_int.columns = ['{}_{}'.format(cpf, col)
for col in get_column_names(rp_props_int)]
return df_int
def get_intensity_df(regions, rp_props_int, data, ch, bf=None):
data.slices[data.axlab.index('c')] = slice(ch, ch + 1, 1)
ch_data = data.slice_dataset()
if bf is not None:
bias = get_bias_field_block(bf, data.slices, ch_data.shape)
bias = np.reshape(bias, ch_data.shape)
ch_data /= bias
ch_data = np.nan_to_num(ch_data, copy=False)
try:
rpt = regionprops_table(regions, ch_data, properties=rp_props_int)
except ValueError:
print('got ValueError on INT {}'.format(ch))
cols = ['ch{:02d}_{}'.format(ch, col)
for col in get_column_names(rp_props_int)]
df_int = pd.DataFrame(columns=cols)
else:
df_int = pd.DataFrame(rpt)
df_int.columns = ['ch{:02d}_{}'.format(ch, col)
for col in get_column_names(rp_props_int)]
return df_int
def split_filename(filename, blockoffset=[0, 0, 0]):
"""Extract the data indices from the filename."""
datadir, tail = os.path.split(filename)
fname = os.path.splitext(tail)[0]
parts = re.findall('([0-9]{5}-[0-9]{5})', fname)
id_string = '_'.join(parts)
dset_name = fname.split(id_string)[0][:-1]
x = int(parts[-3].split("-")[0]) - blockoffset[0]
X = int(parts[-3].split("-")[1]) - blockoffset[0]
y = int(parts[-2].split("-")[0]) - blockoffset[1]
Y = int(parts[-2].split("-")[1]) - blockoffset[1]
z = int(parts[-1].split("-")[0]) - blockoffset[2]
Z = int(parts[-1].split("-")[1]) - blockoffset[2]
dset_info = {'datadir': datadir, 'base': dset_name,
'nzfills': len(parts[1].split("-")[0]),
'postfix': id_string,
'x': x, 'X': X, 'y': y, 'Y': Y, 'z': z, 'Z': Z}
return dset_info, x, X, y, Y, z, Z
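# Example (hypothetical path; the index pattern matches the block ids used elsewhere,
# e.g. '02496-03904_12736-14144_00000-00106'):
# >>> info, x, X, y, Y, z, Z = split_filename('blocks/sample_02496-03904_12736-14144_00000-00106.h5')
# >>> (x, X), (y, Y), (z, Z)
# ((2496, 3904), (12736, 14144), (0, 106))
# >>> info['postfix']
# '02496-03904_12736-14144_00000-00106'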
def dset_name2slices(dset_name, blockoffset=[0, 0, 0], axlab='xyz', shape=[]):
"""Get slices from data indices in a filename."""
_, x, X, y, Y, z, Z = split_filename(dset_name, blockoffset)
slicedict = {'x': slice(x, X, 1),
'y': slice(y, Y, 1),
'z': slice(z, Z, 1)}
for dim in ['c', 't']:
if dim in axlab:
upper = shape[axlab.index(dim)]
slicedict[dim] = slice(0, upper, 1)
slices = [slicedict[dim] for dim in axlab]
return slices
def get_column_names(rp_props):
cols = []
for i, it in enumerate(rp_props):
if 'centroid' in it or 'eigvals' in it:
cols += ['{}-{}'.format(it, dim)
for dim in [0, 1, 2]]
elif 'moments' in it:
# FIXME: need only half of the diagonal matrix
cols += ['{}-{}-{}-{}'.format(it, dim1, dim2, dim3)
for dim1 in [0, 1, 2, 3]
for dim2 in [0, 1, 2, 3]
for dim3 in [0, 1, 2, 3]]
else:
cols += [it]
return cols
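# Example of the expansion rule above:
# get_column_names(['label', 'centroid', 'mean_intensity'])
# -> ['label', 'centroid-0', 'centroid-1', 'centroid-2', 'mean_intensity']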
def get_empty_dataframe(rp_props_morph, rp_props_add, rp_props_int, channels):
cols_morph = get_column_names(rp_props_morph)
cols_int = get_column_names(rp_props_int)
cols = ['ch{:02d}_{}'.format(ch, col) for ch in channels for col in cols_int]
df = pd.DataFrame(columns=cols_morph + rp_props_add + cols)
return df
def get_feature_set(fset_morph='minimal', fset_intens='minimal', aux_data_path=''):
# TODO: self-defined features
"""
# eccentricity not implemented
# orientation not implemented
# perimeter not implemented
# moments_hu not implemented
# weighted_moments_hu not implemented
# max_intensity needs aux data volume
# mean_intensity needs aux data volume
# min_intensity needs aux data volume
# weighted_centroid needs aux data volume
# weighted_moments needs aux data volume
# weighted_moments_central needs aux data volume
# weighted_moments_normalized needs aux data volume
"""
# morphological features
# TODO: file bug report on minor/major_axis_length
# gives this ValueError:
# https://github.com/scikit-image/scikit-image/issues/2625
fsets_morph ={
'none': (
'label',
),
'minimal': (
'label',
'area',
'centroid'
),
'medium': (
'label',
'area',
'centroid',
'bbox',
'equivalent_diameter',
'extent',
'euler_number',
'inertia_tensor_eigvals',
),
'maximal': (
'label',
'area',
'bbox',
'centroid',
'equivalent_diameter',
'extent',
'euler_number',
# 'convex_area',
# 'solidity',
'moments',
'moments_central',
'moments_normalized',
# 'orientation',
'inertia_tensor_eigvals',
# 'major_axis_length',
# 'minor_axis_length',
),
}
# intensity features
fsets_intens ={
'none': (),
'minimal': (
'mean_intensity',
),
'medium': (
'mean_intensity',
'weighted_centroid',
),
'maximal': (
'min_intensity',
'mean_intensity',
'median_intensity',
'variance_intensity',
'max_intensity',
'weighted_centroid',
# FIXME: OverflowError: Python int too large to convert to C long
# 'weighted_moments',
# 'weighted_moments_central',
# 'weighted_moments_normalized',
),
}
# FIXME: convex hull morph features often fail
# FIXME: intensity weighted fail
try:
morph = fsets_morph[fset_morph]
except (KeyError, TypeError):
morph = fset_morph
try:
intens = fsets_intens[fset_intens]
except (KeyError, TypeError):
intens = fset_intens
try:
addit = get_additional_columns(aux_data_path, fset_morph)
except (KeyError, TypeError):
addit = aux_data_path
return morph, intens, addit
def get_additional_columns(aux_data_path='', fset_morph='minimal'):
cols_add = ['com_z', 'com_y', 'com_x']
if aux_data_path:
cols_add += ['dist_to_edge']
if fset_morph == 'maximal':
cols_add += ['fractional_anisotropy',
'major_axis_length', 'minor_axis_length']
return cols_add
def find_border_labels(labels):
border_labelset = set([])
fullslices = [slc for slc in labels.slices]
for dim in [0, 1, 2]:
for s in [0, -1]:
if s:
fullstop = fullslices[dim].stop
labels.slices[dim] = slice(fullstop - 1, fullstop, None)
else:
fullstart = fullslices[dim].start
labels.slices[dim] = slice(fullstart, fullstart + 1, None)
labeldata = labels.slice_dataset()
border_labelset |= set(np.unique(labeldata))
labels.slices = [slc for slc in fullslices]
border_labelset -= set([0])
return border_labelset
def split_nucl_and_memb_data(labeldata, nuclearmask=None):
labeldata_memb = np.copy(labeldata)
labeldata_nucl = np.copy(labeldata)
memb_mask = find_boundaries(labeldata)
for i, slc in enumerate(memb_mask):
memb_mask[i, :, :] = binary_dilation(slc)
labeldata_memb[~memb_mask] = 0
if nuclearmask is None:
nuclearmask = ~memb_mask
labeldata_nucl[~nuclearmask] = 0
# print('mask_nucl0_sum', np.sum(~memb_mask))
# print('mask_nucl1_sum', np.sum(nuclearmask))
# print('mask_memb_sum', np.sum(memb_mask))
# print('label_full_sum', np.sum(labeldata.astype('bool')))
# print('label_memb_sum', np.sum(labeldata_memb.astype('bool')))
# print('label_nucl_sum', np.sum(labeldata_nucl.astype('bool')))
return labeldata_memb, labeldata_nucl
def split_nucl_and_memb(labels, outpat, nuclearmask=None):
labeldata = labels.slice_dataset()
labeldata_memb, labeldata_nucl = split_nucl_and_memb_data(labeldata, nuclearmask)
pf = '_memb'
outpath = outpat.format(pf)
im_memb = LabelImage(outpath, **props)
im_memb.create()
im_memb.write(labeldata_memb)
pf = '_nucl'
outpath = outpat.format(pf)
im_nucl = LabelImage(outpath, **props)
im_nucl.create()
im_nucl.write(labeldata_nucl)
return im_memb, im_nucl
def small_label_mask(labeldata, maxlabel, min_labelsize=5):
"""
NOTE:
- need to take out very small labels (<5) for convex hull
- even without these labels, convex hull often fails
- removed features dependent on convex hull from the feature sets for now
"""
rp = regionprops(labeldata)
small_labels = [prop.label for prop in rp if prop.area < min_labelsize]
fw = np.zeros(maxlabel + 1, dtype='bool')
fw[small_labels] = True
smalllabelmask = np.array(fw)[labeldata]
return smalllabelmask
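# Minimal usage sketch (mirrors how filter_size() above zeroes out small labels):
# mask = small_label_mask(labeldata, maxlabel=labeldata.max(), min_labelsize=5)
# labeldata[mask] = 0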
def label_selection_mask(labeldata, filestem):
import scanpy as sc
filename = '{}_adata.h5ad'.format(filestem)
adata = sc.read(filename)
labelset = set(adata.obs['label'].astype('uint32'))
ulabels = np.load('{}_ulabels.npy'.format(filestem))
ulabels = np.delete(ulabels, 0)
maxlabel = np.amax(ulabels)
fw = np.zeros(maxlabel + 1, dtype='bool')
for label in labelset:
fw[label] = True
mask = np.array(fw)[labeldata]
return mask
def select_features(dfs, feat_select, min_size=0, split_features=False):
df1 = dfs['full'][feat_select['morphs']]
key = 'memb' if split_features else 'full'
df2 = pd.DataFrame(index=df1.index)
df2[feat_select['membrane']] = dfs[key][feat_select['membrane']]
key = 'nucl' if split_features else 'full'
df3 = pd.DataFrame(index=df1.index)
import os
import random
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
import torch
from sklearn.metrics import pairwise_distances
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset
import matplotlib.pyplot as plt
from scripts.ssc.evaluation.mldl_copied import CompPerformMetrics
from src.datasets.datasets import SwissRoll, SwissRoll_manifold
from src.evaluation.eval import Multi_Evaluation
from src.models.COREL.eval_engine import get_latentspace_representation
from src.models.WitnessComplexAE.wc_ae import WitnessComplexAutoencoder
from src.models.autoencoder.autoencoders import Autoencoder_MLP_topoae
def update_dict(dict, ks, metric, result):
for i, k in enumerate(ks):
dict.update({metric+'_k{}'.format(k): result[metric][i]})
return dict
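# Example: update_dict({}, ks=[1, 5], metric='rmse', result={'rmse': [0.10, 0.25]})
# returns {'rmse_k1': 0.10, 'rmse_k5': 0.25}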
def plot_dist_comparison(Z_manifold, Z_latent, labels, path_to_save = None,name = None):
print('normalize x,y')
Z_manifold[:, 0] = (Z_manifold[:,0]-Z_manifold[:,0].min())/(Z_manifold[:,0].max()-Z_manifold[:,0].min())
Z_manifold[:, 1] = (Z_manifold[:,1]-Z_manifold[:,1].min())/(Z_manifold[:,1].max()-Z_manifold[:,1].min())
Z_latent[:, 0] = (Z_latent[:,0]-Z_latent[:,0].min())/(Z_latent[:,0].max()-Z_latent[:,0].min())
Z_latent[:, 1] = (Z_latent[:,1]-Z_latent[:,1].min())/(Z_latent[:,1].max()-Z_latent[:,1].min())
manifold = pd.DataFrame({'x': Z_manifold[:, 0], 'y': Z_manifold[:, 1],'label': labels})
latents = pd.DataFrame({'x': Z_latent[:, 0], 'y': Z_latent[:, 1],'label': labels})
print('compute distances')
pwd_Z = pairwise_distances(Z_latent, Z_latent, n_jobs=2)
pwd_Ztrue = pairwise_distances(Z_manifold, Z_manifold, n_jobs=2)
print('normalize distances')
#normalize distances
pwd_Ztrue = (pwd_Ztrue-pwd_Ztrue.min())/(pwd_Ztrue.max()-pwd_Ztrue.min())
pwd_Z = (pwd_Z-pwd_Z.min())/(pwd_Z.max()-pwd_Z.min())
print('flatten')
#flatten
pwd_Ztrue = pwd_Ztrue.flatten()
pwd_Z = pwd_Z.flatten()
ind = random.sample(range(len(pwd_Z)), 2**12)
distances = pd.DataFrame({'Distances on $\mathcal{M}$': pwd_Ztrue[ind], 'Distances in $\mathcal{Z}$': pwd_Z[ind]})
print('plot')
#plot
fig, ax = plt.subplots(1,3, figsize=(3*10, 10))
sns.scatterplot(x = 'Distances on $\mathcal{M}$', y = 'Distances in $\mathcal{Z}$',data = distances, ax = ax[1], edgecolor = None,alpha=0.3)
#ax[0].set(xlabel='Distances on $\mathcal{M}$', ylabel='Distances in $\mathcal{Z}$',fontsize=25)
ax[1].xaxis.label.set_size(20)
ax[1].yaxis.label.set_size(20)
ax[1].set_title('Comparison of pairwise distances',fontsize=24,pad=20)
sns.scatterplot(y = 'x', x = 'y', hue='label', data = manifold,ax = ax[0],palette=plt.cm.viridis, marker=".", s=80,
edgecolor="none", legend=False)
ax[0].set_title('True manifold ($\mathcal{M}$)',fontsize=24,pad=20)
ax[0].set(xlabel="", ylabel="")
ax[0].set_yticks([])
sns.scatterplot(x = 'x', y = 'y',hue='label', data = latents,ax = ax[2],palette=plt.cm.viridis, marker=".", s=80,
edgecolor="none", legend=False)
ax[2].set_title('Latent space ($\mathcal{Z}$)',fontsize=24,pad=20)
ax[2].set(xlabel="", ylabel="")
ax[2].set_yticks([])
fig.tight_layout(pad=5)
if path_to_save != None and name != None:
print('save plot')
fig.savefig(os.path.join(path_to_save,'{}_4.pdf'.format(name)),dpi = 100)
plt.show()
plt.close()
return (np.square(pwd_Ztrue - pwd_Z)).mean()
def plot_dist_comparison2(Z_manifold, Z_latent, labels, path_to_save = None,name = None):
print('normalize x,y')
Z_manifold[:, 0] = (Z_manifold[:,0]-Z_manifold[:,0].min())/(Z_manifold[:,0].max()-Z_manifold[:,0].min())
Z_manifold[:, 1] = (Z_manifold[:,1]-Z_manifold[:,1].min())/(Z_manifold[:,1].max()-Z_manifold[:,1].min())
Z_latent[:, 0] = (Z_latent[:,0]-Z_latent[:,0].min())/(Z_latent[:,0].max()-Z_latent[:,0].min())
Z_latent[:, 1] = (Z_latent[:,1]-Z_latent[:,1].min())/(Z_latent[:,1].max()-Z_latent[:,1].min())
manifold = pd.DataFrame({'x': Z_manifold[:, 0], 'y': Z_manifold[:, 1],'label': labels})
latents = pd.DataFrame({'x': Z_latent[:, 0], 'y': Z_latent[:, 1],'label': labels})
print('compute distances')
pwd_Z = pairwise_distances(Z_latent, Z_latent, n_jobs=2)
pwd_Ztrue = pairwise_distances(Z_manifold, Z_manifold, n_jobs=2)
print('normalize distances')
#normalize distances
pwd_Ztrue = (pwd_Ztrue-pwd_Ztrue.min())/(pwd_Ztrue.max()-pwd_Ztrue.min())
pwd_Z = (pwd_Z-pwd_Z.min())/(pwd_Z.max()-pwd_Z.min())
print('flatten')
#flatten
pwd_Ztrue = pwd_Ztrue.flatten()
pwd_Z = pwd_Z.flatten()
ind = random.sample(range(len(pwd_Z)), 2**12)
distances = pd.DataFrame({'Distances on $\mathcal{M}$': pwd_Ztrue[ind], 'Distances in $\mathcal{Z}$': pwd_Z[ind]})
# -*- coding: utf-8 -*-
"""
Created on Monday 18 may 2020
All the thesis code, no code excecution!
@author: Dainean
"""
#Prepare the python system
import pandas as pd #Dataframes
import numpy as np #Numpy
# Reading and saving fits files
import os #Move around in our OS
from astropy.table import Table
from astropy.io import fits #Working with fits
#Isolation Foreststuffs
import eif as iso #Expanded Isolation Forest
#Clustering
from scipy.sparse import diags # Laplacian scoring
from skfeature.utility.construct_W import construct_W # Laplacian scoring
from sklearn.cluster import KMeans #Kmeans clustering
from sklearn.preprocessing import StandardScaler
# For PFA
from sklearn.decomposition import PCA
from collections import defaultdict
from sklearn.metrics.pairwise import euclidean_distances
# Plotting
import matplotlib.pyplot as plt
import seaborn as sns #improved plots
#Working directory control
cwd = os.getcwd()
#Selecting dataset
#change dataset here, Alpha, prichi or beta
#dataset = "Alpha" #Initial max row dataset
#dataset = "prichi" #prichi < 3 filtered dataset, 24999 rows. OBSELETE
#dataset = "beta" #prichi < 2 filtered dataset, 13787 rows
#dataset = "gamma" #prichi < 2 filtered dataset, (removed photometric)) OBSELETE
#dataset = "delta" #updated DB creator, based on GaussFitSimple, 28128 rows
#dataset = "epsilon" #trimmed down version of delta, prichi <2, 10941 rows (for easier computation)
#dataset = "zeta" # Full Photometric, GaussFitSimple, prichi <2, 10941 rows × 134 columns
#dataset = "zeta" # Full Photometric, GaussFitSimple, prichi <2, 10941 rows × 134 columns
dataset = "eta" # Full Photometric, GaussFitSimple, all columns
detect_path = True #this is for easier working in spyder
#Set up directory path, load initial dataframes
if detect_path == True:
print("Initial working directory is:", cwd)
if '31618' in cwd:
print("Working at Dora")
location = "dora"
if 'Dainean' in cwd:
print("Working at home, changing to onedrive folder")
location = "home"
if 'Onedrive' in cwd:
print("Working in onedrive folder")
location = "home"
if 'Dropbox' in cwd:
print("Working at home, changing to onedrive folder")
location = "home"
if location == "home":
os.chdir(r'D:\Onedrive\Thesis\support\%s' % dataset)
print(os.getcwd())
if location == "dora":
os.chdir(r'C:\Sander\support\%s' % dataset)
print(os.getcwd())
#Loading dataframes Only part for now
phot = pd.read_hdf('Parts_DB.h5', 'Photometric')
col = pd.read_hdf('Parts_DB.h5', 'Colour')
spec = pd.read_hdf('Parts_DB.h5', 'Spectral')
from __future__ import print_function
from collections import defaultdict
import pandas as pd
import re
import click
codons = {'AAA': 'Lys',
'AAC': 'Asn',
'AAG': 'Lys',
'AAU': 'Asn',
'ACA': 'Thr',
'ACC': 'Thr',
'ACG': 'Thr',
'ACU': 'Thr',
'AGA': 'Arg',
'AGC': 'Ser',
'AGG': 'Arg',
'AGU': 'Ser',
'AUA': 'Ile',
'AUC': 'Ile',
'AUG': 'Met',
'AUU': 'Ile',
'CAA': 'Gln',
'CAC': 'His',
'CAG': 'Gln',
'CAU': 'His',
'CCA': 'Pro',
'CCC': 'Pro',
'CCG': 'Pro',
'CCU': 'Pro',
'CGA': 'Arg',
'CGC': 'Arg',
'CGG': 'Arg',
'CGU': 'Arg',
'CUA': 'Leu',
'CUC': 'Leu',
'CUG': 'Leu',
'CUU': 'Leu',
'GAA': 'Glu',
'GAC': 'Asp',
'GAG': 'Glu',
'GAU': 'Asp',
'GCA': 'Ala',
'GCC': 'Ala',
'GCG': 'Ala',
'GCU': 'Ala',
'GGA': 'Gly',
'GGC': 'Gly',
'GGG': 'Gly',
'GGU': 'Gly',
'GUA': 'Val',
'GUC': 'Val',
'GUG': 'Val',
'GUU': 'Val',
'UAA': 'TER',
'UAC': 'Tyr',
'UAG': 'TER',
'UAU': 'Tyr',
'UCA': 'Ser',
'UCC': 'Ser',
'UCG': 'Ser',
'UCU': 'Ser',
'UGA': 'TER',
'UGC': 'Cys',
'UGG': 'Trp',
'UGU': 'Cys',
'UUA': 'Leu',
'UUC': 'Phe',
'UUG': 'Leu',
'UUU': 'Phe'}
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.argument('codonw-table')
@click.option('--output-table', default=None)
def codonw_to_table(codonw_table, output_table=None):
if not output_table:
output_table = "./parsed_codonw.csv"
big_dict = defaultdict(lambda:[])
with open(codonw_table) as ih:
d = []
for l in ih:
if "codons in" in l:
orf = "_".join(l.split()[3].split("_")[0:2])
big_dict[orf] += d
d = []
else:
vec = l.strip().split()
for i, v in enumerate(vec):
if re.search('[A|T|C|G|U]{3}', v):
d.append({'codon': v,'AA':codons[v],'val1':vec[i + 1],'val2':vec[i+2]})
dflist = []
for i in big_dict.keys():
df = pd.DataFrame(big_dict[i])
# column deletion using del operator and pop method of pandas dataframe
import pandas as pd
import numpy as np
d={'one':pd.Series([1,2,3],index=['a','b','c']),
'two':pd.Series([1,2,3,4],index=['a','b','c','d']),
'three':pd.Series([10,20,30],index=['a','b','c'])}
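# The snippet is cut off here; a continuation consistent with the comment above
# (column deletion with `del` and `pop`) might look like:
df = pd.DataFrame(d)
del df['one']           # delete column 'one' in place
popped = df.pop('two')  # remove column 'two' and return it as a Series
print(df)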
#!/usr/bin/env python
# coding: utf-8
# # EXPANDING WINDOW SPLIT
# ### LOAD LIBRARIES
# In[ ]:
import os
import gc
import warnings
import pandas as pd
import numpy as np
import pickle
warnings.filterwarnings("ignore")
pd.set_option("display.max_columns", 500)
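# The notebook is truncated after this setup cell. For reference, a generic
# expanding-window split (a sketch of the idea named in the title, not the
# notebook's own code) can be written as:
def expanding_window_splits(n_samples, initial_train_size, horizon):
    """Yield (train_idx, valid_idx) index arrays with a growing training window."""
    split = initial_train_size
    while split + horizon <= n_samples:
        yield np.arange(0, split), np.arange(split, split + horizon)
        split += horizon
# e.g. list(expanding_window_splits(100, initial_train_size=60, horizon=10)) gives
# train windows of length 60, 70, 80, 90, each followed by a 10-step validation block.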
import os
import sys
import time
import shutil
import random
import numpy as np
import pandas as pd
import geopandas as gpd
from map2loop.topology import Topology
from map2loop import m2l_utils
from map2loop import m2l_geometry
from map2loop import m2l_interpolation
from map2loop import m2l_map_checker
from map2loop.m2l_utils import display, enable_quiet_mode, disable_quiet_mode, print
from map2loop.m2l_export import export_to_projectfile
import map2model
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import rasterio
import rasterio.mask
import fiona
import shapely
class Config(object):
"""Object that represents a sub-project. It is defined by some source data,
a region of interest (bounding box or polygon) and some execution flags.
"""
def __init__(self,
project_path,
overwrite,
geology_file,
fault_file,
fold_file,
structure_file,
mindep_file,
bbox_3d,
polygon,
step_out,
dtm_crs,
proj_crs,
local,
quiet,
loopFilename,
c_l={},
**kwargs):
self.project_path = project_path
if overwrite is False:
print(
"WARNING: Overwrite should be a string value {true, in-place} ...")
self.check_overwrite()
if overwrite is True:
print(
"WARNING: Overwrite should be a string value {true, in-place} ... converting to true.")
overwrite = 'true'
if (not os.path.exists(project_path)):
# Create proj root dir if doesn't exist
os.mkdir(project_path)
elif overwrite == "in-place":
# Pass if proj root exists and complete overwrite not wanted
pass
else:
# Remove if exists and accept user's direction
if overwrite == "true":
shutil.rmtree(project_path)
while os.path.exists(project_path):
pass
os.mkdir(project_path)
else:
self.check_overwrite()
self.graph_path = os.path.join(self.project_path, 'graph')
self.tmp_path = os.path.join(self.project_path, 'tmp')
self.data_path = os.path.join(self.project_path, 'data')
self.dtm_path = os.path.join(self.project_path, 'dtm')
self.output_path = os.path.join(self.project_path, 'output')
self.vtk_path = os.path.join(self.project_path, 'vtk')
self.fault_file_csv = os.path.join(self.tmp_path, "faults.csv")
self.fault_output_file_csv = os.path.join(self.output_path,
"faults.csv")
self.structure_file_csv = os.path.join(self.tmp_path, "structure.csv")
self.geology_file_csv = os.path.join(self.tmp_path, "geology.csv")
self.mindep_file_csv = os.path.join(self.tmp_path, "mindep.csv")
self.strat_graph_file = os.path.join(self.graph_path,
"graph_strat_NONE.gml")
self.dtm_file = os.path.join(self.dtm_path, 'dtm.tif')
self.dtm_reproj_file = os.path.join(self.dtm_path, 'dtm_rp.tif')
if (not os.path.isdir(self.tmp_path)):
os.mkdir(self.tmp_path)
if (not os.path.isdir(self.data_path)):
os.mkdir(self.data_path)
if (not os.path.isdir(self.output_path)):
os.mkdir(self.output_path)
if (not os.path.isdir(self.dtm_path)):
os.mkdir(self.dtm_path)
if (not os.path.isdir(self.vtk_path)):
os.mkdir(self.vtk_path)
if (not os.path.isdir(self.graph_path)):
os.mkdir(self.graph_path)
self.quiet = quiet
if self.quiet == 'all':
enable_quiet_mode()
self.clut_path = kwargs['clut_path']
self.run_flags = kwargs['run_flags']
self.bbox_3d = bbox_3d
self.bbox = tuple([
bbox_3d["minx"], bbox_3d["miny"], bbox_3d["maxx"], bbox_3d["maxy"]
])
self.polygon = polygon
self.step_out = step_out
self.quiet = quiet
self.c_l = c_l
self.dtm_crs = dtm_crs
self.proj_crs = proj_crs
self.loop_projectfile = loopFilename
# Check input maps for missing values
drift_prefix = kwargs.get('drift_prefix', ['None'])
self.local = local
# - Check if fold file is always the same as fault or needs to be seperated
# TODO: Allow for input as a polygon, not just a bounding box.
structure_file, geology_file, fault_file, mindep_file, fold_file, c_l = m2l_map_checker.check_map(
structure_file, geology_file, fault_file, mindep_file, fold_file,
self.tmp_path, self.bbox, c_l, proj_crs, self.local, drift_prefix)
# Process and store workflow params
self.geology_file = geology_file
self.structure_file = structure_file
self.fault_file = fault_file
self.fold_file = fold_file
self.mindep_file = mindep_file
disable_quiet_mode()
def check_overwrite(self):
allow = input(
"Directory \"{}\" exists, overwrite? (y/[n])".format(
self.project_path))
if allow == "y":
shutil.rmtree(self.project_path)
while os.path.exists(self.project_path):
pass
os.mkdir(self.project_path)
else:
sys.exit(
'Either set overwrite to true or specify a different output_path.')
def preprocess(self):
"""[summary]
:param command: [description], defaults to ""
:type command: str, optional
"""
if self.quiet == 'all':
enable_quiet_mode()
geology = gpd.read_file(self.geology_file, bbox=self.bbox)
geology[self.c_l['g']].fillna(geology[self.c_l['g2']], inplace=True)
geology[self.c_l['g']].fillna(geology[self.c_l['c']], inplace=True)
faults = gpd.read_file(self.fault_file, bbox=self.bbox)
folds = gpd.read_file(self.fold_file, bbox=self.bbox)
structures = gpd.read_file(self.structure_file, bbox=self.bbox)
mindeps = None
try:
mindeps = gpd.read_file(self.mindep_file, bbox=self.bbox)
mindeps.crs = self.proj_crs
except Exception as e:
print("Warning: Valid mineral deposit file missing")
# Fix crs to project default and overwrite source
geology.crs = self.proj_crs
faults.crs = self.proj_crs
folds.crs = self.proj_crs
structures.crs = self.proj_crs
self.mindeps = mindeps
self.geology = geology
self.faults = faults
self.structures = structures
# Faults
self.faults_clip = faults.copy()
self.faults_clip.crs = self.proj_crs
self.faults_clip_file = os.path.join(self.tmp_path, "faults_clip.shp")
self.faults_clip.to_file(self.faults_clip_file)
# Geology
self.geol_clip = m2l_utils.explode(self.geology)
self.geol_clip.crs = self.proj_crs
self.geol_clip_file = os.path.join(self.tmp_path, "geol_clip.shp")
self.geol_clip.to_file(self.geol_clip_file)
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# Check if bedding data uses the strike convention instead of dip direction
if (self.c_l['otype'] == 'strike'):
structures['azimuth2'] = structures.apply(
lambda row: row[self.c_l['dd']] + 90.0, axis=1)
self.c_l['dd'] = 'azimuth2'
self.c_l['otype'] = 'dip direction'
structures.to_file(self.structure_file)
# Structures
list1 = [
'geometry', self.c_l['d'], self.c_l['dd'], self.c_l['sf'],
self.c_l['bo']
]
list2 = list(set(list1))
sub_pts = self.structures[list2]
structure_code = gpd.sjoin(sub_pts,
self.geol_clip,
how="left",
op="within")
minx, miny, maxx, maxy = self.bbox
y_point_list = [miny, miny, maxy, maxy, miny]
x_point_list = [minx, maxx, maxx, minx, minx]
bbox_geom = shapely.geometry.Polygon(zip(x_point_list, y_point_list))
polygo = gpd.GeoDataFrame(index=[0],
crs=self.proj_crs,
geometry=[bbox_geom])
is_bed = structure_code[self.c_l['sf']].str.contains(
self.c_l['bedding'], regex=False)
structure_clip = structure_code[is_bed]
structure_clip.crs = self.proj_crs
if (self.c_l['otype'] == 'strike'):
structure_clip['azimuth2'] = structure_clip.apply(
lambda row: row[self.c_l['dd']] + 90.0, axis=1)
self.c_l['dd'] = 'azimuth2'
self.c_l['otype'] = 'dip direction'
self.structure_clip = structure_clip[~structure_clip[self.c_l['o']].
isnull()]
self.structure_clip_file = os.path.join(self.tmp_path,
'structure_clip.shp')
self.structure_clip.to_file(self.structure_clip_file)
self.create_cmap()
try:
fig, ax = plt.subplots()
plt.tight_layout()
ax.ticklabel_format(axis='both', useOffset=False, style='plain')
ax.margins(0.0)
fig.set_facecolor("#ffffff00")
self.geology_figure = geology.copy().plot(
column=self.c_l['c'],
ax=ax,
figsize=(10, 10),
edgecolor='#000000',
linewidth=0.2,
cmap=self.cmap).get_figure()
# self.export_png()
fig, ax = plt.subplots()
base = geology.plot(column=self.c_l['c'],
figsize=(10, 10),
ax=ax,
edgecolor='#000000',
linewidth=0.2,
legend=True,
cmap=self.cmap)
leg = base.get_legend()
leg.set_bbox_to_anchor((1.04, 1))
structures.plot(ax=base, color='none', edgecolor='black')
faults.plot(ax=base,
cmap='rainbow',
column=self.c_l['f'],
figsize=(10, 10),
linewidth=0.4)
structures[[
'geometry', self.c_l['gi'], self.c_l['d'], self.c_l['dd']
]].plot(ax=base)
fig = self.polygon.plot(ax=base, color='none',
edgecolor='black').get_figure()
fig.savefig(os.path.join(self.tmp_path, "input-data.png"))
if self.quiet == 'None':
plt.show()
return
except Exception as e:
print(e)
disable_quiet_mode()
def create_cmap(self):
# Make colours consistent from map to model
formations = sorted([
formation.replace(" ", "_").replace('-', '_') for formation in
list(set(self.geol_clip[self.c_l['c']].to_numpy()))
])
temp_colours = [""] * len(formations)
self.colour_dict = dict(zip(formations, temp_colours))
try:
# Try to retrieve the clut reference
colour_ref = pd.read_csv(self.clut_path)
for formation in formations:
key = formation
colour = None
try:
colour = colour_ref[colour_ref['UNITNAME'] ==
key]['colour'].to_numpy()[0]
except Exception as e:
colour = ('#%02X%02X%02X' %
(random.randint(0, 255), random.randint(
0, 255), random.randint(0, 255)))
self.colour_dict[key] = colour
print(key, colour)
except Exception as e:
# Otherwise, just append a random set
self.clut_path = ""
random_colours = [
'#%02X%02X%02X' % (random.randint(
0, 255), random.randint(0, 255), random.randint(0, 255))
for i in range(len(formations))
]
i = 0
for key in self.colour_dict.keys():
self.colour_dict[key] = random_colours[i]
self.cmap = colors.ListedColormap(self.colour_dict.values(),
name='geol_key')
def export_csv(self):
# TODO: - Move away from tab seperators entirely (topology and map2model)
# Save geology polygons
hint_flag = False # use GSWA strat database to provide topology hints
sub_geol = self.geology[[
'geometry', self.c_l['o'], self.c_l['c'], self.c_l['g'],
self.c_l['u'], self.c_l['min'], self.c_l['max'], self.c_l['ds'],
self.c_l['r1'], self.c_l['r2']
]]
Topology.save_geol_wkt(sub_geol, self.geology_file_csv, self.c_l,
hint_flag)
# Save mineral deposits
if self.mindeps is not None:
sub_mindep = self.mindeps[[
'geometry', self.c_l['msc'], self.c_l['msn'], self.c_l['mst'],
self.c_l['mtc'], self.c_l['mscm'], self.c_l['mcom']
]]
Topology.save_mindep_wkt(sub_mindep, self.mindep_file_csv,
self.c_l)
# Save orientation data
sub_pts = self.structures[[
'geometry', self.c_l['gi'], self.c_l['d'], self.c_l['dd']
]]
Topology.save_structure_wkt(sub_pts, self.structure_file_csv, self.c_l)
# Save faults
sub_lines = self.faults[['geometry', self.c_l['o'], self.c_l['f']]]
Topology.save_faults_wkt(sub_lines, self.fault_file_csv, self.c_l)
def update_parfile(self):
Topology.save_parfile(self, self.c_l, self.output_path,
self.geology_file_csv, self.fault_file_csv,
self.structure_file_csv, self.mindep_file_csv,
self.bbox[0], self.bbox[1], self.bbox[2],
self.bbox[3], 500.0, 'Fe,Cu,Au,NONE')
def run_map2model(self, deposits, aus):
quiet_m2m = False
if self.quiet == 'all':
quiet_m2m = True
if self.mindeps is not None:
run_log = map2model.run(self.graph_path, self.geology_file_csv,
self.fault_file_csv, self.mindep_file_csv,
self.bbox_3d, self.c_l, quiet_m2m,
deposits)
else:
run_log = map2model.run(self.graph_path, self.geology_file_csv,
self.fault_file_csv, "", self.bbox_3d,
self.c_l, quiet_m2m, deposits)
print(run_log)
print("Resolving ambiguities using ASUD...", end='\toutput_dir:')
if aus:
Topology.use_asud(self.strat_graph_file, self.graph_path)
self.strat_graph_file = os.path.join(self.graph_path,
'ASUD_strat.gml')
print("Done.")
print("Generating topology graph display and unit groups...")
self.G = nx.read_gml(self.strat_graph_file, label='id')
selected_nodes = [n for n, v in self.G.nodes(data=True) if n >= 0]
if self.quiet == 'None':
nx.draw_networkx(self.G,
pos=nx.kamada_kawai_layout(self.G),
arrows=True,
nodelist=selected_nodes)
nlist = list(self.G.nodes.data('LabelGraphics'))
nlist.sort()
for node in nlist:
if node[0] >= 0:
elem = str(node[1]).replace("{'text':", "").replace(
", 'fontSize': 14}", "")
# second = elem.split(":").replace("'", "")
print(node[0], " ", elem)
# plt.savefig(os.path.join(self.tmp_path,"topology-fig.png"))
print("Topology figure saved to",
os.path.join(self.tmp_path, "topology-fig.png"))
# Save groups of stratigraphic units
groups, self.glabels, G = Topology.get_series(self.strat_graph_file,
'id')
quiet_topology = True
if self.quiet == 'None':
quiet_topology = False
Topology.save_units(
G,
self.tmp_path,
self.glabels,
Australia=True,
asud_strat_file="https://gist.githubusercontent.com/yohanderose/3b257dc768fafe5aaf70e64ae55e4c42/raw/8598c7563c1eea5c0cd1080f2c418dc975cc5433/ASUD.csv",
quiet=quiet_topology)
print("Done")
def load_dtm(self, source="AU"):
# group all Australian states codes under the global country code (ISO 3166 ALPHA-2)
polygon_ll = self.polygon.to_crs(self.dtm_crs)
minlong = polygon_ll.total_bounds[0] - self.step_out
maxlong = polygon_ll.total_bounds[2] + self.step_out
minlat = polygon_ll.total_bounds[1] - self.step_out
maxlat = polygon_ll.total_bounds[3] + self.step_out
print("Fetching DTM... ", end=" bbox:")
print(minlong, maxlong, minlat, maxlat)
if source in ("WA", "NSW", "VIC", "SA", "QLD", "ACT", "TAS"):
source = 'AU'
i, done = 0, False
while not done:
if i >= 10:
raise NameError(
f'map2loop error: Could not access DTM server after {i} attempts'
)
try:
print(f'Attempt: {i} ...', end='')
if source.upper() in ("AU", "AUSTRALIA"):
m2l_utils.get_dtm(self.dtm_file, minlong, maxlong, minlat,
maxlat)
elif source.upper() in ("T.H", "HAWAII"): # beware, TH is ISO 3166 code for Thailand
m2l_utils.get_dtm_hawaii(self.dtm_file, minlong, maxlong,
minlat, maxlat)
else: # try from opentopography
m2l_utils.get_dtm_topography_org(
self.dtm_file, minlong, maxlong, minlat, maxlat)
print("Succeeded !")
done = True
except:
time.sleep(1)
i += 1
print(f' Failed !')
elif source.startswith('http'):
i, done = 0, False
while not done:
if i >= 10:
raise NameError(
f'map2loop error: Could not access DTM server after {i} attempts'
)
try:
print(f'Attempt: {i} ...', end='')
if 'au' in source:
m2l_utils.get_dtm(self.dtm_file, minlong, maxlong, minlat,
maxlat, url=source)
elif 'hawaii' in source: # beware, TH is ISO 3166 code for Thailand
m2l_utils.get_dtm_hawaii(self.dtm_file, minlong, maxlong,
minlat, maxlat, url=source)
else: # try from opentopography
m2l_utils.get_dtm_topography_org(
self.dtm_file, minlong, maxlong, minlat, maxlat)
print("Succeeded !")
done = True
except:
time.sleep(1)
i += 1
print(f' Failed !')
else:
bbox = [
self.bbox_3d["minx"], self.bbox_3d["miny"],
self.bbox_3d["maxx"], self.bbox_3d["maxy"]
]
m2l_utils.get_local_dtm(self.dtm_file, source, self.dtm_crs, bbox)
m2l_utils.reproject_dtm(self.dtm_file,
self.dtm_reproj_file,
self.dtm_crs, self.proj_crs)
self.dtm = rasterio.open(self.dtm_reproj_file)
if self.quiet == 'None':
plt.imshow(self.dtm.read(1), cmap='terrain', vmin=0, vmax=1000)
plt.title('DTM')
plt.show()
def join_features(self):
# Save geology clips
quiet_topology = True
if self.quiet == "None":
quiet_topology = False
Topology.save_group(Topology, self.G, self.tmp_path, self.glabels,
self.geol_clip, self.c_l, quiet_topology)
def calc_depth_grid(self, dtb):
dtm = self.dtm
if dtb == "":
self.dtb = 0
self.dtb_null = 0
print("dtb and dtb_null set to 0")
return
# TODO: DTB need to be defined, every function call bellow here that has a False boolean is referencing to the workflow['cover_map'] flag
# dtb_grid = os.path.join(data_path,'young_cover_grid.tif') #obviously hard-wired for the moment
# dtb_null = '-2147483648' #obviously hard-wired for the moment
# cover_map_path = os.path.join(data_path,'Young_Cover_FDS_MGA_clean.shp') #obviously hard-wired for the moment
# dtb_clip = os.path.join(output_path,'young_cover_grid_clip.tif') #obviously hard-wired for the moment
# cover_dip = 10 # dip of cover away from contact
# cover_spacing = 5000 # of contact grid in metres
dtb_raw = rasterio.open(dtb_grid)
cover = gpd.read_file(cover_map_path)
with fiona.open(cover_map_path, "r") as shapefile:
shapes = [feature["geometry"] for feature in shapefile]
with rasterio.open(dtb_grid) as src:
out_image, out_transform = rasterio.mask.mask(src,
shapes,
crop=True)
out_meta = src.meta.copy()
out_meta.update({
"driver": "GTiff",
"height": out_image.shape[1],
"width": out_image.shape[2],
"transform": out_transform
})
with rasterio.open(dtb_clip, "w", **out_meta) as dest:
dest.write(out_image)
dtb = rasterio.open(dtb_clip)
m2l_geometry.process_cover(output_path,
dtm,
dtb,
dtb_null,
cover,
workflow['cover_map'],
cover_dip,
bbox,
proj_crs,
cover_spacing,
contact_decimate=3,
use_vector=True,
use_grid=True)
def export_orientations(self, orientation_decimate):
m2l_geometry.save_orientations(self.structure_clip, self.output_path,
self.c_l, orientation_decimate,
self.dtm, self.dtb, self.dtb_null,
False)
if self.quiet == 'None':
m2l_utils.plot_points(
os.path.join(self.output_path, 'orientations.csv'),
self.geol_clip, 'formation', 'X', 'Y', False, 'alpha')
# Create arbitrary points for series without orientation data
m2l_geometry.create_orientations(self.tmp_path, self.output_path,
self.dtm, self.dtb, self.dtb_null,
False, self.geol_clip,
self.structure_clip, self.c_l)
def export_contacts(self, contact_decimate, intrusion_mode):
ls_dict, ls_dict_decimate = m2l_geometry.save_basal_contacts(
self.tmp_path, self.dtm, self.dtb, self.dtb_null, False,
self.geol_clip, contact_decimate, self.c_l, intrusion_mode)
# Remove basal contacts defined by faults, no decimation
m2l_geometry.save_basal_no_faults(
os.path.join(self.tmp_path, 'basal_contacts.shp'),
os.path.join(self.tmp_path, 'faults_clip.shp'), ls_dict, 10,
self.c_l, self.proj_crs)
# Remove faults from decimated basal contacts then save
contacts = gpd.read_file(
os.path.join(self.tmp_path, 'basal_contacts.shp'))
m2l_geometry.save_basal_contacts_csv(contacts, self.output_path,
self.dtm, self.dtb, self.dtb_null,
False, contact_decimate, self.c_l)
# False in this call was already false and isn't the cover flag
if self.quiet == "None":
m2l_utils.plot_points(
os.path.join(self.output_path, 'contacts4.csv'),
self.geol_clip, 'formation', 'X', 'Y', False, 'alpha')
# Interpolates a regular grid of orientations from an shapefile of
# arbitrarily-located points and saves out four csv files of l, m & n
# direction cosines and dip dip direction data
def test_interpolation(self, interpolation_spacing, misorientation,
interpolation_scheme):
geology_file = self.geol_clip_file
structure_file = self.structure_clip_file
basal_contacts = os.path.join(self.tmp_path, 'basal_contacts.shp')
self.spacing = interpolation_spacing # grid spacing in meters
# misorientation = misorientation
self.scheme = interpolation_scheme
orientations = self.structures
quiet_interp = True
if self.quiet == "None":
quiet_interp = False
group_girdle = m2l_utils.plot_bedding_stereonets(
orientations, self.geology, self.c_l, quiet_interp)
super_groups, self.use_gcode3 = Topology.super_groups_and_groups(
group_girdle, self.tmp_path, misorientation)
# print(super_groups)
# print(self.geology['GROUP_'].unique())
bbox = self.bbox
orientation_interp, contact_interp, combo_interp = m2l_interpolation.interpolation_grids(
geology_file, structure_file, basal_contacts, bbox, self.spacing,
self.proj_crs, self.scheme, super_groups, self.c_l)
with open(os.path.join(self.tmp_path, 'interpolated_orientations.csv'),
'w') as f:
f.write('X, Y, l, m, n, dip, dip_dir\n')
for row in orientation_interp:
ostr = '{}, {}, {}, {}, {}, {}, {}\n'.format(
row[0], row[1], row[2], row[3], row[4], row[5], row[6])
f.write(ostr)
with open(os.path.join(self.tmp_path, 'interpolated_contacts.csv'),
'w') as f:
f.write('X, Y, l, m, angle\n')
for row in contact_interp:
ostr = '{}, {}, {}, {}, {}\n'.format(row[0], row[1], row[2],
row[3], row[4])
f.write(ostr)
with open(os.path.join(self.tmp_path, 'interpolated_combined.csv'),
'w') as f:
f.write('X, Y, l, m, n, dip, dip_dir\n')
for row in combo_interp:
ostr = '{}, {}, {}, {}, {}, {}, {}\n'.format(
row[0], row[1], row[2], row[3], row[4], row[5], row[6])
f.write(ostr)
if (self.spacing < 0):
self.spacing = -(bbox[2] - bbox[0]) / self.spacing
self.x = int((bbox[2] - bbox[0]) / self.spacing) + 1
self.y = int((bbox[3] - bbox[1]) / self.spacing) + 1
x = self.x
y = self.y
print(x, y)
dip_grid = np.ones((y, x))
dip_grid = dip_grid * -999
dip_dir_grid = np.ones((y, x))
dip_dir_grid = dip_dir_grid * -999
contact_grid = np.ones((y, x))
contact_grid = contact_grid * -999
for row in combo_interp:
r = int((row[1] - bbox[1]) / self.spacing)
c = int((row[0] - bbox[0]) / self.spacing)
dip_grid[r, c] = float(row[5])
dip_dir_grid[r, c] = float(row[6])
for row in contact_interp:
r = int((row[1] - bbox[1]) / self.spacing)
c = int((row[0] - bbox[0]) / self.spacing)
contact_grid[r, c] = float(row[4])
self.dip_grid = dip_grid
self.dip_dir_grid = dip_dir_grid
if self.quiet == 'None':
print('interpolated dips')
plt.imshow(self.dip_grid,
cmap="hsv",
origin='lower',
vmin=-90,
vmax=90)
plt.show()
print('interpolated dip directions')
plt.imshow(self.dip_dir_grid,
cmap="hsv",
origin='lower',
vmin=0,
vmax=360)
plt.show()
print('interpolated contacts')
plt.imshow(contact_grid,
cmap="hsv",
origin='lower',
vmin=-360,
vmax=360)
plt.show()
def save_cmap(self):
"""Create a colourmap for the model using the colour code
"""
all_sorts = pd.read_csv(
os.path.join(self.tmp_path, 'all_sorts_clean.csv'))
colours = []
for code in all_sorts['code']:
colours.append([self.colour_dict[code]])
data = colours
expected_extra_cols = pd.DataFrame(columns=['colour'], data=data)
all_sorts = pd.concat([all_sorts, expected_extra_cols], axis=1)
import os
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
from matplotlib import pyplot
from xgboost import plot_importance
import xgboost as xgb
from sklearn.preprocessing import StandardScaler
from hyperopt import fmin, hp, tpe, Trials, space_eval, STATUS_OK
from hyperopt.pyll import scope as ho_scope
from hyperopt.pyll.stochastic import sample as ho_sample
from functools import partial
from sklearn.model_selection import train_test_split, GridSearchCV, ShuffleSplit, cross_val_score, StratifiedKFold
from sklearn.metrics import r2_score, mean_squared_log_error, mean_squared_error
from src.functions import loading, get_var, setting_X, setting_y, objetivo #importing loading functions
import json
file_path = "/home/slimbook/git-repos/eolo-project/data/.raw/GFS_data"
csv_path = ("/home/slimbook/git-repos/eolo-project/data/processed/power_data.csv")
list_var = ["Vel100m"]
print("loading raw data and processing it...")
data_dictionary = loading(file_path) # All files and variables loaded as dictionary
var_to_test = get_var(data_dictionary, list_var, nz=5)
print("Setting X and y data...")
meteo = setting_X(var_to_test)
power = setting_y(csv_path)
train = pd.concat([power, meteo], axis=1, join="inner")
from os.path import exists, join
import pandas as pd
import torch
import logging
from transformers import AutoModelForSequenceClassification
from train_bert import compute_negative_entropy, LMForSequenceClassification
from dataset import get_dataset_by_name, TokenizerDataModule
from torch.utils.data import DataLoader
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme()
import glob
import numpy as np
from IPython.display import display
import os
from os.path import join
import re
import torch
from collections import namedtuple
import pdb
logging.basicConfig(
format="%(levelname)s:%(asctime)s:%(module)s:%(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
class ScoreHandler:
"""Standardize how scores are saved and loaded for a given model & dataset."""
def __init__(self, dataset: torch.utils.data.Dataset):
self.dataset = dataset
def save_scores(self, scores, root_dir: str, column_name: str, dataset: str):
"""Save the scores for a model on a dataset.
It uses a single csv file per dataset. Each column holds the scores of a
single model.
Return: (dataframe with scores, path of the file containing the scores)
"""
file_name = f"scores_{dataset}.csv"
file_path = join(root_dir, file_name)
df = pd.read_csv(file_path) if exists(file_path) else self.dataset.data.copy()
if column_name in df.columns:
logging.info(f"Scores for {column_name} are present. Overriding them...")
df[column_name] = scores
df.to_csv(file_path, index=False)
return df, file_path
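# Hypothetical usage (names and paths are placeholders, not from the original runs):
# handler = ScoreHandler(val_dataset)
# df, csv_path = handler.save_scores(scores.tolist(), root_dir="outputs",
#                                    column_name="bert_entropy_reg", dataset="val")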
def load_model_from_folder(model_dir, pattern=None):
if pattern:
ckpt = glob.glob(join(model_dir, f"*{pattern}*"))[0]
else:
ckpt = glob.glob(f"{model_dir}/*.ckpt")[0]
print("Loading", ckpt)
if pattern:
model = LMForSequenceClassification.load_from_checkpoint(ckpt)
else:
model = AutoModelForSequenceClassification.from_pretrained(model_dir)
return model
def join_subwords(tokens):
span_start_idx = -1
spans = list()
for i, t in enumerate(tokens):
if t.startswith("#") and span_start_idx == -1:
span_start_idx = i - 1
continue
if not t.startswith("#") and span_start_idx != -1:
spans.append((span_start_idx, i))
span_start_idx = -1
# span open at the end
if span_start_idx != -1:
spans.append((span_start_idx, len(tokens)))
merged_tkns = list()
pop_idxs = list()
for span in spans:
merged = "".join([t.strip("#") for t in tokens[span[0] : span[1]]])
merged_tkns.append(merged)
# indexes to remove in the final sequence
for pop_idx in range(span[0] + 1, span[1]):
pop_idxs.append(pop_idx)
new_tokens = tokens.copy()
for i, (span, merged) in enumerate(zip(spans, merged_tkns)):
new_tokens[span[0]] = merged # substitue with whole word
mask = np.ones(len(tokens))
mask[pop_idxs] = 0
new_tokens = np.array(new_tokens)[mask == 1]
assert len(new_tokens) == len(tokens) - len(pop_idxs)
return new_tokens, pop_idxs, spans
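# Example (WordPiece tokens, '##' marks subword continuations):
# join_subwords(['the', 'un', '##fair', '##ness', 'persists'])
# -> (array(['the', 'unfairness', 'persists']), [2, 3], [(1, 4)])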
def average_2d_over_spans(tensor, spans, reduce_fn="mean"):
# print("Spans #", spans)
slices = list()
last_span = None
for span in spans:
# first slice
if last_span is None:
slices.append(tensor[:, : span[0]])
else:
slices.append(tensor[:, last_span[1] : span[0]])
# average over the subwords
if reduce_fn == "mean":
slices.append(tensor[:, span[0] : span[1]].mean(-1).unsqueeze(-1))
else:
slices.append(tensor[:, span[0] : span[1]].sum(-1).unsqueeze(-1))
last_span = span
# last slice
if spans[-1][1] != tensor.shape[1]:
slices.append(tensor[:, last_span[1] :])
res = torch.cat(slices, dim=1)
# print("After average:", res.shape)
return res
def get_scores(y_true, scores_path):
scores = torch.load(scores_path)
y_pred = torch.zeros(scores.shape[0]).masked_fill(scores >= 0.5, 1)
fp_mask = (y_true == 0) & (y_pred == 1)
fp = torch.zeros(scores.shape[0]).masked_fill(fp_mask, 1)
fp_indexes = torch.nonzero(fp).squeeze(-1)
print(f"Found {fp_indexes.shape[0]} FPs")
return {"scores": scores, "y_pred": y_pred, "fp_indexes": fp_indexes}
#### VISUALIZATION: ENTROPY ####
def show_entropy(
models,
tokenizer,
max_sequence_length,
data,
names,
n_samples=2,
idxs=None,
regularization="entropy",
join=False,
layers_mean=False,
prompt=None,
exp=False,
remove_special=False,
labelsize=15,
titlesize=15,
set_figsize=True,
set_tightlayout=True,
):
def process_text(idx, text):
with torch.no_grad():
print(text)
encoding = tokenizer(
text,
add_special_tokens=True,
padding=True,
truncation=True,
max_length=max_sequence_length,
return_tensors="pt",
)
tokens = tokenizer.convert_ids_to_tokens(encoding["input_ids"][0])
if remove_special:
tokens = tokens[1:-1]
# print("Len:", len(tokens), "tokens:", tokens)
if join:
# join subwords for better visualization
new_tokens, pop_idxs, spans = join_subwords(tokens)
# print("Len new tokens", len(new_tokens))
tokens = new_tokens
heatmap_list = list()
final_entropies = list()
y_scores = list()
for i, (model, name) in enumerate(zip(models, names)):
if regularization == "entropy":
output = model(**encoding, output_attentions=True)
reg_target = output["attentions"]
else:
output = model(**encoding, output_norms=True)
norms = output["norms"]
afx_norms = [t[1] for t in norms]
reg_target = afx_norms
logits = output["logits"]
y_score = logits.softmax(-1)[0, 1]
print(y_score)
neg_entropy, entropies = compute_negative_entropy(
reg_target, encoding["attention_mask"], return_values=True
)
# print("Entropies shape:", entropies[0].shape)
# join_subwords(entropies, tokens)
# print(name, "Final entropy: ", -neg_entropy.item())
entropies = -entropies[0] # take positive entropy
entropies = torch.flipud(entropies) # top layers are placed to the top
# average subwords
if join and len(spans) > 0:
entropies = average_2d_over_spans(entropies, spans)
if layers_mean:
entropies = entropies.mean(0).unsqueeze(0)
if exp:
entropies = (1 / entropies).log()
if remove_special:
entropies = entropies[:, 1:-1]
heatmap_list.append(entropies)
final_entropies.append(-neg_entropy.item())
y_scores.append(y_score)
#### VISUALIZATION ####
if layers_mean:
figsize = (12, 2 * len(models))
else:
figsize = (6 * len(models), 6)
if set_figsize:
fig = plt.figure(constrained_layout=False, figsize=figsize)
else:
fig = plt.figure(constrained_layout=False)
if regularization == "entropy":
fig.suptitle(
f"H: Entropy on Attention (a), ID:{idx}"
) # , {data[idx]}")
else:
fig.suptitle(
f"Entropy on Norm (||a*f(zx)||), ID:{idx}"
) # , {data[idx]}")
if set_tightlayout:
fig.tight_layout()
# compute global min and global max
heatmap_tensor = torch.stack(heatmap_list)
glob_min = heatmap_tensor.min().item()
glob_max = heatmap_tensor.max().item()
# print("Glob max:", glob_max, "Glob min", glob_min)
for i, name in enumerate(names):
if layers_mean:
gspec = fig.add_gridspec(
len(models), 2, width_ratios=[20, 1], wspace=0.1, hspace=0.1
)
splot = fig.add_subplot(gspec[i, 0])
if i == (len(names) - 1):
cbar_ax = fig.add_subplot(gspec[:, 1])
sns.heatmap(
heatmap_list[i],
ax=splot,
cbar=True,
cbar_ax=cbar_ax,
square=True,
vmin=glob_min,
vmax=glob_max,
)
splot.set_xticks(np.arange(heatmap_list[i].shape[-1]) + 0.5)
splot.set_xticklabels(tokens, rotation=90, fontsize=labelsize)
[t.set_fontsize(labelsize) for t in cbar_ax.get_yticklabels()]
# title to colorbar
cbar_ax.set_title(
"log(1/H)", fontsize=titlesize
) if exp else cbar_ax.set_title("H", fontsize=titlesize)
else:
sns.heatmap(
heatmap_list[i],
ax=splot,
cbar=False,
square=True,
vmin=glob_min,
vmax=glob_max,
)
splot.set_xticklabels([])
splot.set_yticklabels([])
splot.set_title(
f"{name}, p(1|x)={y_scores[i]:.3f}, H={final_entropies[i]:.3f}",
fontsize=titlesize,
)
else:
width_ratios = [10] * len(models)
width_ratios += [1]
gspec = fig.add_gridspec(
1, len(models) + 1, width_ratios=width_ratios, wspace=0.2
)
splot = fig.add_subplot(gspec[0, i])
if i == (len(names) - 1):
cbar_ax = fig.add_subplot(gspec[0, -1])
sns.heatmap(
heatmap_list[i],
ax=splot,
cbar=True,
cbar_ax=cbar_ax,
square=True,
vmin=glob_min,
vmax=glob_max,
)
[t.set_fontsize(labelsize) for t in cbar_ax.get_yticklabels()]
# title to colorbar
cbar_ax.set_title(
"log(1/H)", fontsize=titlesize
) if exp else cbar_ax.set_title("H", fontsize=titlesize)
else:
sns.heatmap(heatmap_list[i], ax=splot, cbar=False, square=True)
if i == 0:
splot.set_ylabel("Layer", fontsize=labelsize)
splot.set_yticklabels(np.arange(11, -1, -1), fontsize=labelsize)
else:
splot.set_yticklabels([])
splot.set_xticks(np.arange(heatmap_list[i].shape[-1]) + 0.5)
splot.set_xticklabels(tokens, rotation=90, fontsize=labelsize)
splot.set_title(
f"{name}, p(1|x)={y_scores[i]:.3f}, H={final_entropies[i]:.3f}",
fontsize=titlesize,
)
# print(len(tokens), len(axes[i].get_xticklabels()))
# print(entropies.shape)
# axes[i].set_xticks(np.arange(heatmap_list[i].shape[-1]))
# axes[i].set_xticklabels(tokens, rotation=90)
# axes[i].set_title(f"{name}, p(1|x)={y_scores[i]:.3f}, e={final_entropies[i]:.3f}")
# axes[i].set_yticklabels([])
return fig
if prompt:
idx = "custom"
text = prompt
print("ID: ", idx, text)
return process_text(idx, text)
if idxs is None:
# pick random samples to show
idxs = np.random.randint(len(data), size=n_samples)
print(idxs)
for idx in idxs:
print("ID: ", idx, data[idx])
process_text(idx, data[idx]["text"])
def compare_sentences(
model,
tokenizer,
sentences,
max_sequence_length=120,
remove_special=True,
join=True,
show_log=True,
labelsize=15,
titlesize=15,
figsize=(12, 12),
):
processed = list()
with torch.no_grad():
for text in sentences:
encoding = tokenizer(
text,
add_special_tokens=True,
padding=True,
truncation=True,
max_length=max_sequence_length,
return_tensors="pt",
)
tokens = tokenizer.convert_ids_to_tokens(encoding["input_ids"][0])
if remove_special:
tokens = tokens[1:-1]
if join:
# join subwords for better visualization
new_tokens, pop_idxs, spans = join_subwords(tokens)
# print("Len new tokens", len(new_tokens))
tokens = new_tokens
output = model(**encoding, output_attentions=True)
logits = output["logits"]
y_score = logits.softmax(-1)[0, 1]
neg_entropy, entropies = compute_negative_entropy(
output["attentions"], encoding["attention_mask"], return_values=True
)
# print("Entropies shape:", entropies[0].shape)
# print(name, "Final entropy: ", -neg_entropy.item())
entropies = -entropies[0] # take positive entropy
# average subwords
if join and len(spans) > 0:
entropies = average_2d_over_spans(entropies, spans)
entropies = entropies.mean(0).unsqueeze(0)
if show_log:
entropies = (1 / entropies).log()
if remove_special:
entropies = entropies[:, 1:-1]
processed.append((tokens, y_score, entropies))
# print(processed)
fig = plt.figure(constrained_layout=False, figsize=figsize)
gspec = fig.add_gridspec(len(sentences) * 2, 1, hspace=2, wspace=5)
vmin = torch.stack([p[2] for p in processed]).min().item()
vmax = torch.stack([p[2] for p in processed]).max().item()
print(vmin, vmax)
for i, (tokens, y_score, entropies) in enumerate(processed):
splot = fig.add_subplot(gspec[i, 0])
# cbar_ax = fig.add_subplot(gspec[:, 1])
sns.heatmap(
entropies,
ax=splot,
cbar=False,
# cbar_ax=cbar_ax,
square=True,
# cmap="Reds",
annot=False,
vmin=vmin,
vmax=vmax,
)
splot.set_xticks(np.arange(entropies.shape[-1]) + 0.5)
splot.set_xticklabels(tokens, rotation=90, fontsize=labelsize)
splot.set_yticklabels([])
splot.set_title(
f"p(1|x)={y_score:.3f}",
fontsize=titlesize,
)
# [t.set_fontsize(labelsize) for t in cbar_ax.get_yticklabels()]
# title to colorbar
# cbar_ax.set_title(
# "log(1/H)", fontsize=titlesize
# ) if exp else cbar_ax.set_title("H", fontsize=titlesize)
# fig.tight_layout()
#### BIAS_ANALYSIS: parsing results and bias analysis
def match_pattern_concat(main_dir, pattern, verbose=True):
"""Find all files that match a patter in main_dir. Then concatenate their content into a pandas df."""
versions = glob.glob(join(main_dir, pattern))
if verbose:
print(f"Found {len(versions)} versions")
res = list()
for version in versions:
df = pd.read_csv(version)
filename = os.path.basename(version)
seed = re.search(r"([0-9]{1})", filename).group(1)
# print(filename, seed)
df["seed"] = seed
res.append(df)
return pd.concat(res)
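# Hedged usage sketch: the directory and glob pattern below are placeholders,
# not paths used in the original experiments.
def _demo_match_pattern_concat():
    return match_pattern_concat("results/bias_eval", "*metrics*seed*.csv", verbose=True)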
def mean_std_across_subgroups(data: pd.DataFrame, metrics):
print("Found the following models:", data.model.unique())
model_groups = data.groupby("model")
means = list()
stds = list()
for model, group_df in model_groups:
subgroup_groups = group_df.groupby("subgroup").mean() # across seeds
for metric in metrics:
means.append(
{
"metric": metric,
"model_name": model,
"mean_across_subgroups": subgroup_groups[metric].mean(),
}
)
stds.append(
{
"metric": metric,
"model_name": model,
"std_across_subgroups": subgroup_groups[metric].std(),
}
)
    return pd.DataFrame(means), pd.DataFrame(stds)
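# Hedged usage sketch (toy numbers, not real evaluation results); assumes the
# pandas version this script targets, where groupby().mean() silently drops
# non-numeric columns such as 'model'.
def _demo_mean_std_across_subgroups():
    data = pd.DataFrame({
        "model": ["bert"] * 4,
        "subgroup": ["a", "a", "b", "b"],
        "seed": [0, 1, 0, 1],
        "auc": [0.80, 0.82, 0.70, 0.74],
    })
    means, stds = mean_std_across_subgroups(data, metrics=["auc"])
    return means, stds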
import pandas as pd
import matplotlib.pyplot as plt
tamano_muestra = 120 #N
bandera_paso = False
iter = 0
lsupAnterior = -5
linfAnterior = -5
licentAnterior = -5
datos = pd.read_csv('data.csv', header=None)
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
    dts = pd.date_range('2010-01-01', periods=35)
import sys,os
import numpy as np
import pandas as pd
import h5py
import math
from scipy.stats import entropy
from collections import Counter
import pickle
# Get Euclidean Norm minus One
def get_ENMO(x,y,z):
enorm = np.sqrt(x*x + y*y + z*z)
ENMO = np.maximum(enorm-1.0, 0.0)
return ENMO
# Get tilt angles
def get_tilt_angles(x,y,z):
angle_x = np.arctan2(x, np.sqrt(y*y + z*z)) * 180.0/math.pi
angle_y = np.arctan2(y, np.sqrt(x*x + z*z)) * 180.0/math.pi
angle_z = np.arctan2(z, np.sqrt(x*x + y*y)) * 180.0/math.pi
return angle_x, angle_y, angle_z
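# Worked example (not from the original pipeline): a device lying flat with
# gravity entirely on the z-axis gives ENMO = 0 g and a z tilt angle of 90 degrees.
def _demo_enmo_and_tilt():
    x, y, z = np.array([0.0]), np.array([0.0]), np.array([1.0])
    enmo = get_ENMO(x, y, z)                        # -> array([0.])
    ang_x, ang_y, ang_z = get_tilt_angles(x, y, z)  # -> arrays [0.], [0.], [90.] degrees
    return enmo, ang_x, ang_y, ang_z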
# Get Locomotor Inactivity During Sleep
def get_LIDS(timestamp, ENMO):
df = pd.concat((timestamp, pd.Series(ENMO)), axis=1)
df.columns = ['timestamp','ENMO']
df.set_index('timestamp', inplace=True)
df['ENMO_sub'] = np.where(ENMO < 0.02, 0, ENMO-0.02) # assuming ENMO is in g
ENMO_sub_smooth = df['ENMO_sub'].rolling('600s').sum() # 10-minute rolling sum
df['LIDS_unfiltered'] = 100.0 / (ENMO_sub_smooth + 1.0)
LIDS = df['LIDS_unfiltered'].rolling('1800s').mean().values # 30-minute rolling average
return LIDS
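# Hedged usage sketch: one hour of synthetic 1 Hz data, only to illustrate the
# expected inputs (a datetime Series and an ENMO array of the same length).
def _demo_lids():
    ts = pd.Series(pd.date_range('2020-01-01', periods=3600, freq='S'))
    x = np.random.normal(0.0, 0.05, 3600)
    y = np.random.normal(0.0, 0.05, 3600)
    z = 1.0 + np.random.normal(0.0, 0.05, 3600)  # gravity on the z-axis
    lids = get_LIDS(ts, get_ENMO(x, y, z))
    return lids  # numpy array, same length as the input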
def compute_entropy(df, bins=20):
hist, bin_edges = np.histogram(df, bins=bins)
p = hist/float(hist.sum())
ent = entropy(p)
return ent
# Aggregate statistics of features over a given time interval
def get_stats(timestamp, feature, token_interval):
feat_df = pd.DataFrame(data={'timestamp':timestamp, 'feature':feature})
feat_df.set_index('timestamp', inplace=True)
feat_mean = feat_df.resample(str(token_interval)+'S').mean()
feat_std = feat_df.resample(str(token_interval)+'S').std()
feat_min = feat_df.resample(str(token_interval)+'S').min()
feat_max = feat_df.resample(str(token_interval)+'S').max()
feat_mad = feat_df.resample(str(token_interval)+'S').apply(pd.DataFrame.mad)
feat_ent1 = feat_df.resample(str(token_interval)+'S').apply(compute_entropy, bins=20)
feat_ent2 = feat_df.resample(str(token_interval)+'S').apply(compute_entropy, bins=200)
stats = np.vstack((feat_mean['feature'], feat_std['feature'], feat_min['feature'],
feat_max['feature'], feat_mad['feature'], feat_ent1['feature'],
feat_ent2['feature'])).T
return stats
def get_categ(df, default='NaN'):
ctr = Counter(df)
for key in ctr:
ctr[key] = ctr[key]/float(len(df))
dom_categ = ctr.most_common()[0]
    if dom_categ[1] >= 0.7: # If a category covers at least 70% of the time interval, mark it as the dominant category
dom_categ = dom_categ[0]
else:
dom_categ = default
return dom_categ
def get_dominant_categ(timestamp, categ, token_interval, default='NaN'):
categ_df = pd.DataFrame(data={'timestamp':timestamp, 'category':categ})
categ_df.set_index('timestamp', inplace=True)
dom_categ = categ_df.resample(str(token_interval)+'S').apply(get_categ, default=default)
return np.array(dom_categ['category'])
# Get sequence labels in BIEO format - Beginning, Inside, End, Outside
def get_sequential_label(labels, nonwear, states):
# Initialize all labels as 'O'
seq_labels = ['O'] * len(labels)
# Rename first and last labels of the sequence
if labels[0] in states:
seq_labels[0] = 'B-' + labels[0]
if labels[-1] in states:
seq_labels[-1] = 'E-' + labels[-1]
# Rename all other labels based on their previous and next labels
for i in range(1,len(labels)-1):
# If nonwear, retain label as 'O'
if nonwear[i] is True or labels[i] not in states:
continue
# Label beginning of state
if labels[i] != labels[i-1]:
seq_labels[i] = 'B-' + labels[i]
else: # Inside a state
seq_labels[i] = 'I-' + labels[i]
# Label end of state
if labels[i] != labels[i+1]:
seq_labels[i] = 'E-' + labels[i]
return seq_labels
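# Hedged example (state names are illustrative, not the study's label set):
def _demo_bieo_labels():
    labels = ['Wake', 'Sleep', 'Sleep', 'Sleep', 'Wake']
    nonwear = [False] * len(labels)
    return get_sequential_label(labels, nonwear, states=['Wake', 'Sleep'])
    # -> ['B-Wake', 'B-Sleep', 'I-Sleep', 'E-Sleep', 'E-Wake']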
def convert2seq(features, labels, n_seq_tokens=10, user=None, position=None, dataset=None):
sequences = []
ntokens = len(labels)
columns = ['ENMO_mean','ENMO_std','ENMO_min','ENMO_max','ENMO_mad','ENMO_entropy1','ENMO_entropy2',
'angz_mean','angz_std','angz_min','angz_max','angz_mad','angz_entropy1','angz_entropy2',
'LIDS_mean','LIDS_std','LIDS_min','LIDS_max','LIDS_mad','LIDS_entropy1','LIDS_entropy2']
for st_idx in range(0,ntokens,n_seq_tokens):
end_idx = min(ntokens, st_idx+n_seq_tokens)
if (end_idx-st_idx) < (n_seq_tokens//2): # Discard last sequence if too short
continue
lbl_ctr = Counter(labels[st_idx:end_idx]).most_common()
lbl_ctr = [(lbl,float(val)/n_seq_tokens) for lbl,val in lbl_ctr]
        # Discard sequences in which 'O' makes up at least 60% of the labels
if lbl_ctr[0][0] == 'O' and lbl_ctr[0][1] >= 0.6:
continue
else:
            feat_df = pd.DataFrame(features[st_idx:end_idx], columns=columns)
''' Starting with Commonwealth_Connect_Service_Requests.csv, meaning
the tickets feature. See more info in notebook #2
'''
import pandas as pd
import numpy as np
from geopy.distance import geodesic
def find_nearest_building(df,latI,lonI):
minDist = 4000
flag = True
for i in range(0,df.shape[0]):
lat = df['lat'].iloc[i]
lon = df['lon'].iloc[i]
dist = geodesic([lat,lon],[latI,lonI]).meters
if dist<minDist:
minDist = dist
nearestBuildingInDf = i
if minDist==4000:
flag=False
nearestBuildingInDf = pd.DataFrame()
return nearestBuildingInDf,flag
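# Hedged usage sketch with made-up coordinates, only to show the expected
# 'lat'/'lon' columns and the (row position, found-flag) return pair.
def _demo_find_nearest_building():
    buildings = pd.DataFrame({'lat': [42.3730, 42.3745], 'lon': [-71.1100, -71.1050]})
    i, found = find_nearest_building(buildings, 42.3731, -71.1099)
    return i, found  # -> (0, True): the first row is within the 4000 m cutoff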
def fixLonLat(df,colName):
# extracting the lat/lon info to answer the question of where they are located:
extract = df[colName]
extract = extract.apply(lambda x: x.split('(',1)[1])
extract = extract.apply(lambda x: x.split(')',1)[0])
df['lat'] = extract.apply(lambda x: x.split(',',1)[0])
df['lon'] = extract.apply(lambda x: x.split(',',1)[1])
print('Range of Latitudes for input coordinates:',df['lat'].min(),df['lat'].max())
print('Range of Longitudes for input coordinates:',df['lon'].min(),df['lon'].max())
df['lat'] = df['lat'].astype('float')
df['lon'] = df['lon'].astype('float')
return df
def minMaxCoords(lat,lon,dlat,dlon):
minLat = lat-dlat
maxLat = lat+dlat
minLon = lon-dlon
maxLon = lon+dlon
return minLat,maxLat,minLon,maxLon
def findIdentifier(tickets,identifierLocation,dlat,dlon):
    # Iterate over the tickets/complaints, cut out the part of the identifierLocation
    # DataFrame close to each ticket location, and find the closest building match:
tickets_feature = pd.DataFrame()
tmp = pd.DataFrame()
for i in range(0, tickets.shape[0]):
lat = tickets['lat'].iloc[i]
lon = tickets['lon'].iloc[i]
minLat, maxLat, minLon, maxLon = minMaxCoords(lat, lon, dlat, dlon)
df = identifierLocation[identifierLocation['lat'] < maxLat]
df = df[df['lat'] > minLat]
df = df[df['lon'] < maxLon]
df = df[df['lon'] > minLon]
# print(df.shape[0])
        # df now contains all the buildings within the given lat/lon bounding box around the ticket location.
# one of these buildings is the one that received the ticket:
nearestBuildingIloc,flag = find_nearest_building(df, lat, lon)
if flag:
tmp = df.iloc[nearestBuildingIloc]
tmp['date'] = tickets['date'].iloc[i]
tmp['label'] = 1
tickets_feature = tickets_feature.append(tmp)
else:
print('no closest bldg for record ',i)
print(type(tickets_feature))
print(tickets_feature.shape[0])
return tickets_feature,flag
def fixDate(df,colName):
df[colName] = pd.to_datetime(df[colName],infer_datetime_format=True)
df['date'] = df[colName].dt.date
print('Input dates:',min(df['date']),'to',max(df['date']))
return df
identifierLocation = pd.read_csv('/Users/nbechor/Insight/SlipperySlope/data/processed/BldgID2WeatherIdentifier.csv')
## tickets labels:
tickets = pd.read_csv('/Users/nbechor/Insight/SlipperySlope/data/external/Snow_Ice_Sidewalk_Ordinance_Violations.csv')
tickets = fixDate(tickets,'OFFENSE DATE')
tickets = fixLonLat(tickets,'TICKET LOCATION')
lat_radius=abs(tickets['lat'].min()-tickets['lat'].max())/25
lon_radius = abs(tickets['lon'].min()-tickets['lon'].max())/25
#tickets_feature,flag = findIdentifier(tickets,identifierLocation,lat_radius,lon_radius)
#tickets_feature.to_csv('/Users/nbechor/Insight/SlipperySlope/data/interim/tickets_label.csv')
## Complaints dataset:
complaintsH = pd.read_csv('/Users/nbechor/Insight/SlipperySlope/data/external/Unshoveled_Icy_Sidewalk_Complaints.csv')
import pandas as pd
import numpy as np
from adv_finance.multiprocess import mp_pandas_obj
def mp_sample_tw(t1, num_co_events, molecule):
"""
Snippet 4.2 (page 62) Estimating The Average Uniqueness Of A Label
    :param t1: (Series) event end times, indexed by event start time
    :param num_co_events: (Series) number of concurrent events at each bar
    :param molecule: (Index) subset of event start times to process
    :return: (Series) average uniqueness (absolute value) of each event over its lifespan
"""
# Derive average uniqueness over the event's lifespan
weight = pd.Series(index=molecule)
for t_in, t_out in t1.loc[weight.index].iteritems():
weight.loc[t_in] = (1 / num_co_events.loc[t_in:t_out]).mean()
return weight.abs()
def get_sample_tw(t1, num_co_events, num_threads=1):
"""
    Calculate per-event sample weights from average uniqueness (parallel wrapper around mp_sample_tw)
    :param t1: (Series) event end times, indexed by event start time
    :param num_co_events: (Series) number of concurrent events at each bar
    :param num_threads: (int) number of threads passed to mp_pandas_obj
    :return: (Series) average uniqueness weight per event
"""
weight = mp_pandas_obj(mp_sample_tw, ('molecule', t1.index), num_threads=num_threads,
t1=t1, num_co_events=num_co_events)
return weight
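# Hedged sketch (toy numbers, not from the reference text), assuming the
# pre-2.0 pandas API this module already relies on: two overlapping events plus
# the per-bar concurrency counts that mp_sample_tw expects.
def _demo_average_uniqueness():
    bars = pd.date_range('2020-01-01', periods=5, freq='D')
    t1 = pd.Series([bars[2], bars[4]], index=[bars[0], bars[1]])  # start -> end
    num_co_events = pd.Series([1, 2, 2, 1, 1], index=bars)
    return mp_sample_tw(t1, num_co_events, molecule=t1.index)
    # -> ~0.667 for the first event, 0.75 for the second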
def mp_sample_w(t1, num_co_events, close, molecule):
"""
Snippet 4.10 (page 69) Determination Of Sample Weight By Absolute Return Attribution
:param t1:
:param num_co_events:
:param close:
:param molecule:
:return:
"""
# Derive sample weight by return attribution
ret = np.log(close).diff() # log-returns, so that they are additive
    wght = pd.Series(index=molecule)
#!/usr/bin/env python
# coding: utf-8
# ## Compare compressed vs. raw results
#
# In this notebook, we want to compare mutation status classification results with varying numbers of PCA components as predictors against results with raw features (CpG beta values for methylation data, standardized per-gene expression values for RNA-seq data).
#
# Notebook parameters:
# * SIG_ALPHA (float): significance cutoff after FDR correction
# * PLOT_AUROC (bool): if True plot AUROC, else plot AUPR
# In[1]:
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from adjustText import adjust_text
import mpmp.config as cfg
import mpmp.utilities.analysis_utilities as au
# In[2]:
# set results directories
results_dir = Path(cfg.results_dirs['mutation'],
'methylation_results',
'gene').resolve()
# set significance cutoff after FDR correction
SIG_ALPHA = 0.001
# if True, save figures to ./images directory
SAVE_FIGS = True
# if True, plot AUROC instead of AUPR
PLOT_AUROC = False
if PLOT_AUROC:
plot_metric = 'auroc'
images_dir = Path(cfg.images_dirs['mutation'], 'auroc')
else:
plot_metric = 'aupr'
images_dir = Path(cfg.images_dirs['mutation'])
# In[3]:
# load raw data
raw_results_df = au.load_stratified_prediction_results(results_dir, 'gene')
print(raw_results_df.shape)
print(raw_results_df.seed.unique())
print(raw_results_df.training_data.unique())
raw_results_df.head()
# In[4]:
# load compressed data
compressed_results_df = au.load_compressed_prediction_results(results_dir, 'gene')
print(compressed_results_df.shape)
print(compressed_results_df.seed.unique())
print(compressed_results_df.n_dims.unique())
print(compressed_results_df.training_data.unique())
compressed_results_df.head()
# In[5]:
def label_points(x, y, gene, sig, ax):
text_labels = []
    a = pd.DataFrame({'x': x, 'y': y, 'gene': gene, 'sig': sig})
#!/usr/bin/env python
# coding: utf-8
#without the help of my intern, this option data scraper would never exist
#thank you, Olivia, much appreciated for the data etl
# In[1]:
import requests
import pandas as pd
import os
os.chdir('H:/')
# In[2]:
#scraping function
def scrape(url):
session=requests.Session()
session.headers.update(
{'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'})
response=session.get(url)
return response
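# Hedged usage sketch: the URL below is a placeholder, not an endpoint taken
# from this script.
def _demo_scrape():
    response = scrape('https://www.example.com/api/options')
    return response.json() if response.ok else None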
# In[3]:
#get expiration id
def get_expiration(jsondata):
expiration=pd.DataFrame.from_dict(jsondata).T
expiration.reset_index(inplace=True,drop=True)
#unpack expiration id
var=locals()
for i in range(len(expiration)):
unpack=expiration.loc[i:i]
dictionary=unpack['expirations'].iloc[0]
del unpack['expirations']
del unpack['productIds']
del unpack['optionType']
var['a'+str(i)]=pd.DataFrame.from_dict(dictionary).T
var['a'+str(i)].columns=var['a'+str(i)].columns.str.replace('label','expiration-label')
for j in unpack.columns:
var['a'+str(i)][j]=unpack[j].iloc[0]
output=pd.concat([var['a'+str(i)] for i in range(len(expiration))])
return output
# In[4]:
#get group id
def get_groupid(jsondata):
commoditygroup=pd.DataFrame.from_dict(jsondata['filters']['group'])
var=locals()
for i in range(len(commoditygroup)):
        var['a'+str(i)] = pd.DataFrame.from_dict(commoditygroup['children'].iloc[i])
import math
import pandas as pd
import csv
import pathlib
import wx
import matplotlib
import matplotlib.pylab as pL
import matplotlib.pyplot as plt
import matplotlib.backends.backend_wxagg as wxagg
import re
import numpy as np
import scipy
import scipy.interpolate
import sys
#from mpl_toolkits.mplot3d import Axes3D
#import wx.lib.inspection as wxli
class ERTAPP(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, wx.ID_ANY, title='ERT Editing',pos=(100,100),size=(500,500))
#Built from template here: https://wiki.wxpython.org/GridSizerTutorial
#Set up Panels
def setUpPanels(self):
self.topPanel = wx.Panel(self, wx.ID_ANY,size = (1000,10),name='Top Panel')
self.infoPanel = wx.Panel(self, wx.ID_ANY,size = (1000,50),name='Info Panel')
self.chartPanel = wx.Panel(self, wx.ID_ANY,size = (1000,500),name='Chart Panel')
self.bottomPanel= wx.Panel(self, wx.ID_ANY,size = (1000,130),name='Bottom Panel')
#need to create more panels, see here: https://stackoverflow.com/questions/31286082/matplotlib-in-wxpython-with-multiple-panels
def titleSetup(self):
bmp = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, wx.ART_OTHER, (4, 4))
self.titleIco = wx.StaticBitmap(self.topPanel, wx.ID_ANY, bmp)
self.title = wx.StaticText(self.topPanel, wx.ID_ANY, 'Advanced ERT Editing')
#Declare inputs for first row
def inputSetup(self):
bmp = wx.ArtProvider.GetBitmap(wx.ART_TIP, wx.ART_OTHER, (4, 4))
self.inputOneIco = wx.StaticBitmap(self.topPanel, wx.ID_ANY, bmp)
self.labelOne = wx.StaticText(self.topPanel, wx.ID_ANY, 'Input ERT Data')
self.inputTxtOne = wx.TextCtrl(self.topPanel, wx.ID_ANY, '')
self.inputTxtOne.SetHint('Enter data file path here')
self.inputBrowseBtn = wx.Button(self.topPanel, wx.ID_ANY, 'Browse')
self.Bind(wx.EVT_BUTTON, self.onBrowse, self.inputBrowseBtn)
self.readInFileBtn = wx.Button(self.topPanel, wx.ID_ANY, 'Read Data')
self.Bind(wx.EVT_BUTTON, self.onReadIn, self.readInFileBtn)
self.inputDataType = wx.Choice(self.topPanel, id=wx.ID_ANY,choices=['.DAT (LS)','.TXT (LS)','.DAT (SAS)', '.VTK', '.XYZ'],name='.TXT (LS)')
self.Bind(wx.EVT_CHOICE,self.onDataType,self.inputDataType)
self.autoShiftBx = wx.CheckBox(self.topPanel,wx.ID_ANY, 'Auto Shift?')
self.autoShiftBx.SetValue(True)
#Row 3 item(s)
self.TxtProfileName = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Profile Name: ')
self.TxtProfileRange = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Profile Length: ')
self.TxtDataPts = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Data Points: ')
self.TxtBlank = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.TxtBlank2 = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.TxtMinElectSpcng = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Min. Electrode Spacing: ')
self.TxtProjectName = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Project Name: ')
self.TxtArray = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Array: ')
self.msgProfileName = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgProfileRange = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgDataPts = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgMinElectSpcng = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgProjectName = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgArray = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
# DataViz Area item(s)
def dataVizSetup(self):
self.editSlider = wx.Slider(self.chartPanel, pos=(200,0), id=wx.ID_ANY, style=wx.SL_TOP | wx.SL_AUTOTICKS | wx.SL_LABELS, name='Edit Data')
self.Bind(wx.EVT_SCROLL, self.onSliderEditEVENT, self.editSlider)
self.dataVizMsg1 = wx.StaticText(self.chartPanel, wx.ID_ANY, '')
self.dataVizMsg2 = wx.StaticText(self.chartPanel, wx.ID_ANY, '')
self.dataVizInput = wx.TextCtrl(self.chartPanel, wx.ID_ANY, '')
self.dataVizInputBtn = wx.Button(self.chartPanel, -1, "Use Value")
self.dataVizInputBtn.Bind(wx.EVT_BUTTON, self.ONdataVizInput)
self.saveEditsBtn = wx.Button(self.chartPanel, -1, "Save Edits")
self.saveEditsBtn.Bind(wx.EVT_BUTTON, self.ONSaveEdits)
self.saveEditsBtn.SetBackgroundColour((100,175,100))
self.currentChart = 'Graph'
self.editDataChoiceList = ['AppResist','Resistance','Electrode x-Dists','Variance','PctErr','PseudoX','PseudoZ']
self.editDataChoiceBool = [False]*len(self.editDataChoiceList)
self.editDataValues = []
for i in self.editDataChoiceList:
self.editDataValues.append([0,0])
self.editDataType = wx.Choice(self.chartPanel, id=wx.ID_ANY,choices=self.editDataChoiceList,name='Edit Data')
self.editDataType.Bind(wx.EVT_CHOICE, self.onSelectEditDataType)
self.setEditToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'Unused',size=(25,30))
self.setEditToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.onSetEditToggle)
self.labelMinRem = wx.StaticText(self.chartPanel, wx.ID_ANY, 'Min.')
self.inputTxtMinRem = wx.TextCtrl(self.chartPanel, wx.ID_ANY,style=wx.TE_PROCESS_ENTER, name='')
self.inputTxtMinRem.Bind(wx.EVT_TEXT_ENTER, self.onEditDataValueChangeEvent)
self.labelMaxRem = wx.StaticText(self.chartPanel, wx.ID_ANY,'Max.')
self.inputTxtMaxRem = wx.TextCtrl(self.chartPanel, wx.ID_ANY,style=wx.TE_PROCESS_ENTER,name= '')
self.inputTxtMaxRem.Bind(wx.EVT_TEXT_ENTER, self.onEditDataValueChangeEvent)
self.editTypeToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'Remove',size=(25,50))
self.editTypeToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.onEditTypeToggle)
self.editLogicToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'OR',size=(25,25))
self.editLogicToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.onLogicToggle)
self.removePtsBtn = wx.Button(self.chartPanel, -1, "Edit Points")
self.removePtsBtn.Bind(wx.EVT_BUTTON, self.onRemovePts)
self.electrodeToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'On',size=(25,25))
self.electrodeToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ONtoggle)
self.GraphEditBtn = wx.Button(self.chartPanel, -1, "Graphic Editor", size=(100, 30))
self.GraphEditBtn.Bind(wx.EVT_BUTTON, self.graphChartEvent)
self.StatEditBtn = wx.Button(self.chartPanel, -1, "Statistical Editor", size=(100, 30))
self.Bind(wx.EVT_BUTTON, self.statChartEvent, self.StatEditBtn)
self.addGPSBtn = wx.Button(self.chartPanel, -1, "GPS Data", size=(100, 30))
self.addGPSBtn.Bind(wx.EVT_BUTTON, self.GPSChartEvent)
self.addTopoBtn = wx.Button(self.chartPanel, -1, "Topography Data", size=(100, 30))
self.addTopoBtn.Bind(wx.EVT_BUTTON, self.topoChartEvent)
self.reviewBtn = wx.Button(self.chartPanel, -1, "Review Edits", size=(100, 15))
self.reviewBtn.Bind(wx.EVT_BUTTON, self.reviewEvent)
def bottomAreaSetup(self):
# Row 4 items
self.reverseBx = wx.CheckBox(self.bottomPanel,wx.ID_ANY, 'Reverse Profile')
self.labelGPSIN = wx.StaticText(self.bottomPanel, wx.ID_ANY, 'GPS Data')
self.inputTxtGPS = wx.TextCtrl(self.bottomPanel, wx.ID_ANY, 'Enter GPS Filepath Here')
self.inputGPSBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Browse')
self.Bind(wx.EVT_BUTTON, self.onGPSBrowse, self.inputGPSBtn)
self.Bind(wx.EVT_CHECKBOX, self.onReverse, self.reverseBx)
self.dataEditMsg = wx.StaticText(self.bottomPanel, wx.ID_ANY, '')
self.labelTopoIN = wx.StaticText(self.bottomPanel, wx.ID_ANY, 'Topo Data')
self.inputTxtTopo = wx.TextCtrl(self.bottomPanel, wx.ID_ANY, 'Enter Topo Filepath Here')
self.inputTopoBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Browse')
self.includeTopoBx = wx.CheckBox(self.bottomPanel,wx.ID_ANY, 'Include Topography')
self.Bind(wx.EVT_BUTTON, self.onTopoBrowse, self.inputTopoBtn)
self.Bind(wx.EVT_CHECKBOX, self.onIncludeTopo, self.includeTopoBx)
#Bottom Row items
self.saveBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Export and Save Data')
self.cancelBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Cancel')
self.Bind(wx.EVT_BUTTON, self.onExport, self.saveBtn)
self.Bind(wx.EVT_BUTTON, self.onCancel, self.cancelBtn)
self.labelExport = wx.StaticText(self.bottomPanel, wx.ID_ANY, 'Export Data')
self.exportTXT = wx.TextCtrl(self.bottomPanel, wx.ID_ANY, 'Enter Export Filepath Here')
self.exportDataBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Browse')
self.Bind(wx.EVT_BUTTON, self.onExportBrowse, self.exportDataBtn)
#Set up chart
def chartSetup(self):
self.chartSizer = wx.BoxSizer(wx.VERTICAL)
self.figure = matplotlib.figure.Figure()
self.canvas = wxagg.FigureCanvasWxAgg(self.chartPanel, -1, self.figure)
self.axes = self.figure.add_subplot(111)
self.axes.set_xlabel('X-Distance (m)')
self.axes.set_ylabel('Depth (m)')
self.toolbar = wxagg.NavigationToolbar2WxAgg(self.canvas)
def sizersSetup(self):
#Set up sizers
self.baseSizer = wx.BoxSizer(wx.VERTICAL)
self.topSizer = wx.BoxSizer(wx.VERTICAL)
self.titleSizer = wx.BoxSizer(wx.HORIZONTAL)
self.inputSizer = wx.BoxSizer(wx.HORIZONTAL)
#self.readMsgSizer = wx.BoxSizer(wx.HORIZONTAL)
self.profileInfoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.profileTxtSizer1 = wx.BoxSizer(wx.VERTICAL)
self.profileTxtSizer2 = wx.BoxSizer(wx.VERTICAL)
self.profileMsgSizer1 = wx.BoxSizer(wx.VERTICAL)
self.profileMsgSizer2 = wx.BoxSizer(wx.VERTICAL)
self.profileInfoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.ctrlSizer = wx.BoxSizer(wx.VERTICAL)
self.chartSizer = wx.BoxSizer(wx.VERTICAL)
self.dataVizSizer = wx.BoxSizer(wx.HORIZONTAL)
self.vizInfoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.dataEditSizer = wx.BoxSizer(wx.HORIZONTAL)
self.bottomSizer = wx.BoxSizer(wx.VERTICAL)
self.GPSSizer = wx.BoxSizer(wx.HORIZONTAL)
self.TopoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.botSizer = wx.BoxSizer(wx.HORIZONTAL)
def addtoSizers(self):
#Add items to sizers
self.titleSizer.Add(self.title, 0, wx.ALIGN_CENTER)
self.inputSizer.Add(self.labelOne, 1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.inputTxtOne, 8,wx.EXPAND,5)
self.inputSizer.Add(self.inputBrowseBtn,1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.inputDataType,1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.readInFileBtn,1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.autoShiftBx, 1, wx.ALIGN_CENTER, 5)
#self.readMsgSizer.Add(self.msgLabelOne, 0, wx.ALL,5)
self.profileTxtSizer1.Add(self.TxtProfileName, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer1.Add(self.TxtProfileRange, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer1.Add(self.TxtDataPts, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer2.Add(self.TxtMinElectSpcng, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer2.Add(self.TxtArray, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer2.Add(self.TxtProjectName, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer1.Add(self.msgProfileName, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer1.Add(self.msgProfileRange, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer1.Add(self.msgDataPts, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer2.Add(self.msgMinElectSpcng, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer2.Add(self.msgArray, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer2.Add(self.msgProjectName, 0, wx.ALIGN_LEFT,5)
self.profileInfoSizer.Add(self.profileTxtSizer1, 1,wx.ALL,5)
self.profileInfoSizer.Add(self.profileMsgSizer1,3,wx.ALL,5)
self.profileInfoSizer.Add(self.profileTxtSizer2, 1, wx.ALL, 5)
self.profileInfoSizer.Add(self.profileMsgSizer2, 3, wx.ALL, 5)
self.topSizer.Add(self.titleSizer,1,wx.ALL,5)
self.topSizer.Add(self.inputSizer, 2, wx.ALL, 5)
#self.topSizer.Add(self.readMsgSizer, 1, wx.ALL, 5)
self.vizInfoSizer.Add(self.dataVizMsg1,16,wx.ALL,5)
self.vizInfoSizer.Add(self.dataVizMsg2, 24, wx.ALL, 5)
self.vizInfoSizer.Add(self.electrodeToggleBtn,1,wx.ALL,5)
self.vizInfoSizer.Add(self.dataVizInput, 1, wx.ALL, 5)
self.vizInfoSizer.Add(self.dataVizInputBtn,3,wx.ALL,5)
self.vizInfoSizer.Add(self.saveEditsBtn,3,wx.ALL,5)
self.ctrlSizer.Add(self.GraphEditBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.StatEditBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.addGPSBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.addTopoBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.reviewBtn,1,wx.ALL,5)
self.dataEditSizer.Add(self.editDataType,5, wx.ALL, 5)
self.dataEditSizer.Add(self.setEditToggleBtn,2,wx.ALL,5)
self.dataEditSizer.Add(self.labelMinRem, 2, wx.ALL, 5)
self.dataEditSizer.Add(self.inputTxtMinRem, 3, wx.ALL, 5)
self.dataEditSizer.Add(self.inputTxtMaxRem, 3, wx.ALL, 5)
self.dataEditSizer.Add(self.labelMaxRem, 2, wx.ALL, 5)
self.dataEditSizer.Add(self.editTypeToggleBtn,3,wx.ALL,5)
self.dataEditSizer.Add(self.editLogicToggleBtn,2,wx.ALL,5)
self.dataEditSizer.Add(self.removePtsBtn, 3, wx.ALL, 5)
self.chartSizer.Add(self.vizInfoSizer, 1, wx.ALL, 5)
self.chartSizer.Add(self.editSlider,1, wx.LEFT | wx.RIGHT | wx.EXPAND,94)
self.chartSizer.Add(self.canvas, 12, wx.EXPAND)
self.chartSizer.Add(self.toolbar, 1, wx.EXPAND)
self.chartSizer.Add(self.dataEditSizer,1,wx.EXPAND)
self.dataVizSizer.Add(self.ctrlSizer,1,wx.EXPAND)
self.dataVizSizer.Add(self.chartSizer,6,wx.EXPAND)
self.GPSSizer.Add(self.dataEditMsg, 2, wx.ALL, 5)
self.GPSSizer.Add(self.reverseBx, 1, wx.ALL, 5)
self.GPSSizer.Add(self.labelGPSIN, 1, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.GPSSizer.Add(self.inputTxtGPS, 8, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.GPSSizer.Add(self.inputGPSBtn, 1, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.TopoSizer.Add(self.includeTopoBx, 2, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.TopoSizer.Add(self.labelTopoIN, 1, wx.ALIGN_CENTER_VERTICAL| wx.ALL, 5)
self.TopoSizer.Add(self.inputTxtTopo, 8, wx.ALIGN_CENTER_VERTICAL| wx.ALL, 5)
self.TopoSizer.Add(self.inputTopoBtn, 1, wx.ALIGN_CENTER_VERTICAL| wx.ALL, 5)
self.botSizer.Add(self.labelExport, 1, wx.ALL, 5)
self.botSizer.Add(self.exportTXT,6, wx.ALL, 5)
self.botSizer.Add(self.exportDataBtn,1, wx.ALL, 5)
self.botSizer.Add(self.cancelBtn, 1, wx.ALL, 5)
self.botSizer.Add(self.saveBtn, 1, wx.ALL, 5)
#btnSizer.Add(saveEditsBtn,0,wx.ALL,5)
self.bottomSizer.Add(self.GPSSizer,0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.bottomSizer.Add(self.TopoSizer,0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.bottomSizer.Add(self.botSizer,0, wx.ALIGN_RIGHT | wx.ALL, 5)
def addtoPanels(self):
self.topPanel.SetSizer(self.topSizer)
self.infoPanel.SetSizer(self.profileInfoSizer)
self.chartPanel.SetSizer(self.dataVizSizer)
self.bottomPanel.SetSizer(self.bottomSizer)
self.topPanel.Layout()
self.baseSizer.Add(self.topPanel,1, wx.EXPAND,1)
self.baseSizer.Add(self.infoPanel,1,wx.EXPAND,1)
self.baseSizer.Add(self.chartPanel, 10, wx.EXPAND | wx.ALL, 5)
self.baseSizer.Add(self.bottomPanel, 1, wx.EXPAND | wx.ALL, 1)
self.SetSizer(self.baseSizer)
self.SetSize(1100,950)
def variableInfo(): #To see what the 'global' variables are
pass
#self.electxDataIN: list of all electrode xdistances
#self.xCols: list with numbers of columns with x-values, from initial read-in table. varies with datatype
#self.xData: list with all x-values of data points
#self.zData: list with all z-values of data points (depth)
#self.values: list with all resist. values of data points
#self.inputDataExt: extension of file read in, selected from initial drop-down (default = .dat (LS))
#self.xDF : dataframe with only x-dist of electrodes, and all of them
#self.dataHeaders: headers from original file read in, used for column names for dataframeIN
#self.dataListIN: nested list that will be used to create dataframe, with all read-in data
#self.dataframeIN: initial dataframe from data that is read in
#self.df: dataframe formatted for editing, but remaining static as initial input data
#self.dataframeEDIT: dataframe that is manipulated during editing
#self.electrodes: sorted list of all electrode xdistances
#self.electrodesShifted: shifted, sorted list of all electrode xdistances
#self.electState:list of booleans giving status of electrode (True = in use, False = edited out)
#self.electrodeElevs: surface elevation values at each electrode
#self.dataLengthIN: number of measurements in file/length of dataframes
#self.dataframeEDITColHeaders
#self.dataShifted: indicates whether data has been shifted
setUpPanels(self)
titleSetup(self)
inputSetup(self)
dataVizSetup(self)
bottomAreaSetup(self)
chartSetup(self)
sizersSetup(self)
addtoSizers(self)
addtoPanels(self)
#wxli.InspectionTool().Show(self)
#Initial Plot
def nullFunction(self,event):
pass
def onBrowse(self,event):
with wx.FileDialog(self,"Open Data File", style= wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
self.dataPath = pathlib.Path(fileDialog.GetPath())
fName = str(self.dataPath.parent) + '\\' + self.dataPath.name
self.inputDataExt = self.dataPath.suffix
try:
with open(self.dataPath,'r') as datafile:
self.inputTxtOne.SetValue(fName)
except IOError:
wx.LogError("Cannot Open File")
if self.inputDataExt.lower() == '.txt':
self.inputDataExt = '.TXT (LS)'
n = 1
elif self.inputDataExt.lower() == '.dat':
if self.dataPath.stem.startswith('lr'):
self.inputDataExt = '.DAT (SAS)'
n = 2
else:
self.inputDataExt = '.DAT (LS)'
n = 0
elif self.inputDataExt.lower() == '.vtk':
self.inputDataExt = '.VTK'
n=3
elif self.inputDataExt.lower() == '.xyz':
self.inputDataExt = '.XYZ'
n=4
else:
wx.LogError("Cannot Open File")
if self.inputDataExt == '.DAT (LS)' or self.inputDataExt == '.TXT (LS)':
outPath = self.dataPath.stem.split('-')[0]
else:
outPath = self.dataPath.stem.split('.')[0]
if outPath.startswith('lr'):
outPath = outPath[2:]
outPath = outPath +'_pyEdit.dat'
if self.includeTopoBx.GetValue():
outPath = outPath[:-4]
outPath = outPath + "_topo.dat"
self.exportTXT.SetValue(str(self.dataPath.with_name(outPath)))
self.inputDataType.SetSelection(n)
self.readInFileBtn.SetLabelText('Read Data')
def onGPSBrowse(self,event):
with wx.FileDialog(self,"Open GPS File", style= wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
self.GPSpath = pathlib.Path(fileDialog.GetPath())
gpsFName = str(self.GPSpath.parent) + '\\' + self.GPSpath.name
self.inputTxtGPS.SetValue(gpsFName)
self.getGPSVals()
def getGPSVals(self):
with open(self.GPSpath) as GPSFile:
data = csv.reader(GPSFile)
self.gpsXData = []
self.gpsYData = []
self.gpsLabels = []
for row in enumerate(data):
if row[0] == 0:
pass #headerline
else:
r = re.split('\t+', str(row[1][0]))
if row[0] == '':
pass
else:
self.gpsLabels.append(r[2])
self.gpsXData.append(float(r[3]))
self.gpsYData.append(float(r[4]))
def onTopoBrowse(self,event):
with wx.FileDialog(self,"Open Topo File", style= wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
self.topoPath = pathlib.Path(fileDialog.GetPath())
topoFName = str(self.topoPath.parent) + '\\' + self.topoPath.name
self.inputTxtTopo.SetValue(topoFName)
self.includeTopoBx.SetValue(True)
self.getTopoVals()
self.topoText()
def onIncludeTopo(self,event):
self.topoText()
def topoText(self):
if self.includeTopoBx.GetValue() == True:
#print('topo' not in self.exportTXT.GetValue())
if 'topo' not in self.exportTXT.GetValue():
#print("It's Not in")
if len(self.exportTXT.GetValue())>0:
outPath = self.exportTXT.GetValue()
outPath = outPath[:int(len(outPath)-4)]
outPath = outPath + "_topo.dat"
self.exportTXT.SetValue(outPath)
elif self.includeTopoBx.GetValue() == False:
if '_topo' in self.exportTXT.GetValue():
outPath = self.exportTXT.GetValue()
#print(outPath)
strInd = int(outPath.find("_topo"))
strInd2 = strInd + 5
outPath = outPath[:strInd]+outPath[strInd2:]
self.exportTXT.SetValue(outPath)
def onReverse(self,event):
self.reverseText()
def reverseText(self):
if self.reverseBx.GetValue() == True:
if '_rev' not in self.exportTXT.GetValue():
if len(self.exportTXT.GetValue())>0:
outPath = self.exportTXT.GetValue()
outPath = outPath[:int(len(outPath)-4)]
outPath = outPath + "_rev.dat"
self.exportTXT.SetValue(outPath)
elif self.reverseBx.GetValue() == False:
if '_rev' in self.exportTXT.GetValue():
outPath = self.exportTXT.GetValue()
#print(outPath)
strInd = int(outPath.find("_rev"))
strInd2 = strInd + 4
outPath = outPath[:strInd]+outPath[strInd2:]
self.exportTXT.SetValue(outPath)
def getTopoVals(self):
with open(self.topoPath) as topoFile:
data = csv.reader(topoFile)
topoXData = []
topoYData = []
topoLabels = []
for row in enumerate(data):
if row[0] == 0:
pass
else:
r = re.split('\t+', str(row[1][0]))
if r[0] == '':
pass
else:
topoLabels.append(r[0])
topoXData.append(float(r[1]))
topoYData.append(float(r[2]))
self.topoDF = pd.DataFrame([topoXData, topoYData]).transpose()
self.topoDF.columns = ["xDist", "Elev"]
def onDataType(self,event):
self.inputDataExt = self.inputDataType.GetString(self.inputDataType.GetSelection())
if self.inputDataExt == '.DAT (LS)':
self.headerlines = 8
elif self.inputDataExt == '.DAT (SAS)':
self.headerlines = 5
elif self.inputDataExt == '.VTK':
self.headerlines = 5 #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
elif self.inputDataExt == '.XYZ':
            self.headerlines = 5 #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
elif self.inputDataExt =='':
self.headerlines = 8
else:
if len(self.inputTxtOne.GetValue()) > 0:
try:
with open(self.dataPath, 'r') as datafile:
filereader = csv.reader(datafile)
start = 0
for row in enumerate(filereader):
if start == 0:
if 'N\\tTime' in str(row[1]):
start = 1
self.headerlines = row[0]
else:
continue
else:
continue
except:
self.headerlines = -1
wx.LogError('Data File not selected')
else:
self.headerlines = -1
def onReadIn(self, event):
self.onDataType(self) #initialize number of headerlines to use
self.dataHeader = []
filepath = pathlib.Path(self.inputTxtOne.GetValue())
self.ext = str(filepath.suffix)
filename = str(filepath.stem)
self.dataframeEDITColHeaders = ['MeasID','A(x)','A(z)','B(x)','B(z)','M(x)','M(z)','N(x)','N(z)', 'aVal', 'nFac','PseudoX','PseudoZ','Resistance','AppResist','Cycles','Variance','DataLevel','DtLvlMean','PctErr','Keep']
if self.ext.lower() == '.dat':
###############Need to update to fit .txt data format
dataLst = []
self.dataLead = []
self.dataTail = []
with open(filepath) as dataFile:
data = csv.reader(dataFile)
if self.inputDataExt == '.DAT (SAS)':
self.dataHeaders = ['M(x)','aVal','nFac','AppResist']
i = 0
dataList=[]
for row in enumerate(data):
if row[0]>self.headerlines: #Read in actual data
if row[0] > self.headerlines + datalength: #Read in data tail
self.dataTail.append(row[1])
else:
                                #It sometimes reads the lines differently: sometimes as a list (as it should), other times as one long string
if len(row[1]) < 4:
#Entire row is read as string
dataList.append(re.split(' +', row[1][0]))
else:
#Row is read correctly as separate columns
dataList.append(row[1])
i+=1
else:
if row[0] == 3: #Read in data length
datalength = float(row[1][0])
self.dataLead.append(row[1])#Create data lead variable for later use
datalengthIN = i
self.fileHeaderDict = {}
self.dataListIN = dataList #Formatted global nested list is created of data read in
project = self.dataLead[0][0]
array = self.dataLead[2][0]
if float(array) == 3:
array = "Dipole-Dipole"
msrmtType = 'Apparent Resistivity'
self.fileHeaderDict['Filename'] = filename
self.fileHeaderDict['Project'] = project
self.fileHeaderDict['minElectSpcng'] = round(float(self.dataLead[1][0]),2)
self.fileHeaderDict['Array'] = array
self.fileHeaderDict["Type of Measurement"] = msrmtType
self.fileHeaderDict['DataPts'] = self.dataLead[3][0]
self.dataframeIN = pd.DataFrame(self.dataListIN)
#Sometimes the data is read in with an extra column at the beginning. This fixes that.
if len(self.dataframeIN.columns) > 4:
del self.dataframeIN[0]
self.dataframeIN.reindex([0, 1, 2, 3], axis='columns')
self.dataframeIN = self.dataframeIN.astype(float)
self.dataframeIN.columns = self.dataHeaders
self.dataframeCols = [-2, -3, -4, -5, -6, 0, -7, -8, -9, 1, 2, -10, -11, -12, 3, -1, -1, -13, -14, -15,-16] # neg val ind. colums that need to be calculated
self.dataframeEDIT = pd.DataFrame()
dataframelength = len(self.dataframeIN.index)
nullList = []
keepList = []
zeroList = []
for i in range(0, dataframelength):
nullList.append(-1)
zeroList.append(0.0)
keepList.append(True)
# Create dataframe that will be used in editing process (self.dataframeEDIT) one column at a time
for item in enumerate(self.dataframeEDITColHeaders):
if self.dataframeCols[
item[0]] > -1: # Columns from dataframeIN that are directly read to dataframeEDIT
self.dataframeEDIT[item[1]] = self.dataframeIN.iloc[:, self.dataframeCols[item[0]]]
self.dataframeEDIT[item[1]] = self.dataframeEDIT[item[1]].astype(float)
elif self.dataframeCols[item[0]] == -1: # Null list (can't calculate)
self.dataframeEDIT[item[1]] = nullList
elif self.dataframeCols[item[0]] == -2: # Measure ID
for i in range(0, dataframelength):
self.dataframeEDIT.loc[i, item[1]] = i
elif self.dataframeCols[item[0]] == -3: # A(x)
self.dataframeIN['A(x)'] = self.dataframeIN['M(x)'] + self.dataframeIN['aVal'] + (self.dataframeIN['aVal']*self.dataframeIN['nFac']) + self.dataframeIN['aVal']
self.dataframeEDIT['A(x)'] = self.dataframeIN['A(x)']
elif self.dataframeCols[item[0]] == -4: # A(z)
self.dataframeEDIT[item[1]] = zeroList
elif self.dataframeCols[item[0]] == -5: # B(x)
self.dataframeIN['B(x)'] = self.dataframeIN['M(x)'] + self.dataframeIN['aVal'] + (self.dataframeIN['aVal']*self.dataframeIN['nFac'])
self.dataframeEDIT['B(x)'] = self.dataframeIN['B(x)']
elif self.dataframeCols[item[0]] == -6: # B(z)
self.dataframeEDIT[item[1]] = zeroList
#elif self.dataframeCols[item[0]] == -6: # M(x)
#Reads in directly
elif self.dataframeCols[item[0]] == -7: # M(z)
self.dataframeEDIT[item[1]] = zeroList
elif self.dataframeCols[item[0]] == -8: #N(x)
self.dataframeIN['N(x)'] = self.dataframeIN['M(x)'] + self.dataframeIN['aVal']
self.dataframeEDIT['N(x)'] = self.dataframeIN['N(x)']
elif self.dataframeCols[item[0]] == -9: # N(z)
self.dataframeEDIT[item[1]] = zeroList
elif self.dataframeCols[item[0]] == -10: # PseudoX
self.dataframeEDIT['PseudoX'] = (((self.dataframeEDIT['A(x)'] + self.dataframeEDIT[
'B(x)']) / 2) + ((self.dataframeEDIT['M(x)'] + self.dataframeEDIT['N(x)']) / 2)) / 2
elif self.dataframeCols[item[0]] == -11: # PseudoZ
n = self.dataframeEDIT['nFac']
a = self.dataframeEDIT['aVal']
self.dataframeEDIT['PseudoZ'] = round((((n ** 2) * -0.0018) + 0.2752 * n + 0.1483) * a, 1)
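                            # Worked example (illustrative numbers, not from a specific survey):
                            # with n = 1 and a = 5 m, PseudoZ = round(((1*-0.0018) + 0.2752*1 + 0.1483) * 5, 1)
                            #                                 = round(0.4217 * 5, 1) = 2.1 m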
elif self.dataframeCols[item[0]] == -12: #Resistance
PI = math.pi
n = self.dataframeEDIT['nFac']
a = self.dataframeEDIT['aVal']
appR = self.dataframeIN['AppResist']
if self.fileHeaderDict['Array'] == 'Dipole-Dipole':
self.dataframeEDIT['Resistance'] = appR/(PI * n * (n + 1) * (n + 2) * a)
else:
print(
'Array is not Dipole-Dipole, but Dipole-Dipole k-factor used to calculate App. Resistivity')
elif self.dataframeCols[item[0]] == -13: #DataLevel
self.dataframeEDIT['DataLevel'] = nullList
uniqueDepths = self.dataframeEDIT['PseudoZ'].unique()
uniqueDepths = list(set(uniqueDepths.flatten()))
self.dataLevels = len(uniqueDepths)
dataLength = len(self.dataframeEDIT['PseudoZ'])
for i in range(0, dataLength):
self.dataframeEDIT.loc[i, 'DataLevel'] = uniqueDepths.index(
self.dataframeEDIT.loc[i, 'PseudoZ'])
elif self.dataframeCols[item[0]] == -14: #DtLvlMean
for i in uniqueDepths:
df = self.dataframeEDIT[self.dataframeEDIT.iloc[:, 12] == i]
dtLvlMean = df['AppResist'].mean()
indexList = df.index.values.tolist()
for ind in indexList:
self.dataframeEDIT.loc[ind, 'DtLvlMean'] = dtLvlMean
elif self.dataframeCols[item[0]] == -15: #PctErr
self.dataframeEDIT['PctErr'] = (abs(
self.dataframeEDIT['DtLvlMean'] - self.dataframeEDIT['AppResist'])) / \
self.dataframeEDIT['DtLvlMean']
elif self.dataframeCols[item[0]] == -16: #Keep
self.dataframeEDIT[item[1]] = keepList
else:
self.dataframeEDIT[item[1]] = nullList
elif self.inputDataExt == '.DAT (LS)': # If it's .DAT (LS)
self.dataHeaders = ["NoElectrodes",'A(x)', 'A(z)', 'B(x)', 'B(z)', 'M(x)', 'M(z)', 'N(x)', 'N(z)', 'Resistance']
datalength=12
dataList = []
for row in enumerate(data):
if row[0]>int(self.headerlines) and row[0] <= float(self.headerlines + datalength):
strrow = str(row[1])
strrow = strrow[2:-2]
splitrow = strrow.split('\\t')
if len(splitrow) != 10:
newrow = []
for i in splitrow:
val = i.strip()
newrow.append(val)
if len(newrow) < 9:
newrow = re.split(' +',newrow[0])
row = [float(i) for i in newrow]
dataList.append(row)
else:
dataList.append(splitrow)
elif row[0] <= int(self.headerlines):
if isinstance(row[1], list):
val = str(row[1])[2:-2]
else:
val = row[1]
self.dataLead.append(val)
if row[0] == 6:
datalength = float(row[1][0])
else:
self.dataTail.append(row[1])
self.dataListIN = dataList
self.fileHeaderDict = {}
project = self.dataLead[0]
dataFrmt = self.dataLead[2]
array = int(self.dataLead[3])
if array == 3:
array = "Dipole-Dipole"
msrmtType = str(self.dataLead[5])
if msrmtType.strip() == '0':
msrmtType = "Apparent Resistivity"
else:
msrmtType = 'Resistance'
self.fileHeaderDict['Filename'] = filename
self.fileHeaderDict['Project'] = project
self.fileHeaderDict['minElectSpcng'] = str(round(float(self.dataLead[1]),2))
self.fileHeaderDict['Array'] = array
self.fileHeaderDict["Type of Measurement"] = msrmtType
self.fileHeaderDict['DataPts'] = str(self.dataLead[6])
self.fileHeaderDict['DistType'] = str(self.dataLead[7])
self.dataframeIN = pd.DataFrame(self.dataListIN)
self.dataframeIN.columns = self.dataHeaders
self.dataframeCols = [-2, 1, 2, 3, 4, 5, 6, 7, 8, -3, -4, -5, -6, 9, -7, -1, -1, -8, -9, -10, -11] # neg val ind. colums that need to be calculated
self.dataframeEDIT = pd.DataFrame()
dataframelength = len(self.dataframeIN.index)
nullList = []
keepList = []
for i in range(0, dataframelength):
nullList.append(-1)
keepList.append(True)
# Create dataframe that will be used in editing process (self.dataframeEDIT) one column at a time
for item in enumerate(self.dataframeEDITColHeaders):
if self.dataframeCols[item[0]] > -1: #Columns from dataframeIN that are directly read to dataframeEDIT
self.dataframeEDIT[item[1]] = self.dataframeIN.iloc[:, self.dataframeCols[item[0]]]
self.dataframeEDIT[item[1]] = self.dataframeEDIT[item[1]].astype(float)
elif self.dataframeCols[item[0]] == -1: #Null list (can't calculate)
self.dataframeEDIT[item[1]] = nullList
elif self.dataframeCols[item[0]] == -2:#Measure ID
for i in range(0,dataframelength):
self.dataframeEDIT.loc[i,item[1]] = i
elif self.dataframeCols[item[0]] == -3: #A spacing
self.dataframeEDIT[item[1]] = abs(self.dataframeEDIT['A(x)'] - self.dataframeEDIT['B(x)'])
elif self.dataframeCols[item[0]] == -4: #N-factor
self.dataframeEDIT[item[1]] = abs(self.dataframeEDIT['B(x)'] - self.dataframeEDIT['N(x)']) / self.dataframeEDIT['aVal']
elif self.dataframeCols[item[0]] == -5:#PseduoX
self.dataframeEDIT['PseudoX'] = (((self.dataframeEDIT['A(x)']+self.dataframeEDIT['B(x)'])/2)+((self.dataframeEDIT['M(x)']+self.dataframeEDIT['N(x)'])/2))/2
elif self.dataframeCols[item[0]] == -6: #PseduoZ
n = self.dataframeEDIT['nFac']
a = self.dataframeEDIT['aVal']
self.dataframeEDIT['PseudoZ'] = round((((n**2)*-0.0018)+0.2752*n+0.1483)*a,1)
elif self.dataframeCols[item[0]] == -7:#AppResistivity
PI = math.pi
n = self.dataframeEDIT['nFac']
a = self.dataframeEDIT['aVal']
R = self.dataframeEDIT['Resistance']
if self.fileHeaderDict['Array'] == 'Dipole-Dipole':
self.dataframeEDIT['AppResist'] = PI*n*(n+1)*(n+2)*a*R
else:
print('Array is not Dipole-Dipole, but Dipole-Dipole k-factor used to calculate App. Resistivity')
elif self.dataframeCols[item[0]] == -8: #DataLevel
self.dataframeEDIT['DataLevel'] = nullList
uniqueDepths = self.dataframeEDIT['PseudoZ'].unique()
uniqueDepths = list(set(uniqueDepths.flatten()))
self.dataLevels = len(uniqueDepths)
dataLength = len(self.dataframeEDIT['PseudoZ'])
for i in range(0, dataLength):
self.dataframeEDIT.loc[i, 'DataLevel'] = uniqueDepths.index(self.dataframeEDIT.loc[i, 'PseudoZ'])
elif self.dataframeCols[item[0]] == -9: # DtLvlMean
for i in uniqueDepths:
df = self.dataframeEDIT[self.dataframeEDIT.iloc[:, 12] == i]
dtLvlMean = df['AppResist'].mean()
indexList = df.index.values.tolist()
for ind in indexList:
self.dataframeEDIT.loc[ind, 'DtLvlMean'] = dtLvlMean
elif self.dataframeCols[item[0]] == -10: #PctErr
self.dataframeEDIT['PctErr'] = (abs(
self.dataframeEDIT['DtLvlMean'] - self.dataframeEDIT['AppResist'])) / \
self.dataframeEDIT['DtLvlMean']
elif self.dataframeCols[item[0]] == -11: #Keep
self.dataframeEDIT[item[1]] = keepList
else:
self.dataframeEDIT[item[1]] = nullList
self.readInFileBtn.SetLabelText("Reset Data")
elif self.ext.lower() == '.txt':
with open(filepath, 'r') as datafile:
filereader = csv.reader(datafile)
start = 0
end = 0
fileHeader = []
data = []
for row in enumerate(filereader):
if start == 0:
if row[0] <= 13:
fileHeader.append(row[1])
fileHeader[row[0]] = fileHeader[row[0]][:]
if 'N\\tTime' in str(row[1]):
start = 1
self.headerlines = row[0]
dataHdrTemp = str(row[1])
self.dataHeaders = dataHdrTemp[2:-2].split('\\t')
self.dataHeaders[1] = dataHdrTemp[1].strip()
self.fileHeaderDict = {}
for item in fileHeader:
if len(item) > 0:
self.fileHeaderDict[str(item[0]).split(":", 1)[0]] = str(item[0]).split(":", 1)[1].strip()
elif start == 1 and end == 0:
if len(row[1]) > 0:
data.append(str(row[1])[2:-1].split('\\t'))
else:
end = 1
else:
continue
self.dataListIN = data
self.dataframeIN = pd.DataFrame(self.dataListIN)
self.dataframeCols = [0, 6, 8, 9, 11, 12, 14, 15, 17, -2, -3, 18, 20, 26, 28, 29, 27, -4, -5, -6, -7] #neg val ind. colums that need to be calculated
self.dataframeEDIT = pd.DataFrame()
dataframelength = len(self.dataframeIN.index)
nullList = []
keepList = []
for i in range(0, dataframelength):
nullList.append(-1)
keepList.append(True)
# Create dataframe that will be used in editing process (self.dataframeEDIT) one column at a time
for item in enumerate(self.dataframeEDITColHeaders):
if self.dataframeCols[item[0]] > -1:
#print(item[1])
self.dataframeEDIT[item[1]] = self.dataframeIN.iloc[:, self.dataframeCols[item[0]]]
self.dataframeEDIT[item[1]] = self.dataframeEDIT[item[1]].astype(float)
elif self.dataframeCols[item[0]] == -2:
self.dataframeEDIT[item[1]] = abs(self.dataframeEDIT['A(x)'] - self.dataframeEDIT['B(x)'])
elif self.dataframeCols[item[0]] == -3:
self.dataframeEDIT[item[1]] = abs(self.dataframeEDIT['N(x)'] - self.dataframeEDIT['M(x)']) / self.dataframeEDIT['aVal']
elif self.dataframeCols[item[0]] == -4:
self.dataframeEDIT['DataLevel'] = nullList
uniqueDepths = self.dataframeEDIT['PseudoZ'].unique()
uniqueDepths = list(set(uniqueDepths.flatten()))
self.dataLevels = len(uniqueDepths)
dataLength = len(self.dataframeEDIT['PseudoZ'])
for i in range(0, dataLength):
self.dataframeEDIT.loc[i, 'DataLevel'] = uniqueDepths.index(self.dataframeEDIT.loc[i, 'PseudoZ'])
elif self.dataframeCols[item[0]] == -5:
for i in uniqueDepths:
df = self.dataframeEDIT[self.dataframeEDIT.iloc[:, 12] == i]
dtLvlMean = df['AppResist'].mean()
indexList = df.index.values.tolist()
for ind in indexList:
self.dataframeEDIT.loc[ind, 'DtLvlMean'] = dtLvlMean
elif self.dataframeCols[item[0]] == -6:
self.dataframeEDIT['PctErr'] = (abs(self.dataframeEDIT['DtLvlMean'] - self.dataframeEDIT['AppResist'])) / self.dataframeEDIT['DtLvlMean']
elif self.dataframeCols[item[0]] == -7:
self.dataframeEDIT[item[1]] = keepList
else:
self.dataframeEDIT[item[1]] = nullList
self.dataHeaders[1] = 'MeasTime'
if len(self.dataHeaders) > 37:
self.dataHeaders[37] = 'Extra'
self.dataTail = [0,0,0,0,0,0,0]
self.dataframeIN.columns = self.dataHeaders
self.readInFileBtn.SetLabelText("Reset Data")
self.fileHeaderDict['Filename'] = filename
self.fileHeaderDict['Project'] = self.fileHeaderDict['Project name']
self.fileHeaderDict['Array'] = self.fileHeaderDict['Protocol file'][21:-4]
self.fileHeaderDict['minElectSpcng'] = self.fileHeaderDict['Smallest electrode spacing']
self.fileHeaderDict['DataPts'] = len(self.dataframeIN)
self.dataLead = []
self.dataLead.append(self.fileHeaderDict['Project name'] + " " + self.fileHeaderDict['Filename'])
self.dataLead.append(self.fileHeaderDict['minElectSpcng'])
self.dataLead.append('11') #General Array format
self.dataLead.append(self.fileHeaderDict['Sub array code']) #tells what kind of array is used
self.dataLead.append('Type of measurement (0=app.resistivity,1=resistance)')
self.dataLead.append('0') #Col 26 in .txt (col 28 is app. resistivity)
self.dataLead.append(self.fileHeaderDict['DataPts'])
self.dataLead.append('2')
self.dataLead.append('0')
elif self.ext.lower() == '.vtk':
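# Parse a .vtk model file: coordinates are read after the 'POINTS' keyword and resistivity values after 'LOOKUP_TABLE'; each group of four corner points is averaged below to give one X/Y/Z location per resistivity value.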
with open(filepath, 'r') as datafile:
filereader = csv.reader(datafile)
startLocs = 0
startData = 0
startLocInd = 'POINTS'
startDataInd = 'LOOKUP_TABLE'
endLocs = 0
endData = 0
endLocInd = []
endDataInd = []
fileLead = []
fileMid = []
fileTail = []
vtkdata = []
vtklocs = []
newrow = []
xLocPts = []
yLocPts = []
zLocPts = []
vPts = []
for row in enumerate(filereader):
if startLocs == 0:
fileLead.append(row[1])
fileLead[row[0]] = fileLead[row[0]][:]
if startLocInd in str(row[1]):
startLocs = 1
elif startLocs == 1 and endLocs == 0:
if endLocInd == row[1]:
endLocs = 1
else:
newrow = re.split(' +', str(row[1][0]))
newrow = newrow[1:]
vtklocs.append(newrow)
elif startData == 0:
fileMid.append(row[1])
if startDataInd in str(row[1]):
startData = 1
elif startData == 1 and endData == 0:
if row[1] == endDataInd:
endData = 1
else:
newrow = re.split(' +', str(row[1][0]))
newrow = newrow[1:]
vtkdata.append(newrow)
else:
fileTail.append(row[1])
fileTail[row[0]] = fileTail[row[0]][:]
xPtCols = [0,3,6,9]
yPtCols = [1,4,7,10]
zPtCols = [2,5,8,11]
for r in vtklocs:
Xs = 0.0
for x in xPtCols:
Xs = Xs + float(r[x])
xLocPts.append(Xs/4.0)
Ys = 0.0
for y in yPtCols:
Ys = Ys + float(r[y])
yLocPts.append(Ys/4.0)
Zs = 0.0
for z in zPtCols:
Zs = Zs + float(r[z])
zLocPts.append(Zs/4)
for d in vtkdata:
for i in d:
vPts.append(i)
self.dataframeIN = pd.DataFrame([xLocPts, yLocPts, zLocPts, vPts]).transpose()
self.dataframeIN.columns = ['X','Y','Z','Resistivity']
print(self.dataframeIN)
#Format vtk file
self.fileHeaderDict = {}
self.fileHeaderDict['Filename'] = filename
self.fileHeaderDict['Project'] = 'NA'
self.fileHeaderDict['Array'] = 'NA'
self.fileHeaderDict['minElectSpcng'] = str(round(self.dataframeIN.loc[1,'X'] - self.dataframeIN.loc[0,'X'],1))
self.fileHeaderDict['DataPts'] = len(self.dataframeIN)
elif self.ext.lower() == '.xyz': # NOTE: .xyz files are read below but not yet fully supported
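# Read a .xyz file: header lines are collected until the 'Elevation' marker, then data rows are gathered until a line containing '/' is reached.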
with open(filepath, 'r') as datafile:
filereader = csv.reader(datafile)
start = 0
startIndicator = 'Elevation'
end = 0
endIndicator = '/'
fileHeader = []
data = []
for row in enumerate(filereader):
if start == 0:
fileHeader.append(row[1])
fileHeader[row[0]] = fileHeader[row[0]][:]
if startIndicator in str(row[1]):
start = 1
elif start == 1 and end == 0:
if endIndicator in str(row[1]):
end = 1
else:
data.append(str(row[1])[2:-1].split('\\t'))
else:
continue
# TODO: format the .xyz input into self.dataframeIN (not yet implemented)
else:
self.dataVizMsg2.SetLabelText("Filepath Error. Must be .DAT, .TXT, .VTK, or .XYZ file")
self.dataLengthIN = len(self.dataframeIN.iloc[:,0])
self.read = 0
self.generateXY()
self.generateProfileInfo()
self.graphChart()
self.read = 1
def generateXY(self):
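# Build the x-distance, depth, and value lists used for plotting and editing, optionally shifting electrode positions so the profile starts at zero, and record electrode locations plus min/max ranges.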
self.xCols = []
aVals = []
nFacs = []
yCols = []
valCols = []
self.xData = []
self.yData = []
self.zData = []
self.values = []
if self.inputDataExt == '.DAT (SAS)' or self.inputDataExt == '.DAT (LS)' or self.inputDataExt == '.TXT (LS)':
self.xCols = [11]
self.electrodeCols = [1,3,5,7]
aVals = 9
nFacs = 10
zCols = 12
valCols = 14 #13 is resistance; 14 is app. resistivity
if self.autoShiftBx.GetValue():
startPt = []
for c in self.electrodeCols:
startPt.append(float(self.dataframeEDIT.iloc[:,c].min()))
startPt = min(startPt)
if startPt != 0:
self.dataShifted = True
for c in self.electrodeCols:
for i in enumerate(self.dataframeEDIT.iloc[:,c]):
self.dataframeEDIT.iloc[i[0],c] = float(i[1]) - float(startPt)
if self.inputDataExt == '.DAT (LS)' or self.inputDataExt == '.TXT (LS)':
outPath = self.dataPath.stem.split('-')[0]
elif self.inputDataExt == '.DAT (SAS)':
outPath = self.dataPath.stem.split('.')[0]
if outPath.startswith('lr'):
outPath = outPath[2:]
outPath = outPath + '_shift_pyEdit.dat'
self.exportTXT.SetValue(str(self.dataPath.with_name(outPath)))
else:
self.dataShifted = False
if self.includeTopoBx.GetValue():
outPath = self.exportTXT.GetValue()[:-4]
outPath = outPath + "_topo.dat"
self.exportTXT.SetValue(outPath)
#Get all electrode xDistances
self.electxDataIN = []
for c in self.electrodeCols:
for row in self.dataframeEDIT.iloc[:,c]:
self.electxDataIN.append(round(float(row),0))
xDataIN = self.dataframeEDIT.iloc[:,self.xCols[0]].to_list()
for item in xDataIN:
self.xData.append(float(item))
zDataIN = self.dataframeEDIT.iloc[:,zCols].to_list()
for item in zDataIN:
self.zData.append(float(item))
valIN = self.dataframeEDIT.iloc[:,valCols].to_list()
for item in valIN:
self.values.append(float(item))
xDistCols = ['B(x)', 'A(x)', 'N(x)', 'M(x)']
xDF = pd.DataFrame(self.dataframeIN.loc[:,xDistCols[:]])
xDF.columns = xDistCols
xDF = xDF.astype(float)
self.xDF = pd.DataFrame()
self.xDF['A(x)'] = xDF['A(x)']
self.xDF['B(x)'] = xDF['B(x)']
self.xDF['M(x)'] = xDF['M(x)']
self.xDF['N(x)'] = xDF['N(x)']
xList = []
for item in xDistCols:
xDistList = self.dataframeIN.loc[:,item].to_list()
for item in xDistList:
xList.append(float(item))
#print(self.dataframeIN)
minvals = self.xDF.min()
self.minXDist = minvals.min()
maxvals = self.xDF.max()
self.maxXDist = maxvals.max()
#self.minXDist = min(self.xData)
#self.maxXDist = max(self.xData)
self.minDepth = min(self.zData)
self.maxDepth = max(self.zData)
self.maxResist = max(self.values)
elif self.inputDataExt == '.VTK':
self.dataframeIN = self.dataframeIN.astype(float)
for i in range(0,len(self.dataframeIN)):
self.xData.append(self.dataframeIN.loc[i,'X'])
self.yData.append(self.dataframeIN.loc[i,'Y'])
self.zData.append(self.dataframeIN.loc[i,'Z'])
self.values.append(self.dataframeIN.loc[i,"Resistivity"])
self.minXDist = min(self.xData)
self.maxXDist = max(self.xData)
self.minDepth = min(self.zData)
self.maxDepth = max(self.zData)
self.maxResist = max(self.values)
elif self.inputDataExt == '.XYZ':
pass
else:
pass
if self.zData[0] < 0:
for i in enumerate(self.zData):
self.zData[i[0]] = self.zData[i[0]]*-1
self.maxDepth = max(self.zData)
self.minResist = min(self.values)
self.maxResist = max(self.values)
self.fileHeaderDict['DataPts'] = len(self.dataframeIN)
dt = []
dt.append(self.xData)
dt.append(self.zData)
dt.append(self.values)
cols = ['xDist', 'Depth', 'Value']
df = pd.DataFrame(dt)
df = df.transpose()
df.columns = cols
if self.inputDataExt =='.XYZ' or self.inputDataExt == '.VTK':
self.df = df.copy()
for i in range(0,len(self.dataframeIN)):
self.df.loc[i,"DtLvlMean"] = 0.0
self.df.loc[i,'PctErr'] = 0.0
self.df.loc[i,'MeasID'] = i
self.electxDataIN = self.xData
self.electxDataIN = [float(i) for i in self.electxDataIN]
self.electxDataIN = sorted(set(self.electxDataIN))
else:
pass
xDataINList = []
self.electrodes = []
for i in self.electxDataIN:
xDataINList.append(round(i,0))
self.electrodes = sorted(xDataINList)
self.electState = []
for i in self.electrodes:
self.electState.append(True)
print(self.electrodes)
self.electrodesShifted = []
if self.dataShifted:
for e in self.electrodes:
self.electrodesShifted.append(e-startPt)
self.dataEditMsg.SetLabelText(str(len(self.dataframeEDIT)) + ' data pts')
def generateProfileInfo(self):
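# Fill the profile summary labels (filename, x-range, data point count, array type, project, minimum electrode spacing) and reset the electrode toggle button.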
self.msgProfileName.SetLabelText(str(self.fileHeaderDict['Filename']))
self.msgProfileRange.SetLabelText(str(round(self.minXDist,0)) + " - " + str(round(self.maxXDist,0)))
self.msgDataPts.SetLabelText(str(self.fileHeaderDict['DataPts']))
self.msgArray.SetLabelText(str(self.fileHeaderDict['Array']))
self.msgProjectName.SetLabelText(str(self.fileHeaderDict['Project']))
self.msgMinElectSpcng.SetLabelText(str(self.fileHeaderDict['minElectSpcng']))
self.electrodeToggleBtn.SetValue(True)
self.electrodeToggleBtn.SetBackgroundColour((0, 255, 0))
self.sliderVal = self.editSlider.GetValue()
self.dataVizMsg2.SetLabelText('Electrode at ' + str(self.sliderVal) + ' m')
def graphChartEvent(self, event):
self.graphChart()
def graphChart(self):
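# Draw the graphical editing view: a pseudosection of the currently kept points, using an interpolated color mesh when scipy.interpolate is available and a plain scatter otherwise.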
self.editSlider.Show()
if self.currentChart != 'Graph':
self.editSlider.SetValue(0)
self.currentChart = 'Graph'
self.dataVizMsg1.SetLabelText('Graphical Editing Interface')
self.saveEditsBtn.Hide()
self.dataVizInput.Show()
self.dataVizInputBtn.Show()
self.electrodeToggleBtn.Show()
x = []
z = []
v = []
pe = []
n1 = []
n2 = []
KeepList = self.dataframeEDIT['Keep'].to_list()
peList = self.dataframeEDIT['PctErr'].to_list()
for i in enumerate(KeepList):
if i[1]:
x.append(self.dataframeEDIT.loc[i[0],'PseudoX'])
z.append(self.dataframeEDIT.loc[i[0],'PseudoZ'])
v.append(self.dataframeEDIT.loc[i[0],'AppResist'])
pe.append(peList[i[0]])
self.axes.clear()
if 'scipy.interpolate' in sys.modules:
self.makeColormesh(x,z,v, pe,n1,n2)
else:
ptSize = round(100/self.maxXDist*125,1)
self.axes.scatter(x,z, c=v,edgecolors='black',s=ptSize, marker='h')
def makeColormesh(self,x,z,v, pe,xOmit,zOmit):
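# Grid the scattered apparent-resistivity values with scipy.interpolate.griddata onto a 300x300 mesh and show them on a log color scale; in 'Graph' mode the points are overlaid colored by percent error, in 'Review' mode omitted points are marked with x symbols.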
for i in enumerate(v):
v[i[0]] = abs(float(i[1]))
xi, zi = np.linspace(min(x), max(x), 300), np.linspace(min(z), max(z), 300)
xi, zi = np.meshgrid(xi, zi)
vi = scipy.interpolate.griddata((x, z), v, (xi, zi), method='linear')
ptSize = round(100 / self.maxXDist * 35, 1)
self.figure.clear()
self.axes = self.figure.add_subplot(111)
cmap = pL.cm.binary
my_cmap = cmap(np.arange(cmap.N))
my_cmap[:,-1] = np.linspace(0,1,cmap.N)
#my_cmap = cmap(np.arange(pe))
#my_cmap[:,-1] = np.linspace(0,1,pe)
my_cmap = matplotlib.colors.ListedColormap(my_cmap)
vmax = np.percentile(v, 98)
vmin = np.percentile(v, 2)
minx = min(x)
maxx = max(x)
minz = min(z)
maxz = max(z)
norm = matplotlib.colors.LogNorm(vmin = vmin, vmax = vmax)
#im = self.axes.imshow(vi, vmin=vmin, vmax=vmax, origin='lower',
im = self.axes.imshow(vi, origin='lower',
extent=[minx, maxx, minz, maxz],
aspect='auto',
cmap='nipy_spectral',
norm = norm,
interpolation='bilinear')
self.figure.colorbar(im, orientation='horizontal')
if self.currentChart == 'Graph':
self.axes.scatter(x, z, c=pe, edgecolors=None, s=ptSize, marker='o', cmap=my_cmap)
if abs(self.minDepth) < 10 :
self.axes.set_ylim(self.maxDepth * 1.15, 0)
else:
depthrange = abs(self.maxDepth-self.minDepth)
self.axes.set_ylim(self.minDepth-(depthrange*0.05), self.maxDepth + (depthrange*0.05))
self.axes.set_xlabel('X-Distance (m)')
self.axes.set_ylabel('Depth (m)')
self.axes.xaxis.tick_top()
self.editSlider.SetMax(int(self.maxXDist))
self.editSlider.SetMin(int(self.minXDist))
self.editSlider.SetTickFreq(5)
self.canvas.draw()
elif self.currentChart == 'Review':
self.axes.scatter(xOmit, zOmit, c='black', s=ptSize/1.5, marker='x')
if abs(self.minDepth) < 10 :
self.axes.set_ylim(self.maxDepth * 1.15, 0)
else:
depthrange = abs(self.maxDepth - self.minDepth)
self.axes.set_ylim(self.minDepth - (depthrange * 0.05), self.maxDepth + (depthrange * 0.05))
self.axes.set_xlabel('X-Distance (m)')
self.axes.set_ylabel('Elev/Depth (m)')
self.axes.xaxis.tick_top()
self.canvas.draw()
#self.axes.scatter(x, z, c=pe, edgecolors='none', s=ptSize, marker='h', alpha=0.5, cmap='binary')
def statChartEvent(self,event):
self.statChart()
def statChart(self):
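# Show a histogram of percent error for the currently kept points and configure the slider as an upper percent-error cutoff.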
self.dataVizMsg1.SetLabelText('Statistical Editing Interface')
self.dataVizMsg2.SetLabelText('Move slider to % err upper limit')
self.currentChart = 'Stat'
self.saveEditsBtn.Show()
self.dataVizInput.Show()
self.editSlider.Show()
self.dataVizInputBtn.Show()
self.electrodeToggleBtn.Hide()
peIndex = int(self.dataframeEDIT.columns.get_loc('PctErr'))
KeepList = self.dataframeEDIT.loc[:,'Keep'].to_list()
peList = self.dataframeEDIT.iloc[:,peIndex].to_list()
pctErr = []
for i in enumerate(KeepList):
if i[1]:
pctErr.append(float(peList[i[0]]) * 100)
self.editSlider.SetMin(0)
self.editSlider.SetMax(int(max(pctErr)))
self.editSlider.SetValue(int(max(pctErr)))
self.editSlider.SetTickFreq(1)
self.figure.clear()
self.axes = self.figure.add_subplot(111)
self.axes.hist(pctErr, bins=30)
self.axes.set_xlim(0, max(pctErr)*1.1)
self.axes.xaxis.tick_bottom()
self.canvas.draw()
def GPSChartEvent(self,event):
self.GPSChart()
def GPSChart(self):
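# Plot the GPS track (UTM easting vs. northing), padding the axis limits based on the track's aspect so it is not overly stretched.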
self.editSlider.Hide()
self.electrodeToggleBtn.Hide()
self.dataVizInput.Hide()
self.dataVizInputBtn.Hide()
self.saveEditsBtn.Hide()
self.currentChart = 'GPS'
self.dataVizMsg1.SetLabelText('GPS Data Viewer')
if len(self.GPSpath.stem) < 1:
self.dataVizMsg2.SetLabelText('No GPS file loaded')
else:
self.dataVizMsg2.SetLabelText(str(self.GPSpath.stem))
self.getGPSVals()
self.figure.clear()
self.axes = self.figure.add_subplot(111)
xRange = max(self.gpsXData) - min(self.gpsXData)
yRange = max(self.gpsYData) - min(self.gpsYData)
if xRange!=0:
slope = abs(yRange/xRange)
else:
slope = 1000
if slope < 1:
if slope < 0.2:
xFact = 0.2
yFact = 5
elif slope < 0.6:
xFact = 0.2
yFact = 3
else:
xFact = 0.2
yFact = 1
else:
if slope > 4:
xFact = 5
yFact = 0.2
elif slope > 2:
xFact = 3
yFact = 0.2
else:
xFact = 1
yFact = 0.2
lowXlim = min(self.gpsXData) - xFact*xRange
upXlim = max(self.gpsXData) + xFact*xRange
lowYlim = min(self.gpsYData) - yFact*yRange
upYlim = max(self.gpsYData) + yFact*yRange
tick_spacing = 100
self.axes.scatter(self.gpsXData,self.gpsYData, s=20, marker='h')
self.axes.plot(self.gpsXData, self.gpsYData)
self.axes.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(tick_spacing))
self.axes.yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(tick_spacing))
self.axes.ticklabel_format(axis='both',style='plain')
self.axes.grid(which='major', axis='both',color=(0.8,0.8,0.8))
self.axes.set_xlim(lowXlim,upXlim)
self.axes.set_ylim(lowYlim,upYlim)
self.axes.set_xlabel('UTM Easting')
self.axes.set_ylabel('UTM Northing')
self.axes.xaxis.tick_bottom()
self.canvas.draw()
def topoChartEvent(self,event):
self.topoChart()
def topoChart(self):
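# Plot the topography profile (elevation vs. x-distance along the line) read from the topo file.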
self.editSlider.Hide()
self.electrodeToggleBtn.Hide()
self.dataVizInput.Hide()
self.dataVizInputBtn.Hide()
self.saveEditsBtn.Hide()
self.currentChart = 'Topo'
self.dataVizMsg1.SetLabelText('Topo Data Viewer')
self.dataVizMsg2.SetLabelText(str(self.topoPath.stem))
self.getTopoVals()
self.figure.clear()
self.axes = self.figure.add_subplot(111)
#tick_spacing = 100
self.axes.scatter(self.topoDF['xDist'],self.topoDF['Elev'], s=5, marker='h')
self.axes.plot(self.topoDF['xDist'],self.topoDF['Elev'])
self.axes.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(100))
#self.axes.yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(tick_spacing))
self.axes.ticklabel_format(axis='both',style='plain')
self.axes.grid(which='major', axis='both',color=(0.8,0.8,0.8))
self.axes.set_xlim(0-max(self.topoDF['xDist'])*.2,max(self.topoDF['xDist'])*1.2)
self.axes.set_ylim(min(self.topoDF['Elev'])*0.8,max(self.topoDF['Elev'])*1.2)
self.axes.set_xlabel('X-Distance Along Profile (m)')
self.axes.set_ylabel('Elevation Above MSL (m)')
self.axes.xaxis.tick_bottom()
self.canvas.draw()
def onSliderEditEVENT(self,event):
self.onSliderEdit()
def onSliderEdit(self):
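# Update the status text as the slider moves: in 'Graph' mode report whether an electrode exists at the selected x-location and its on/off state; in 'Stat' mode report how many points the current percent-error cutoff would delete.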
self.sliderVal = float(self.editSlider.GetValue())
if self.currentChart == 'Graph':
if self.sliderVal in self.electrodes:
self.electrodeToggleBtn.Show()
toggleState = self.electState[int(self.electrodes.index(self.sliderVal))]
self.electrodeToggleBtn.SetValue(toggleState)
if toggleState == True:
self.dataVizMsg2.SetLabelText("Electrode at " + str(self.sliderVal) + " m is in use")
self.electrodeToggleBtn.SetLabelText('On')
self.electrodeToggleBtn.SetBackgroundColour((100, 255, 100))
else:
self.electrodeToggleBtn.SetLabelText('Off')
self.electrodeToggleBtn.SetBackgroundColour((255, 100, 100))
self.dataVizMsg2.SetLabelText("Electrode at " + str(self.sliderVal) + " m is not in use")
else:
self.dataVizMsg2.SetLabelText('No Electrode at this x-location')
self.electrodeToggleBtn.Hide()
elif self.currentChart == 'Stat':
currData = 0
for i in self.dataframeEDIT["Keep"]:
if i:
currData = currData + 1
peIndex = self.dataframeEDIT.columns.get_loc('PctErr')
dataCut = 0
for r in enumerate(self.dataframeEDIT.iloc[:, peIndex]):
if float(r[1]) >= float(self.sliderVal) / 100.0:
dataCut += 1
self.dataVizMsg2.SetLabelText(str(self.sliderVal)+'% Err: '+str(dataCut) + ' points will be deleted ('+
str(round(dataCut/currData*100,1))+'% of the current data).')
else:
self.dataVizMsg2.SetLabelText('Value: ' + str(self.sliderVal))
def onEditTypeToggle(self, event):
self.editTypeToggleState = self.editTypeToggleBtn.GetValue()
if self.editTypeToggleState == True:
self.editTypeToggleBtn.SetLabelText('Keep')
elif self.editTypeToggleState == False:
self.editTypeToggleBtn.SetLabelText('Remove')
def onSelectEditDataType(self,event):
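# When an edit data type is selected, load its stored min/max limits (defaulting to the column minimum/maximum) into the input boxes and show whether that criterion is currently in use.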
choiceListInd = self.editDataType.GetSelection()
colNumBase = [14, 13, [1, 3, 5, 7], 16, 19, 11, 12]
self.setEditToggleState = self.editDataChoiceBool[choiceListInd]
self.setEditToggleBtn.SetValue(self.setEditToggleState)
if float(self.editDataValues[choiceListInd][0]) == 0 and float(self.editDataValues[choiceListInd][1]) == 0:
#Set min value in box
if type(colNumBase[choiceListInd]) is list:
minVal = []
for i in colNumBase[choiceListInd]:
minVal.append(self.dataframeEDIT[self.dataframeEDITColHeaders[i]].min())
minVal = min(minVal)
else:
minVal = self.dataframeEDIT[self.editDataChoiceList[choiceListInd]].min()
self.inputTxtMinRem.SetValue(str(minVal))
# Set max value in box
if type(colNumBase[choiceListInd]) is list:
maxVal = []
for i in colNumBase[choiceListInd]:
maxVal.append(self.dataframeEDIT[self.dataframeEDITColHeaders[i]].max())
maxVal = max(maxVal)
else:
maxVal = self.dataframeEDIT[self.editDataChoiceList[choiceListInd]].max()
self.inputTxtMaxRem.SetValue(str(maxVal))
else:
self.inputTxtMinRem.SetValue(str(self.editDataValues[choiceListInd][0]))
self.inputTxtMaxRem.SetValue(str(self.editDataValues[choiceListInd][1]))
if self.setEditToggleState:
self.setEditToggleBtn.SetLabelText('Used')
else:
self.setEditToggleBtn.SetLabelText('Not Used')
def onSetEditToggle(self,event):
self.setEditToggleState = self.setEditToggleBtn.GetValue()
choiceListInd = self.editDataType.GetSelection()
if self.setEditToggleState == True:
if self.editDataType.GetSelection() > -1:
self.editDataChoiceBool[choiceListInd] = True
self.setEditToggleBtn.SetLabelText('Used')
else:
self.setEditToggleState = False
self.setEditToggleBtn.SetValue(False)
elif self.setEditToggleState == False:
if self.editDataType.GetSelection() > -1:
self.editDataChoiceBool[choiceListInd] = False
self.setEditToggleBtn.SetLabelText('Not Used')
self.setEditDataValues()
def onEditDataValueChangeEvent(self,event):
self.setEditDataValues()
def setEditDataValues(self):
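# Store the min/max limits entered for the selected data type; typing 'min' or 'max' fills in the column's minimum or maximum value.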
choiceListInd = self.editDataType.GetSelection()
colNumBase = [14, 13, [1, 3, 5, 7], 16, 19, 11, 12]
#Set Min Value Box
if self.inputTxtMinRem.GetValue().isnumeric():
self.editDataValues[choiceListInd][0] = float(self.inputTxtMinRem.GetValue())
elif self.inputTxtMinRem.GetValue().lower() == 'min':
if type(colNumBase[choiceListInd]) is list:
minVal = []
for i in colNumBase[choiceListInd]:
minVal.append(self.dataframeEDIT[self.dataframeEDITColHeaders[i]].min())
minVal = min(minVal)
else:
minVal = self.dataframeEDIT[self.editDataChoiceList[choiceListInd]].min()
self.inputTxtMinRem.SetValue(str(minVal))
self.editDataValues[choiceListInd][0] = float(minVal)
else:
pass
# self.editDataChoiceList = ['AppResist', 'Resistance', 'Electrode x-Dists', 'Variance', 'PctErr', 'PseudoX','PseudoZ']
#Set Max Value Box
if self.inputTxtMaxRem.GetValue().isnumeric():
self.editDataValues[choiceListInd][1] = float(self.inputTxtMaxRem.GetValue())
elif self.inputTxtMaxRem.GetValue().lower() == 'max':
if type(colNumBase[choiceListInd]) is list:
maxVal = []
for i in colNumBase[choiceListInd]:
maxVal.append(self.dataframeEDIT[self.dataframeEDITColHeaders[i]].max())
maxVal = max(maxVal)
else:
maxVal = self.dataframeEDIT[self.editDataChoiceList[choiceListInd]].max()
self.inputTxtMaxRem.SetValue(str(maxVal))
self.editDataValues[choiceListInd][1] = float(maxVal)
else:
pass
def onLogicToggle(self, event):
self.editLogicToggleState = self.editLogicToggleBtn.GetValue()
if self.editLogicToggleState == True:
self.editLogicToggleBtn.SetLabelText('AND')
elif self.editLogicToggleState == False:
self.editLogicToggleBtn.SetLabelText('OR')
def onRemovePts(self,event):
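# Apply the value-based edit using the limits of every data type marked as used; the AND/OR toggle controls how criteria are combined and the 'Keep' column records the result before the plot is redrawn.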
#self.editDataChoiceList = ['AppResist', 'Resistance', 'Electrode x-Dists', 'Variance', 'PctErr', 'PseudoX','PseudoZ']
self.setEditDataValues()
colNumBase = [14,13,[1,3,5,7],16,19,11,12]
colNums = []
for i in enumerate(colNumBase):
if self.editDataChoiceBool[i[0]]:
colNums.append(i[1])
colNames = self.dataframeEDIT.columns
if len(colNums) < 1:
pass
else:
if self.editLogicToggleBtn.GetLabelText() == 'AND': #AND
# Create list to hold items if they are to be acted on; starts all true, any false value makes false
editList = []
for k in range(0, self.dataLengthIN):
editList.append(1)
index = -1
for dTypeInUse in enumerate(self.editDataChoiceBool):
if dTypeInUse[1]:
for c in colNums:
if type(c) is list:
row = -1
for r in range(0, self.dataLengthIN):
row = row + 1
listBoolCt = 0
for item in c:
if self.dataframeEDIT.iloc[r, item] >= float(self.editDataValues[dTypeInUse[0]][0]) and self.dataframeEDIT.iloc[r, item] <= float(self.editDataValues[dTypeInUse[0]][1]):
listBoolCt = listBoolCt + 1
if listBoolCt == 0:
editList[row] = 0
else: #if the columns are not a list of columns
#Iterate through each row in col c to see if it is in range
for r in self.dataframeEDIT[colNames[c]]:
index = index + 1
if r < float(self.editDataValues[dTypeInUse[0]][0]) or r > float(self.editDataValues[dTypeInUse[0]][1]):
editList[index] = 0
elif self.editLogicToggleBtn.GetLabelText() == 'OR': #OR
for dTypeInUse in enumerate(self.editDataChoiceBool):
if dTypeInUse[1]:
for c in colNums:
if type(c) is list:
#Create editList if multiple columns involved
editList = []
for k in range(0, self.dataLengthIN):
editList.append(0)
for item in c:
row = -1
for r in self.dataframeEDIT[colNames[item]]:
row = row + 1
if r >= float(self.editDataValues[dTypeInUse[0]][0]) and r <= float(self.editDataValues[dTypeInUse[0]][1]):
if self.editTypeToggleBtn.GetLabelText() == 'Remove':
self.dataframeEDIT.loc[row, 'Keep'] = False
elif self.editTypeToggleBtn.GetLabelText() == 'Keep':
self.dataframeEDIT.loc[row, 'Keep'] = True
else:
pass
else:
if self.editTypeToggleBtn.GetLabelText() == 'Keep':
self.dataframeEDIT.loc[row, 'Keep'] = False
else:
row = -1
for r in self.dataframeEDIT[colNames[c]]:
row = row + 1
if r >= float(self.editDataValues[dTypeInUse[0]][0]) and r <= float(self.editDataValues[dTypeInUse[0]][1]):
if self.editTypeToggleBtn.GetLabelText() == 'Remove':
self.dataframeEDIT.loc[row, 'Keep'] = False
elif self.editTypeToggleBtn.GetLabelText() == 'Keep':
self.dataframeEDIT.loc[row, 'Keep'] = True
else:
pass
else:
if self.editTypeToggleBtn.GetLabelText() == 'Keep':
self.dataframeEDIT.loc[row, 'Keep'] = False
else:
pass
self.graphChart()
def ONtoggle(self,event):
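# Toggle a single electrode on or off: every measurement that uses the electrode at the slider's x-position has its 'Keep' flag set accordingly, then the retained-point count and plot are refreshed.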
self.ToggleState = self.electrodeToggleBtn.GetValue()
self.sliderVal = self.editSlider.GetValue()
if self.ToggleState == True:
self.dataVizMsg2.SetLabelText("Electrode at "+ str(self.sliderVal) +" m is in use")
self.electrodeToggleBtn.SetLabelText('On')
self.electrodeToggleBtn.SetBackgroundColour((100,255,100))
xCols = [0,1,2,3]
keep=[]
for c in xCols:
for r in enumerate(self.xDF.iloc[:,c]):
if float(r[1]) == float(self.sliderVal):
keep.append(r[0])
for i in self.dataframeEDIT.index:
if i in keep:
self.dataframeEDIT.loc[[i],['Keep']] = True
eIndex = int(self.electrodes.index(self.sliderVal))
self.electState[eIndex] = True
elif self.ToggleState == False:
self.electrodeToggleBtn.SetLabelText('Off')
self.electrodeToggleBtn.SetBackgroundColour((255,100,100))
self.dataVizMsg2.SetLabelText("Electrode at " + str(self.sliderVal) + " m is not in use")
xCols = [0,1,2,3]
lose=[]
for c in xCols:
for r in enumerate(self.xDF.iloc[:,c]):
if float(r[1]) == float(self.sliderVal):
lose.append(r[0])
for i in self.dataframeEDIT.index:
if i in lose:
self.dataframeEDIT.loc[[i],['Keep']] = False
# change the self.electState entry for this electrode to False
eIndex = int(self.electrodes.index(self.sliderVal))
self.electState[eIndex] = False
else:
self.dataVizMsg2.SetLabelText("uhh, this is wierd")
dataRetained = 0
for i in self.dataframeEDIT["Keep"]:
if i:
dataRetained = dataRetained + 1
self.dataEditMsg.SetLabelText(str(dataRetained) + '/' + str(len(self.dataframeEDIT)) + ' pts (' + str(round(dataRetained/len(self.dataframeEDIT)*100,1)) + '%)')
self.graphChart()
def ONSaveEdits(self,event):
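# Commit the statistical edit: mark every point whose percent error exceeds the slider cutoff as not kept, then refresh the histogram.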
if self.currentChart == 'Graph':
#do nothing
pass
elif self.currentChart == 'Stat':
#self.sliderVal = float(self.editSlider.GetValue())
peIndex = self.dataframeEDIT.columns.get_loc('PctErr')
lose = []
for r in enumerate(self.dataframeEDIT.iloc[:, peIndex]):
if float(r[1]) >= float(self.sliderVal)/100.0:
lose.append(r[0])
kIndex = int(self.dataframeEDIT.columns.get_loc('Keep'))
for i in self.dataframeEDIT.index:
if i in lose:
self.dataframeEDIT.iloc[i, kIndex] = False
dataRetained = 0
for i in self.dataframeEDIT["Keep"]:
if i:
dataRetained = dataRetained + 1
self.dataEditMsg.SetLabelText(str(dataRetained) + '/' + str(len(self.dataframeEDIT)) + ' pts (' + str(
round(dataRetained / len(self.dataframeEDIT) * 100, 1)) + '%)')
self.statChart()
else:
pass
def ONdataVizInput(self,event):
if self.dataVizInput.GetValue().isnumeric():
if float(self.dataVizInput.GetValue()) < float(self.editSlider.GetMin()) or float(self.dataVizInput.GetValue()) > float(self.editSlider.GetMax()):
self.dataVizMsg2.SetLabelText('Error: Value must be an integer between ' + str(self.editSlider.GetMin()) + ' and ' + str(self.editSlider.GetMax()))
else:
self.editSlider.SetValue(int(self.dataVizInput.GetValue()))
self.dataVizInput.SetValue('')
else:
self.dataVizInput.SetValue('Error: Value must be numeric')
self.onSliderEdit()
def reviewEvent(self,event):
self.reviewChart()
def reviewChart(self):
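# Review view: rebuild the export dataframe and redraw the pseudosection, marking the points that have been flagged for removal.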
self.editSlider.Hide()
self.currentChart = 'Review'
self.dataVizMsg1.SetLabelText('Review Edits')
self.saveEditsBtn.Hide()
self.electrodeToggleBtn.Hide()
self.dataVizInput.Hide()
self.dataVizInputBtn.Hide()
x = []
z = []
v = []
pe = []
xOmit = []
zOmit = []
self.createExportDF()
for i in enumerate(self.dataframeEDIT['Keep']):
x.append(self.dataframeEDIT.loc[i[0], 'PseudoX'])
z.append(self.dataframeEDIT.loc[i[0], 'PseudoZ'])
v.append(self.dataframeEDIT.loc[i[0], 'AppResist'])
if i[1]:
pass
else:
xOmit.append(self.dataframeEDIT.loc[i[0],'PseudoX'])
zOmit.append(self.dataframeEDIT.loc[i[0],'PseudoZ'])
if 'scipy.interpolate' in sys.modules:
self.makeColormesh(x,z,v,pe,xOmit,zOmit)
else:
ptSize = round(100/self.maxXDist*125,1)
self.axes.scatter(x,z, c=v,edgecolors='black',s=ptSize, marker='h')
#self.axes.scatter(x,z, c=v,s=ptSize, marker='h')
#self.axes.scatter(xOmit,zOmit,c='black',s=ptSize-ptSize*0.333,marker = 'x')
#minz = min(z)
#maxz = max(z)
#zspace = (maxz-minz)/10
#self.axes.set_ylim(minz-zspace,maxz+zspace)
#self.axes.set_xlabel('X-Distance (m)')
#self.axes.set_ylabel('Elev/Depth (m)')
#self.axes.xaxis.tick_top()
#self.canvas.draw()
#pass
def getClosestElev(self):
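# Assign an elevation to each electrode by locating the nearest topo x-distance and linearly interpolating between neighbouring topo points; per-measurement electrode elevations are also written into dataframeEDIT.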
if len(self.inputTxtTopo.GetValue())>0 and 'Enter Topo Filepath Here' not in self.inputTxtTopo.GetValue():
if self.topoDF['xDist'].max() > max(self.electrodes) or self.topoDF['xDist'].min() > min(self.electrodes):
if self.topoDF['xDist'].max() > max(self.electrodes):
wx.LogError("File format error. Maximum topo X-Distance is greater than maximum electrode X-Distance.")
else:
wx.LogError("File format error. Minimum topo X-Distance is less than minimum electrode X-Distance.")
else:
self.electrodeElevs = [[] for k in range(len(self.electrodes))]#blank list
for x in enumerate(self.electrodes):
elecxDist = x[1]
elecIndex = x[0]
index = np.argmin(np.abs(np.array(self.topoDF['xDist']) - elecxDist))#finds index of closest elevation
nearestTopoxDist = self.topoDF.loc[index,'xDist']
nearestTopoElev = self.topoDF.loc[index,'Elev']
if nearestTopoxDist == x[1]:
self.electrodeElevs[elecIndex] = nearestTopoElev
elif nearestTopoxDist >= x[1]:
mapNum = nearestTopoxDist - self.electrodes[elecIndex]
mapDenom = nearestTopoxDist - self.topoDF.loc[index-1,'xDist']
mVal = float(mapNum/mapDenom)
self.electrodeElevs[elecIndex] = nearestTopoElev - (nearestTopoElev-self.topoDF.loc[index-1,'Elev'])*mVal
else:
mapNum = self.electrodes[elecIndex] - nearestTopoxDist
mapDenom = self.topoDF.loc[index+1,'xDist']-nearestTopoxDist
mVal = float(mapNum/mapDenom)
self.electrodeElevs[elecIndex] = nearestTopoElev + (self.topoDF.loc[index+1,'Elev']-nearestTopoElev)*mVal
blankList = [[] for k in range(len(self.dataframeEDIT['Keep']))] # blank list
self.dataframeEDIT['SurfElevs'] = blankList
elecXDistColNames = ['A(x)','B(x)','M(x)','N(x)']
elecElevColNames = ['A(z)','B(z)','M(z)','N(z)']
elecXDistColNums = [1,3,5,7]
for c in enumerate(elecXDistColNums):
for x in enumerate(self.dataframeEDIT[elecXDistColNames[c[0]]]):
elecxDist = x[1]
elecIndex = x[0]
index = np.argmin(
np.abs(np.array(self.topoDF['xDist']) - elecxDist)) # finds index of closest elevation
nearestTopoxDist = self.topoDF.loc[index, 'xDist']
nearestTopoElev = self.topoDF.loc[index, 'Elev']
if nearestTopoxDist == x[1]:
self.dataframeEDIT.iloc[elecIndex,c[1]+1] = nearestTopoElev
elif nearestTopoxDist >= x[1]:
mapNum = nearestTopoxDist - elecxDist
mapDenom = nearestTopoxDist - self.topoDF.loc[index - 1, 'xDist']
mVal = float(mapNum / mapDenom)
self.dataframeEDIT.iloc[elecIndex,c[1]+1] = nearestTopoElev - (
nearestTopoElev - self.topoDF.loc[index - 1, 'Elev']) * mVal
else:
mapNum = elecxDist - nearestTopoxDist
mapDenom = self.topoDF.loc[index + 1, 'xDist'] - nearestTopoxDist
mVal = float(mapNum / mapDenom)
self.dataframeEDIT.iloc[elecIndex,c[1]+1] = nearestTopoElev + (
self.topoDF.loc[index + 1, 'Elev'] - nearestTopoElev) * mVal
self.dataframeEDIT['PtElev'] = self.dataframeEDIT[elecElevColNames[c[0]]] - self.dataframeEDIT['PseudoZ']
else:
pass
if self.inputDataExt == '.DAT (LS)':
self.electrodeElevs = []
for x in enumerate(self.electrodes): #Checks if there's already elevation data??
found = 0
for xc in self.electrodeCols:
if found == 0:
for i in enumerate(self.dataframeEDIT.iloc[:,xc]):
if round(float(x[1]),2) == round(float(i[1]),2):
zc = xc + 1
found = 1
elev = self.dataframeEDIT.iloc[i[0],zc]
elif found == 1:
self.electrodeElevs.append(float(elev))
else:
wx.LogError("No Topography Data Found")
def onExportBrowse(self, event):
with wx.FileDialog(self, "Select Export Filepath", style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
self.exportPathname = pathlib.Path(fileDialog.GetPath())
try:
with open(self.exportPathname, 'r') as exportFile:
path = exportFile.name
self.exportTXT.SetValue(path)
except IOError:
wx.LogError("Cannot Open File")
def onExport(self, event):
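# Assemble the export file: a header block built from dataLead, the kept data rows, and a tail of zeros, written tab-separated to the chosen output path.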
dataDF,keepLength = self.createExportDF()
dataLeadDF = pd.DataFrame()
dataTailDF = pd.DataFrame()
#Create Data Lead
dataLeadDF[0] = self.dataLead
if self.inputDataExt == '.DAT (SAS)':
dataLeadList = []
dataLeadList.append(self.dataLead[0][0])
dataLeadList.append(self.dataLead[1][0])
dataLeadList.append(11)
dataLeadList.append(self.dataLead[2][0])
dataLeadList.append('Type of measurement (0=app.resistivity,1=resistance)')
dataLeadList.append(0)
dataLeadList.append(keepLength)
dataLeadList.append(2)
dataLeadList.append(0)
dataLeadDF = pd.DataFrame(dataLeadList)
else:
dataLeadDF.iloc[4,0] = 'Type of measurement (0=app.resistivity,1=resistance)'
dataLeadDF.iloc[6,0] = str(int(keepLength))
for c in range(1, 10):
dataLeadDF[int(c)] = [None, None, None, None, None, None, None, None, None]
#Create Data Tail
for c in range(0, 10):
if c < 1:
dataTailDF[int(c)] = [0, 0, 0, 0, 0, 0, 0]
else:
dataTailDF[int(c)] = [None, None, None, None, None, None, None]
#print(dataLeadDF)
#print(dataDF)
#print(dataTailDF)
DFList = [dataLeadDF, dataDF, dataTailDF]
self.exportDataframe = pd.concat(DFList, ignore_index=True, axis=0)
self.exportDataframe.to_csv(self.exportTXT.GetValue(), sep="\t", index=False, header=False)
def createExportDF(self):
dataDF = | pd.DataFrame(columns=["NoElectrodes",'A(x)', 'A(z)', 'B(x)', 'B(z)', 'M(x)', 'M(z)', 'N(x)', 'N(z)', 'Resistance']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import os
import logging
import pandas as pd
import pytz
from tzlocal import windows_tz
import appdirs
import ws
LOG = logging.getLogger(__name__)
_TYPE_MAP = {'integer': int,
'unicode': str,
'string': str,
'boolean': bool,
'datetime': 'M8[ns]'}
def file_name(name, format):
return '.'.join((name, format))
def get_file(name, expiration=None):
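# Cached-file lookup: return None when the file is missing; the expiration string is parsed into a pandas offset below, presumably to compare against the file's age.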
if not os.path.exists(name):
return
expiration = | pd.core.datetools.to_offset(expiration) | pandas.core.datetools.to_offset |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
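# Index-level tests for TimedeltaIndex: reductions, rounding, repr/summary, arithmetic with offsets, sorting, indexing, take, shift, NaT handling, and equality.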
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual( | Timedelta(-239, unit='h') | pandas.Timedelta |
import csv
import snscrape.modules.twitter as sntwitter
import pandas as pd
import os.path
def get_company_twitter_posts(account_df1):
    # Check if the output file already exists
    if os.path.isfile("output/twitter_sentiment_companies.csv"):
        print("File already exists - skipping company data extraction.")
return | pd.read_csv("output/twitter_sentiment_companies.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from gensim.utils import tokenize
from gensim.parsing.preprocessing import remove_stopwords
from gensim.test.utils import common_texts
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
import pandas as pd
import numpy as np
from textblob import TextBlob
import spacy
from sklearn.feature_extraction.text import CountVectorizer
nlp = spacy.load('en_core_web_sm')
from sklearn.model_selection import TimeSeriesSplit
from src.data.cleaning import clean_news_headline
from src.utils import backend, home
def split_train_test(comb):
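    # TimeSeriesSplit preserves chronological order; looping without a break
    # means the returned tr/te come from the final (largest-train) split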
split = TimeSeriesSplit(n_splits=2)
for tr_idx, te_idx in split.split(comb):
if backend == 'pandas':
tr = comb.iloc[tr_idx, :]
te = comb.iloc[te_idx, :]
else:
tr = comb.loc[tr_idx.tolist()]
te = comb.iloc[te_idx, :]
assert tr.shape[1] == te.shape[1]
assert tr.shape[0] + te.shape[0] == comb.shape[0]
return tr, te
def split_features_target(combined, name, load=False):
print('splitting {} into x, y'.format(name))
if load:
print('loading from data/interim')
corpus = pd.read_csv(home / 'data' / 'interim' / '{}-features.csv'.format(name), index_col=0)
target = pd.read_csv(home / 'data' / 'interim' / '{}-target.csv'.format(name), index_col=0)
return corpus, target
target = combined.loc[:, 'Label'].to_frame()
target.columns = ['target']
corpus = combined.drop(['Label'], axis=1)
corpus = corpus.agg(' '.join, axis=1)
print('cleaning news headlines')
corpus = corpus.apply(clean_news_headline)
corpus = corpus.to_frame()
corpus.columns = ['news']
print('target shape {} distribution - {}'.format(target.shape, np.mean(target.values)))
print('saving to data/interim')
corpus.to_csv(home / 'data' / 'interim' / '{}-features.csv'.format(name))
target.to_csv(home / 'data' / 'interim' / '{}-target.csv'.format(name))
return corpus, target
def gensim_tokenize(docs):
tokens = []
for doc in docs:
doc = remove_stopwords(doc)
tokens.append(list(tokenize(doc, lower=True)))
return tokens
def get_doc_vecs(docs, model):
vecs = []
for sample in docs:
vecs.append(model.infer_vector(sample))
return np.array(vecs)
def make_document_vectors(x_tr, x_te):
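    # train Doc2Vec on the training corpus only, then infer fixed-length
    # vectors for both the train and test documents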
tr_tokens = gensim_tokenize(x_tr.loc[:, 'news'].values)
te_tokens = gensim_tokenize(x_te.loc[:, 'news'].values)
documents = [TaggedDocument(doc, [i]) for i, doc in enumerate(tr_tokens)]
model = Doc2Vec(documents, vector_size=32, window=3, min_count=1, workers=4, verbose=1)
tr_vecs = get_doc_vecs(tr_tokens, model)
te_vecs = get_doc_vecs(te_tokens, model)
cols = ['doc2vec-{}'.format(i) for i in range(tr_vecs.shape[1])]
tr_vecs = pd.DataFrame(tr_vecs, index=x_tr.index, columns=cols)
te_vecs = | pd.DataFrame(te_vecs, index=x_te.index, columns=cols) | pandas.DataFrame |
from rest_framework import generics, status, permissions, mixins, views, viewsets
from rest_framework.response import Response
from rest_framework.parsers import MultiPartParser, FormParser, JSONParser
from rest_framework.decorators import permission_classes, action
from rest_framework.exceptions import ParseError, ValidationError
from django.db.models.query import QuerySet
from django.http import FileResponse, Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect
from django.core.files.base import ContentFile
from secretsauce.apps.portal.models import *
from secretsauce.apps.portal.serializers import *
from secretsauce.permissions import IsOwnerOrAdmin, AdminOrReadOnly
from secretsauce.utils import UploadVerifier, CostSheetVerifier
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from collections import defaultdict
from threading import Thread
import os, requests, json, re
FILLET = 'https://fillet.azurewebsites.net'
class DataBlockList(generics.ListCreateAPIView):
"""
List all datablocks or create a new datablock.
Usage example:
create: curl -X POST -F "name=test_file" -F "upload=@<EMAIL>" -F "project=<uuid>" localhost:8000/datablocks/
list: curl localhost:8000/datablocks/
"""
queryset = DataBlock.objects.all()
serializer_class = DataBlockListSerializer
parser_classes = [MultiPartParser, FormParser]
permission_classes = [IsOwnerOrAdmin]
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
verifier = UploadVerifier(request.FILES['upload'])
item_ids = verifier.get_schema()
self.perform_create(serializer, item_ids)
return_data = serializer.data
return_data['errors'] = verifier.errors
return Response(return_data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def perform_create(self, serializer, item_ids):
data_block = serializer.save()
header_objects = [DataBlockHeader(data_block=data_block, item_id=item_id) for item_id in item_ids]
DataBlockHeader.objects.bulk_create(header_objects)
def get_queryset(self):
assert self.queryset is not None, (
"'%s' should either include a `queryset` attribute, "
"or override the `get_queryset()` method."
% self.__class__.__name__
)
queryset = self.queryset
if isinstance(queryset, QuerySet):
# Ensure queryset is re-evaluated on each request.
queryset = queryset.all()
project = self.request.query_params.get('project')
projects = queryset.filter(project=project)
for p in projects:
self.check_object_permissions(self.request, p)
return projects
class DataBlockDetail(generics.RetrieveDestroyAPIView):
"""
Retrieve or delete a datablock instance
"""
permission_classes = [IsOwnerOrAdmin]
queryset = DataBlock.objects.all()
serializer_class = DataBlockSingleSerializer
class DataBlockPrice(viewsets.ViewSet):
@action(methods=['get'], detail=True, permission_classes=[IsOwnerOrAdmin])
def average_prices(self, request, pk):
data_block = get_object_or_404(DataBlock.objects.all(), id=pk)
self.check_object_permissions(request, data_block)
df = pd.read_csv(data_block.upload).drop(['Wk', 'Tier', 'Groups', 'Store', 'Qty_'], axis=1)
df = df.groupby(['Item_ID']).mean()
output = df.to_dict()['Price_']
return Response(output)
class VizDataBlock(viewsets.ViewSet):
permission_classes=[IsOwnerOrAdmin]
parser_classes = [MultiPartParser]
max_query_size = 10
@action(methods=['get'], detail=True, url_path='vizdata/price', url_name='viz-price', )
def price(self, request, pk, *args, **kwargs):
data_block = self.get_object(pk)
self.check_object_permissions(request, data_block)
if 'items' not in request.query_params:
raise ParseError(detail="'items' required in query_params", code='invalid_data')
items = request.query_params.get('items').split(',')
try:
items = list(map(int, items))
except ValueError as e:
raise ParseError(e)
if len(items) > self.max_query_size:
raise ParseError(detail=f'Query is too large, maximum of {self.max_query_size} items only', code='query_size_exceeded')
body = self.obtain_prices(data_block.upload, items)
return Response(body, status=status.HTTP_200_OK)
@action(methods=['get'], detail=True, url_path='vizdata/qty', url_name='viz-qty')
def qty(self, request, pk, *args, **kwargs):
data_block = self.get_object(pk)
self.check_object_permissions(request, data_block)
if 'items' not in request.query_params:
raise ParseError(detail="'items' required in query_params", code='invalid_data')
items = request.query_params.get('items').split(',')
try:
items = list(map(int, items))
except ValueError as e:
raise ParseError(e)
if len(items) > self.max_query_size:
raise ParseError(detail=f'Query is too large, maximum of {self.max_query_size} items only', code='query_size_exceeded')
body = self.obtain_quantities(data_block.upload, items)
return Response(body, status=status.HTTP_200_OK)
def get_object(self, pk):
try:
return DataBlock.objects.get(id=pk)
except DataBlock.DoesNotExist:
raise Http404
def obtain_prices(self, file, items):
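        # collect every observed price per requested Item_ID and return them
        # grouped and sorted by item id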
df = pd.read_csv(file, encoding='utf-8')
df = df[df['Item_ID'].isin(items)][['Item_ID', 'Price_']]
output = defaultdict(list)
for idx, row in df.iterrows():
item_id = int(row['Item_ID'])
price = row['Price_']
output[item_id].append(price)
final_output = sorted([(k, v) for k, v in output.items()], key=lambda x: x[0])
items, datasets = zip(*final_output)
final_output = {
'items': items,
'datasets': datasets
}
return final_output
def obtain_quantities(self, file, items):
df = | pd.read_csv(file, encoding='utf-8') | pandas.read_csv |
# converts a warc file into a pandas dataframe saved as csv: html_and_text_big.csv
# each row of the dataframe contains url, html, text for a specific html file
# also saves a csv of filtered text, containing justext-extracted text: text_filtered_big.csv
# stripped of non-ascii and filtered for relevance (Parkland shooting)
# default text extractor is justext (get_text_jt)
# can also use BeautifulSoup as the text extractor (get_text_bs)
from warcio.archiveiterator import ArchiveIterator
from bs4 import BeautifulSoup
from justext import justext, get_stoplist
from tqdm import tqdm
import pandas as pd
from pathlib import Path
import re
global spam
spam = ['advertisement', 'getty', 'published', 'javascript', 'updated', 'jpghttps',
'posted', 'read more', 'photo gallery', 'play video', 'caption']
def clean_html(html):
soup = BeautifulSoup(html, 'html.parser')
if soup.body is None:
return None
for tag in soup.body.select('script'):
tag.decompose()
for tag in soup.body.select('style'):
tag.decompose()
return str(soup)
def get_text_bs(html):
soup = BeautifulSoup(html, 'html.parser')
body = soup.body
if body is None:
return None
for tag in body.select('script'):
tag.decompose()
for tag in body.select('style'):
tag.decompose()
text = body.get_text(separator='\n')
return text
def filter_pages(text):
if not isinstance(text,str):
print(text)
for f in spam:
if f in text.lower() and len(text)<100:
#print('spam found:',f)
return False
return True
def get_text_jt(html):
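    # justext-based extractor: keep non-boilerplate paragraphs longer than
    # 15 characters that also pass the spam filter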
text = []
paragraphs = justext(html, get_stoplist("English"))
for paragraph in paragraphs:
if not paragraph.is_boilerplate:
if len(paragraph.text) > 15 and filter_pages(paragraph.text):
text.append(paragraph.text)
#else:
# print(len(paragraph.text),' :: ',paragraph.text)
text = ' '.join(t for t in text)
return text
def process_warc(file_path):
rows = []
dropped_count = 0
with open(file_path, 'rb') as stream:
for record in tqdm(ArchiveIterator(stream)):
try:
if record.rec_type == 'response':
url = record.rec_headers.get_header('WARC-Target-URI')
html_raw = record.content_stream().read()
html = html_raw.decode('utf-8')
html = clean_html(html)
text = get_text_jt(html_raw)
rows.append([url,html,text])
except:
dropped_count += 1
#print(dropped_count,'files dropped so far')
continue
print(dropped_count,'files dropped due to read errors')
df = pd.DataFrame(data=rows,columns=['url','html','text'])
return df
def filter_text(df):
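    # normalise whitespace, strip non-ascii characters and spam tokens, and
    # keep only non-empty texts relevant to the Parkland shooting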
text = df.text.dropna()
text = text.apply(lambda x: re.sub('\n+','. ',x))
text = text.apply(lambda x: re.sub('[\r\f\v]','',x))
text = text.apply(lambda x: re.sub('\t+',' ',x))
text = text.apply(lambda x: re.sub('[ ]{2,}',' ',x))
text = text.apply(lambda x: x.encode("ascii", errors="ignore").decode())
text = text.apply(lambda x: ' '.join(word for word in x.split() if word not in spam))
relevant = text.apply(lambda x: ('parkland' in x.lower() or \
'<NAME> douglas' in x.lower()) and \
'shooting' in x.lower())
nonempty = text.apply(lambda x: len(x)>0)
text = text[relevant & nonempty]
text = pd.DataFrame(data=text,columns=['text'])
return text
if __name__ == '__main__':
data_path = Path('/Users/ryankingery/Repos/text-summarization/data/')
if not data_path.exists():
data_path.mkdir()
if not (data_path/'html_and_text_big.csv').exists():
df = process_warc(data_path/'Shooting_Douglas_big.warc')
df.to_csv(data_path/'html_and_text_big.csv')
if not (data_path/'text_filtered_big.csv').exists():
text = filter_text(df)
text.to_csv(data_path/'text_filtered_big.csv')
else:
df = pd.read_csv(data_path/'html_and_text_big.csv',index_col=0)
text = | pd.read_csv(data_path/'text_filtered_big.csv',index_col=0, header=None) | pandas.read_csv |
import copy
import numpy as np
import pandas as pd
class CustomGeneticAlgorithm():
def server_present(self, server, time):
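        # this method treats server as [server_id, start_time, duration]:
        # active if `time` falls within [start_time, start_time + duration)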
server_start_time = server[1]
server_duration = server[2]
server_end_time = server_start_time + server_duration
if (time >= server_start_time) and (time < server_end_time):
return True
return False
def deployed_to_hourlyplanning(self, deployed_hourly_cron_capacity):
deployed_hourly_cron_capacity_week = []
for day in deployed_hourly_cron_capacity:
deployed_hourly_cron_capacity_day = []
for server in day:
server_present_hour = []
for time in range(0, 24):
server_present_hour.append(
self.server_present(server, time))
deployed_hourly_cron_capacity_day.append(server_present_hour)
deployed_hourly_cron_capacity_week.append(
deployed_hourly_cron_capacity_day)
deployed_hourly_cron_capacity_week = np.array(
deployed_hourly_cron_capacity_week).sum(axis=1)
return deployed_hourly_cron_capacity_week
def generate_random_plan(self, n_days, n_racks):
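        # a plan is a per-day list of [server_id, start_time (0-22), machines (0-11)]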
period_planning = []
for _ in range(n_days):
day_planning = []
for server_id in range(n_racks):
start_time = np.random.randint(0, 23)
machines = np.random.randint(0, 12)
server = [server_id, start_time, machines]
day_planning.append(server)
period_planning.append(day_planning)
return period_planning
def generate_initial_population(self, population_size, n_days=7, n_racks=11):
population = []
for _ in range(population_size):
member = self.generate_random_plan(
n_days=n_days, n_racks=n_racks)
population.append(member)
return population
def calculate_fitness(self, deployed_hourly_cron_capacity, required_hourly_cron_capacity):
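        # fitness is a cost: a weighted sum of capacity deviations, with
        # undercapacity penalised six times more heavily than overcapacity (3 vs 0.5)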
deviation = deployed_hourly_cron_capacity - required_hourly_cron_capacity
overcapacity = abs(deviation[deviation > 0].sum())
undercapacity = abs(deviation[deviation < 0].sum())
overcapacity_cost = 0.5
undercapacity_cost = 3
fitness = overcapacity_cost * overcapacity + undercapacity_cost * undercapacity
return fitness
def crossover(self, population, n_offspring):
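        # uniform crossover: a random 0/1 mask chooses each gene from one of
        # two randomly selected parents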
n_population = len(population)
offspring = []
for _ in range(n_offspring):
random_one = population[np.random.randint(
low=0, high=n_population - 1)]
random_two = population[np.random.randint(
low=0, high=n_population - 1)]
dad_mask = np.random.randint(0, 2, size=np.array(random_one).shape)
mom_mask = np.logical_not(dad_mask)
child = np.add(np.multiply(random_one, dad_mask),
np.multiply(random_two, mom_mask))
offspring.append(child)
return offspring
def mutate_parent(self, parent, n_mutations):
size1 = parent.shape[0]
size2 = parent.shape[1]
for _ in range(n_mutations):
rand1 = np.random.randint(0, size1)
rand2 = np.random.randint(0, size2)
rand3 = np.random.randint(0, 2)
parent[rand1, rand2, rand3] = np.random.randint(0, 12)
return parent
def mutate_gen(self, population, n_mutations):
mutated_population = []
for parent in population:
mutated_population.append(self.mutate_parent(parent, n_mutations))
return mutated_population
def is_acceptable(self, parent):
return np.logical_not((np.array(parent)[:, :, 2:] > 12).any())
def select_acceptable(self, population):
population = [
parent for parent in population if self.is_acceptable(parent)]
return population
def select_best(self, population, required_hourly_cron_capacity, n_best):
fitness = []
for idx, deployed_hourly_cron_capacity in enumerate(population):
deployed_hourly_cron_capacity = self.deployed_to_hourlyplanning(
deployed_hourly_cron_capacity)
parent_fitness = self.calculate_fitness(deployed_hourly_cron_capacity,
required_hourly_cron_capacity)
fitness.append([idx, parent_fitness])
print('Current generation\'s optimal schedule has cost: {}'.format(
pd.DataFrame(fitness)[1].min()))
fitness_tmp = | pd.DataFrame(fitness) | pandas.DataFrame |
import dask.dataframe as dd
import pandas as pd
import pytest
import featuretools as ft
from featuretools.entityset import EntitySet, Relationship
def test_create_entity_from_dask_df(pd_es):
dask_es = EntitySet(id="dask_es")
log_dask = dd.from_pandas(pd_es["log"].df, npartitions=2)
dask_es = dask_es.entity_from_dataframe(
entity_id="log_dask",
dataframe=log_dask,
index="id",
time_index="datetime",
variable_types=pd_es["log"].variable_types
)
pd.testing.assert_frame_equal(pd_es["log"].df, dask_es["log_dask"].df.compute(), check_like=True)
def test_create_entity_with_non_numeric_index(pd_es, dask_es):
df = pd.DataFrame({"id": ["A_1", "A_2", "C", "D"],
"values": [1, 12, -34, 27]})
dask_df = dd.from_pandas(df, npartitions=2)
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
dask_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=dask_df,
index="id",
variable_types={"id": ft.variable_types.Id, "values": ft.variable_types.Numeric})
pd.testing.assert_frame_equal(pd_es['new_entity'].df.reset_index(drop=True), dask_es['new_entity'].df.compute())
def test_create_entityset_with_mixed_dataframe_types(pd_es, dask_es):
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27]})
dask_df = dd.from_pandas(df, npartitions=2)
    # Test error is raised when trying to add Dask entity to entityset with existing pandas entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(dask_df), type(pd_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=dask_df,
index="id")
    # Test error is raised when trying to add pandas entity to entityset with existing dask entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(df), type(dask_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
dask_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
def test_add_last_time_indexes():
pd_es = EntitySet(id="pd_es")
dask_es = EntitySet(id="dask_es")
sessions = pd.DataFrame({"id": [0, 1, 2, 3],
"user": [1, 2, 1, 3],
"time": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
sessions_dask = dd.from_pandas(sessions, npartitions=2)
sessions_vtypes = {
"id": ft.variable_types.Id,
"user": ft.variable_types.Id,
"time": ft.variable_types.DatetimeTimeIndex,
"strings": ft.variable_types.Text
}
transactions = pd.DataFrame({"id": [0, 1, 2, 3, 4, 5],
"session_id": [0, 0, 1, 2, 2, 3],
"amount": [1.23, 5.24, 123.52, 67.93, 40.34, 50.13],
"time": [pd.to_datetime('2019-01-10 03:53'),
pd.to_datetime('2019-01-10 04:12'),
pd.to_datetime('2019-02-03 10:34'),
pd.to_datetime('2019-01-01 12:35'),
pd.to_datetime('2019-01-01 12:49'),
pd.to_datetime('2017-08-25 04:53')]})
transactions_dask = dd.from_pandas(transactions, npartitions=2)
transactions_vtypes = {
"id": ft.variable_types.Id,
"session_id": ft.variable_types.Id,
"amount": ft.variable_types.Numeric,
"time": ft.variable_types.DatetimeTimeIndex,
}
pd_es.entity_from_dataframe(entity_id="sessions", dataframe=sessions, index="id", time_index="time")
dask_es.entity_from_dataframe(entity_id="sessions", dataframe=sessions_dask, index="id", time_index="time", variable_types=sessions_vtypes)
pd_es.entity_from_dataframe(entity_id="transactions", dataframe=transactions, index="id", time_index="time")
dask_es.entity_from_dataframe(entity_id="transactions", dataframe=transactions_dask, index="id", time_index="time", variable_types=transactions_vtypes)
new_rel = Relationship(pd_es["sessions"]["id"],
pd_es["transactions"]["session_id"])
dask_rel = Relationship(dask_es["sessions"]["id"],
dask_es["transactions"]["session_id"])
pd_es = pd_es.add_relationship(new_rel)
dask_es = dask_es.add_relationship(dask_rel)
assert pd_es['sessions'].last_time_index is None
assert dask_es['sessions'].last_time_index is None
pd_es.add_last_time_indexes()
dask_es.add_last_time_indexes()
pd.testing.assert_series_equal(pd_es['sessions'].last_time_index.sort_index(), dask_es['sessions'].last_time_index.compute(), check_names=False)
def test_create_entity_with_make_index():
values = [1, 12, -23, 27]
df = pd.DataFrame({"values": values})
dask_df = dd.from_pandas(df, npartitions=2)
dask_es = EntitySet(id="dask_es")
vtypes = {"values": ft.variable_types.Numeric}
dask_es.entity_from_dataframe(entity_id="new_entity", dataframe=dask_df, make_index=True, index="new_index", variable_types=vtypes)
expected_df = pd.DataFrame({"new_index": range(len(values)), "values": values})
pd.testing.assert_frame_equal(expected_df, dask_es['new_entity'].df.compute())
def test_single_table_dask_entityset():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
dask_es = EntitySet(id="dask_es")
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
values_dd = dd.from_pandas(df, npartitions=2)
vtypes = {
"id": ft.variable_types.Id,
"values": ft.variable_types.Numeric,
"dates": ft.variable_types.Datetime,
"strings": ft.variable_types.Text
}
dask_es.entity_from_dataframe(entity_id="data",
dataframe=values_dd,
index="id",
variable_types=vtypes)
dask_fm, _ = ft.dfs(entityset=dask_es,
target_entity="data",
trans_primitives=primitives_list)
pd_es = ft.EntitySet(id="pd_es")
pd_es.entity_from_dataframe(entity_id="data",
dataframe=df,
index="id",
variable_types={"strings": ft.variable_types.Text})
fm, _ = ft.dfs(entityset=pd_es,
target_entity="data",
trans_primitives=primitives_list)
# Use the same columns and make sure both indexes are sorted the same
dask_computed_fm = dask_fm.compute().set_index('id').loc[fm.index][fm.columns]
pd.testing.assert_frame_equal(fm, dask_computed_fm)
def test_single_table_dask_entityset_ids_not_sorted():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
dask_es = EntitySet(id="dask_es")
df = pd.DataFrame({"id": [2, 0, 1, 3],
"values": [1, 12, -34, 27],
"dates": [ | pd.to_datetime('2019-01-10') | pandas.to_datetime |
# -*- coding: utf-8 -*-
import pandas
import numpy
import sys
import unittest
from datetime import datetime
from pandas.testing import assert_frame_equal, assert_series_equal
import os
import copy
sys.path.append("..")
import warnings
import nPYc
from nPYc.enumerations import SampleType
from nPYc.enumerations import AssayRole
from nPYc.enumerations import VariableType
from generateTestDataset import generateTestDataset
import tempfile
from isatools import isatab
class test_msdataset_synthetic(unittest.TestCase):
"""
Test MSDataset object functions with synthetic data
"""
def setUp(self):
self.msData = nPYc.MSDataset('', fileType='empty')
self.msData.sampleMetadata = pandas.DataFrame(
{'Sample File Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'Sample Base Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'AssayRole': [AssayRole.Assay, AssayRole.PrecisionReference, AssayRole.PrecisionReference],
'SampleType': [SampleType.StudySample, SampleType.StudyPool, SampleType.ExternalReference],
'Sample Name': ['Sample1', 'Sample2', 'Sample3'], 'Acqu Date': ['26-May-17', '26-May-17', '26-May-17'],
'Acqu Time': ['16:42:57', '16:58:49', '17:14:41'], 'Vial': ['1:A,1', '1:A,2', '1:A,3'],
'Instrument': ['XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest'],
'Acquired Time': [datetime(2017, 5, 26, 16, 42, 57), datetime(2017, 5, 26, 16, 58, 49),
datetime(2017, 5, 26, 17, 14, 41)], 'Run Order': [0, 1, 2], 'Batch': [1, 1, 2],
'Correction Batch': [numpy.nan, 1, 2], 'Matrix': ['U', 'U', 'U'],
'Subject ID': ['subject1', 'subject1', 'subject2'], 'Sample ID': ['sample1', 'sample2', 'sample3'],
'Dilution': [numpy.nan, '60.0', '100.0'],'Exclusion Details': ['','','']})
self.msData.featureMetadata = pandas.DataFrame(
{'Feature Name': ['Feature1', 'Feature2', 'Feature3'], 'Retention Time': [6.2449, 2.7565, 5.0564],
'm/z': [249.124281, 381.433191, 471.132083]})
self.msData.featureMetadata['Exclusion Details'] = None
self.msData.featureMetadata['User Excluded'] = False
self.msData.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=self.msData.featureMetadata.index)
self.msData.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=self.msData.featureMetadata.index)
self.msData._intensityData = numpy.array([[10.2, 20.95, 30.37], [10.1, 20.03, 30.74], [3.065, 15.83, 30.16]])
# Attributes
self.msData.Attributes['FeatureExtractionSoftware'] = 'UnitTestSoftware'
# excluded data
self.msData.sampleMetadataExcluded = []
self.msData.intensityDataExcluded = []
self.msData.featureMetadataExcluded = []
self.msData.excludedFlag = []
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[0, :])
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata)
self.msData.excludedFlag.append('Samples')
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[:, 0])
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata)
self.msData.excludedFlag.append('Features')
# finish
self.msData.VariableType = VariableType.Discrete
self.msData.initialiseMasks()
def test_rsd_raises(self):
msData = nPYc.MSDataset('', fileType='empty')
with self.subTest(msg='No reference samples'):
msData.sampleMetadata = pandas.DataFrame(None)
with self.assertRaises(ValueError):
msData.rsdSP
with self.subTest(msg='Only one reference sample'):
msData.sampleMetadata = pandas.DataFrame([[nPYc.enumerations.AssayRole.PrecisionReference, nPYc.enumerations.SampleType.StudyPool]], columns=['AssayRole', 'SampleType'])
with self.assertRaises(ValueError):
msData.rsdSP
def test_getsamplemetadatafromfilename(self):
"""
Test we are parsing NPC MS filenames correctly (PCSOP.081).
"""
# Create an empty object with simple filenames
msData = nPYc.MSDataset('', fileType='empty')
msData.sampleMetadata['Sample File Name'] = ['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02_x',
'Test2_RPOS_ToF02_U2W03_b',
'Test3_RNEG_ToF03_S3W04_2',
'Test4_RPOS_ToF04_B1S1_SR_q',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01_9',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21']
msData._getSampleMetadataFromFilename(msData.Attributes['filenameSpec'])
##
# Check basename
##
basename = pandas.Series(['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_RPOS_ToF04_B1S1_SR',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21'],
name='Sample Base Name',
dtype='str')
| assert_series_equal(msData.sampleMetadata['Sample Base Name'], basename) | pandas.testing.assert_series_equal |
# basics
from typing import Callable
import pandas as pd
import os
from pandas.core.frame import DataFrame
# segnlp
from segnlp import utils
from segnlp import metrics
from segnlp.utils.baselines import MajorityBaseline
from segnlp.utils.baselines import RandomBaseline
from segnlp.utils.baselines import SentenceMajorityBaseline
from segnlp.utils.baselines import SentenceRandomBaseline
from segnlp.utils.baselines import SentenceBIOBaseline
class Baseline:
def __run_baseline(
self,
baseline,
name : str,
df:pd.DataFrame,
kwargs:dict,
metric_f: Callable,
task_labels: dict,
):
all_metrics = []
for rs in utils.random_ints(self.n_random_seeds):
kwargs["random_seed"] = rs
#init the baseline model
bl = baseline(**kwargs)
# run baseline
pred_df = bl(df.copy(deep=True))
#evaluate baseline
metrics = metric_f(
pred_df = pred_df,
target_df = df,
task_labels = task_labels
)
metrics["random_seed"] = rs
metrics["baseline"] = name
all_metrics.append(metrics)
score_df = pd.DataFrame(all_metrics)
return score_df
def __get_task_labels_ids(self) -> dict:
task_labels_ids = {k:list(v.values()) for k,v in self.label_encoder.label2id.items()}
for task in self.all_tasks:
if task in task_labels_ids:
continue
task_labels_ids[task] = []
task_labels_ids = {k:v for k,v in task_labels_ids.items() if ("+" not in k and "seg" not in k)}
return task_labels_ids
def __get_majority_labels(self, task_labels_ids):
if self.dataset_name == "PE":
majority_labels = {}
if "label" in task_labels_ids:
majority_labels["label"] = self.label_encoder.label2id["label"]["Premise"]
if "link_label" in task_labels_ids:
majority_labels["link_label"] = self.label_encoder.label2id["link_label"]["support"]
if "link" in task_labels_ids:
majority_labels["link"] = None
if self.dataset_name == "MTC":
majority_labels = None
return majority_labels
def __load_data(self):
df = pd.read_csv(self._path_to_df, index_col = 0)
splits = utils.load_pickle_data(self._path_to_splits)
val_df = df.loc[splits[0]["val"]]
test_df = df.loc[splits[0]["test"]]
return val_df, test_df
def __get_sentence_baselines_scores(self,
df: pd.DataFrame,
metric_f: Callable,
task_labels_ids : dict,
majority_labels : dict
):
score_dfs = []
baselines = zip(["majority", "random"],[SentenceMajorityBaseline, SentenceRandomBaseline])
for name, baseline in baselines:
score_df = self.__run_baseline(
baseline = baseline,
name = name,
df = df,
kwargs = dict(
task_labels = majority_labels if name == "majority" else task_labels_ids,
p = 1.0
),
metric_f = metric_f,
task_labels = self.task_labels
)
score_dfs.append(score_df)
sdf = pd.concat(score_dfs)
sdf.set_index("baseline", inplace = True)
return {
"random" : sdf.loc["random"].to_dict("list"),
"majority" : sdf.loc["majority"].to_dict("list")
}
def __get__baseline_scores(self,
df: pd.DataFrame,
metric_f: Callable,
task_labels_ids : dict,
majority_labels : dict
):
score_dfs = []
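        # keep one row per seg_id (the first occurrence) before running the
        # segment-level baselines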
val_df = df.groupby("seg_id", sort = False).first()
baselines = zip(["majority", "random"],[MajorityBaseline, RandomBaseline])
for name, baseline in baselines:
score_df = self.__run_baseline(
baseline = baseline,
name = name,
df = val_df,
kwargs = dict(
task_labels = majority_labels if name == "majority" else task_labels_ids,
),
metric_f = metric_f,
task_labels = self.task_labels
)
score_dfs.append(score_df)
sdf = | pd.concat(score_dfs) | pandas.concat |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
class TestRollingTS:
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
).set_index("A")
self.ragged = DataFrame({"B": range(5)})
self.ragged.index = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
def test_doc_string(self):
df = DataFrame(
{"B": [0, 1, 2, np.nan, 4]},
index=[
Timestamp("20130101 09:00:00"),
| Timestamp("20130101 09:00:02") | pandas.Timestamp |
#%%
from pymaid_creds import url, name, password, token
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import numpy.random as random
import gzip
import csv
import connectome_tools.celltype as ct
import connectome_tools.process_matrix as pm
import connectome_tools.process_graph as pg
from tqdm import tqdm
from joblib import Parallel, delayed
import networkx as nx
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# font settings
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
# load pairs
pairs = pm.Promat.get_pairs()
ipsi_pair_ids = pm.Promat.load_pairs_from_annotation('mw ipsilateral axon', pairs, return_type='all_pair_ids')
bilateral_pair_ids = pm.Promat.load_pairs_from_annotation('mw bilateral axon', pairs, return_type='all_pair_ids')
contra_pair_ids = pm.Promat.load_pairs_from_annotation('mw contralateral axon', pairs, return_type='all_pair_ids')
dVNC_pair_ids = pm.Promat.load_pairs_from_annotation('mw dVNC', pairs, return_type='all_pair_ids')
dSEZ_pair_ids = pm.Promat.load_pairs_from_annotation('mw dSEZ', pairs, return_type='all_pair_ids')
RGN_pair_ids = pm.Promat.load_pairs_from_annotation('mw RGN', pairs, return_type='all_pair_ids')
sensories_pair_ids = [pm.Promat.load_pairs_from_annotation(x, pairs, return_type='all_pair_ids') for x in pymaid.get_annotated('mw brain inputs').name]
all_sensories = [x for sublist in sensories_pair_ids for x in sublist]
# %%
# EXPERIMENT 1: removing edges from contralateral and bilateral neurons -> effect on path length?
# load previously generated edge list
all_edges_combined = pd.read_csv('data/edges_threshold/ad_all-paired-edges.csv', index_col=0)
# iterations for random edge removal as control
n_init = 40
# excise edges and generate graphs
e_contra_contra, e_contra_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(contra_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_bi_contra, e_bi_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_bi_ipsi, e_bi_ipsi_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids, all_sensories), 'ipsilateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_all_contra, e_all_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids + contra_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
# %%
# this chunk is incomplete
# write all graphs to graphml
# read all graph from graphml
graph = pg.Analyze_Nx_G(all_edges_combined, graph_type='directed')
shuffled_graphs = Parallel(n_jobs=-1)(delayed(nx.readwrite.graphml.read_graphml)(f'interhemisphere/csv/shuffled_graphs/iteration-{i}.graphml', node_type=int, edge_key_type=str) for i in tqdm(range(n_init)))
shuffled_graphs = [pg.Analyze_Nx_G(edges=x.edges, graph=x) for x in shuffled_graphs]
# %%
# generate and save paths
cutoff=5
# generate and save paths for experimental
save_path = [f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra']
experimental = [e_contra_contra, e_bi_contra, e_bi_ipsi, e_all_contra]
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(experimental[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=save_path[i]) for i in tqdm((range(len(experimental)))))
# generate and save paths for controls
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_contra_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_bi_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_bi_ipsi_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_all_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
# %%
# analyze paths: total and count per # hops
def process_paths(excise_paths, control_paths, edges_removed):
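    # tally total path counts (excised vs control) and counts per path length,
    # then write both tables to csv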
excise_count = len(excise_paths)
control_counts = [len(x) for x in control_paths]
path_counts_data = []
for row in zip(control_counts, [f'control-{edges_removed}']*len(control_counts)):
path_counts_data.append(row)
path_counts_data.append([excise_count, f'excised-{edges_removed}'])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/processed/excised_graph_{edges_removed}.csv')
# count per # hops
excise_path_counts = [len(x) for x in excise_paths]
control_path_counts = [[len(x) for x in path] for path in control_paths]
path_counts_length_data = []
for i, path_length in enumerate(control_path_counts):
for row in zip(path_length, [f'control-{edges_removed}']*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for row in zip(excise_path_counts, [f'excised-{edges_removed}']*len(excise_path_counts), [0]*len(excise_path_counts)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/processed/excised_graph_{edges_removed}_path_lengths.csv')
cutoff=5
n_init = 40
excise_Cc_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra.csv.gz')
control_Cc_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Cc_paths, control_Cc_paths, edges_removed='Contra-contra')
excise_Bc_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra.csv.gz')
control_Bc_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Bc_paths, control_Bc_paths, edges_removed='Bilateral-contra')
excise_Bi_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi.csv.gz')
control_Bi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Bi_paths, control_Bi_paths, edges_removed='Bilateral-ipsi')
excise_Ac_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra.csv.gz')
control_Ac_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Ac_paths, control_Ac_paths, edges_removed='All-contra')
# wildtype paths
graph_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/all_paths_sens-to-dVNC_cutoff{cutoff}.csv.gz')
excise_count = len(graph_paths)
path_counts_data = []
path_counts_data.append([excise_count, f'wildtype'])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/processed/wildtype.csv')
path_counts_length_data = []
excise_path_counts = [len(x) for x in graph_paths]
for row in zip(excise_path_counts, [f'wildtype']*len(excise_path_counts), [0]*len(excise_path_counts)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/processed/wildtype_path_lengths.csv')
# %%
##########
# EXPERIMENT 2: removing random number of ipsi vs contra edges, effect on paths
#
# load previously generated edge list
all_edges_combined = pd.read_csv('data/edges_threshold/ad_all-paired-edges.csv', index_col=0)
# iterations for random edge removal as control
n_init = 8
# excise edges and generate graphs
random_ipsi500, random_contra500 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 500, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
random_ipsi1000, random_contra1000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 1000, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
random_ipsi2000, random_contra2000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 2000, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
random_ipsi4000, random_contra4000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 4000, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
# %%
# generate and save paths
cutoff=5
# generate and save paths for controls
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-500-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-500-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra500[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-1000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-1000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra1000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-2000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-2000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra2000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-4000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-4000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra4000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
# %%
# analyze paths: total and count per # hops
def process_paths_ipsi_contra(ipsi_paths, contra_paths, count_removed):
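    # same tallies as process_paths, but comparing random ipsi- vs contra-edge
    # removal instead of excised vs control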
ipsi_counts = [len(x) for x in ipsi_paths]
contra_counts = [len(x) for x in contra_paths]
path_counts_data = []
for row in zip(ipsi_counts, [f'ipsi-{count_removed}']*len(ipsi_counts)):
path_counts_data.append(row)
for row in zip(contra_counts, [f'contra-{count_removed}']*len(contra_counts)):
path_counts_data.append(row)
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges/processed/excised_graph_random-ipsi-contra_{count_removed}-removed.csv')
# count per # hops
ipsi_path_counts = [[len(x) for x in path] for path in ipsi_paths]
contra_path_counts = [[len(x) for x in path] for path in contra_paths]
path_counts_length_data = []
for i, path_length in enumerate(ipsi_path_counts):
for row in zip(path_length, [f'ipsi-{count_removed}']*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(contra_path_counts):
for row in zip(path_length, [f'contra-{count_removed}']*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges/processed/excised_graph_random-ipsi-contra_{count_removed}-removed_path-lengths.csv')
cutoff=5
n_init = 8
count_removed = 500
random_ipsi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_paths, random_contra_paths, count_removed)
count_removed = 1000
random_ipsi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_paths, random_contra_paths, count_removed)
count_removed = 2000
random_ipsi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_paths, random_contra_paths, count_removed)
count_removed = 4000
random_ipsi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_paths, random_contra_paths, count_removed)
# %%
##########
# EXPERIMENT 3: removing random number of ipsi vs contra edges, effect on paths on just one side of brain
#
# load previously generated edge list
all_edges_combined_split = pd.read_csv('data/edges_threshold/pairwise-threshold_ad_all-edges.csv', index_col=0)
left = pm.Promat.get_hemis('left')
right = pm.Promat.get_hemis('right')
# iterations for random edge removal as control
n_init = 8
dVNC = pymaid.get_skids_by_annotation('mw dVNC')
dVNC_left = list(np.intersect1d(dVNC, left))
dVNC_right = list(np.intersect1d(dVNC, right))
all_sensories = ct.Celltype_Analyzer.get_skids_from_meta_meta_annotation('mw brain sensory modalities')
all_sensories_left = list(np.intersect1d(all_sensories, left))
all_sensories_right = list(np.intersect1d(all_sensories, right))
# generate wildtype graph
split_graph = pg.Analyze_Nx_G(all_edges_combined_split, graph_type='directed', split_pairs=True)
# excise edges and generate graphs
random_ipsi500_left, random_ipsi500_right, random_contra500 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 500, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi1000_left, random_ipsi1000_right, random_contra1000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 1000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi2000_left, random_ipsi2000_right, random_contra2000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 2000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi4000_left, random_ipsi4000_right, random_contra4000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 4000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi8000_left, random_ipsi8000_right, random_contra8000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 8000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
#random_ipsi8764_left, random_ipsi8764_right, random_contra8764 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 8764, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
# %%
# generate and save paths
cutoff=5
# generate wildtype paths
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_wildtype'
pg.Prograph.generate_save_simple_paths(split_graph.G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=save_path)
# generate and save paths
count = 500
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra500[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 1000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra1000[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 2000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra2000[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 4000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra4000[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 8000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8000_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8000_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra8000[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
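# Sketch of a condensed alternative to the repeated per-count blocks above; it assumes a
# dict mapping edge-removal counts to the per-condition graph lists already built above
# (the helper name and graph_sets structure are illustrative, not part of the pipeline).
def _generate_excised_paths(graph_sets, sources, targets, cutoff, n_init, base_dir):
    # graph_sets: {count: {'ipsi-left': [...], 'ipsi-right': [...], 'contra': [...]}}
    for count, conditions in graph_sets.items():
        for condition, graphs in conditions.items():
            save_path = f'{base_dir}/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-{condition}-{count}-N'
            Parallel(n_jobs=-1)(
                delayed(pg.Prograph.generate_save_simple_paths)(
                    graphs[i].G, sources, targets, cutoff=cutoff, save_path=f'{save_path}{i}')
                for i in tqdm(range(n_init)))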
'''
count = 8764
# (uses the 8764-edge excision commented out above; uncomment that line first)
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8764_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8764_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra8764[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
'''
# %%
# analyze paths: total and count per # hops
def process_paths_ipsi_contra(ipsi_left_paths, ipsi_right_paths, contra_paths, count_removed):
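    '''
    Summarize simple-path results for one edge-removal count: each *_paths argument is a
    list of n_init path collections (one per random excision). Writes two CSVs: total path
    counts per replicate, and path counts broken down by path length.
    '''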
ipsi_left_counts = [len(x) for x in ipsi_left_paths]
ipsi_right_counts = [len(x) for x in ipsi_right_paths]
contra_counts = [len(x) for x in contra_paths]
path_counts_data = []
for row in zip(ipsi_left_counts, [f'ipsi-left']*len(ipsi_left_counts), [count_removed]*len(ipsi_left_counts)):
path_counts_data.append(row)
for row in zip(ipsi_right_counts, [f'ipsi-right']*len(ipsi_right_counts), [count_removed]*len(ipsi_right_counts)):
path_counts_data.append(row)
for row in zip(contra_counts, [f'contra']*len(contra_counts), [count_removed]*len(contra_counts)):
path_counts_data.append(row)
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition', 'edges_removed'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_{count_removed}-removed.csv')
# count per # hops
ipsi_left_path_counts = [[len(x) for x in path] for path in ipsi_left_paths]
ipsi_right_path_counts = [[len(x) for x in path] for path in ipsi_right_paths]
contra_path_counts = [[len(x) for x in path] for path in contra_paths]
path_counts_length_data = []
for i, path_length in enumerate(ipsi_left_path_counts):
for row in zip(path_length, [f'ipsi-left']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(ipsi_right_path_counts):
for row in zip(path_length, [f'ipsi-right']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(contra_path_counts):
for row in zip(path_length, [f'contra']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'edges_removed', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'edges_removed', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_{count_removed}-removed_path-lengths.csv')
cutoff=5
n_init = 8
count_removed = 500
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 1000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 2000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 4000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 8000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
# wildtype paths
graph_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_wildtype.csv.gz')
wt_count = len(graph_paths)
path_counts_data = []
path_counts_data.append([wt_count, f'wildtype', 0])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition', 'edges_removed'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype.csv')
path_counts_length_data = []
path_counts = [len(x) for x in graph_paths]
for row in zip(path_counts, [f'wildtype']*len(path_counts), [0]*len(path_counts), [0]*len(path_counts)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'edges_removed', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype_path_lengths.csv')
# %%
# plot total paths per condition from left -> left paths
total_paths = pd.concat([pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_500-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_1000-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_2000-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_4000-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_8000-removed.csv', index_col=0)], axis=0)
wildtype = pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype.csv', index_col=0)
total_paths = pd.concat([total_paths, pd.DataFrame([[wildtype['count'].values[0], 'contra', 0]], columns = total_paths.columns),
pd.DataFrame([[wildtype['count'].values[0], 'ipsi-left', 0]], columns = total_paths.columns),
pd.DataFrame([[wildtype['count'].values[0], 'ipsi-right', 0]], columns = total_paths.columns)], axis=0)
# plot raw number of paths (all lengths), after removing edges of different types
fig, ax = plt.subplots(1,1, figsize=(2,2))
sns.lineplot(data = total_paths, x='edges_removed', y='count', hue='condition', err_style='bars', linewidth=0.75, err_kws={'elinewidth':0.75}, ax=ax)
ax.set(ylim=(0, 1100000))
plt.savefig('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/path-counts_left-to-left_removing-edge-types.pdf', format='pdf', bbox_inches='tight')
# normalized plot of all paths (all lengths), after removing edges of different types
max_control_paths = total_paths[total_paths.edges_removed==0].iloc[0, 0]
total_paths.loc[:, 'count'] = total_paths.loc[:, 'count']/max_control_paths
fig, ax = plt.subplots(1,1, figsize=(2,2))
sns.lineplot(data = total_paths, x='edges_removed', y='count', hue='condition', err_style='bars', linewidth=0.75, err_kws={'elinewidth':0.75}, ax=ax)
ax.set(ylim=(0, 1.05))
plt.savefig('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/path-counts_left-to-left_removing-edge-types_normalized.pdf', format='pdf', bbox_inches='tight')
# plot total paths per path length from left -> left paths
total_paths = pd.concat([pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_500-removed_path-lengths.csv'),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_1000-removed_path-lengths.csv'),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_2000-removed_path-lengths.csv'),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_4000-removed_path-lengths.csv'),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_8000-removed_path-lengths.csv')], axis=0)
wildtype = pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype_path_lengths.csv')
total_paths_normalized = []
for i in range(len(total_paths.index)):
length = total_paths.iloc[i].path_length
row = [total_paths.iloc[i].condition, total_paths.iloc[i].N,
total_paths.iloc[i].edges_removed, total_paths.iloc[i].path_length,
total_paths.iloc[i].value/wildtype[wildtype.path_length==length].value.values[0]] # normalized path counts by wildtype
total_paths_normalized.append(row)
total_paths_normalized = pd.DataFrame(total_paths_normalized, columns = total_paths.columns)
for removed in [500, 1000, 2000, 4000, 8000]:
fig, ax = plt.subplots(1,1, figsize=(2,2))
sns.lineplot(data=total_paths_normalized[total_paths_normalized.edges_removed==removed], x='path_length', y='value', hue='condition', err_style='bars', linewidth=0.75, err_kws={'elinewidth':0.75}, ax=ax)
ax.set(ylim=(0, 1.1))
plt.savefig(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/path-length-counts_left-to-left_removing-{removed}-edge-types.pdf', format='pdf', bbox_inches='tight')
# %%
# how many nodes are in each type of path?
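# Left unanswered above; a minimal sketch, assuming each loaded path is a list of node IDs
# (consistent with the per-path len(x) calls used elsewhere in this script):
def count_unique_nodes(paths):
    return len({node for path in paths for node in path})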
# %%
##########
# EXPERIMENT 4: removing random sets of ipsi vs. contra edges; effect on paths from one side of the brain to the opposite side
#
# rebuild the split graph and excised graphs, then generate paths from left sensories to right dVNCs
all_edges_combined_split = pd.read_csv('data/edges_threshold/pairwise-threshold_ad_all-edges.csv', index_col=0)
left = pm.Promat.get_hemis('left')
right = pm.Promat.get_hemis('right')
# iterations for random edge removal as control
n_init = 8
dVNC = pymaid.get_skids_by_annotation('mw dVNC')
dVNC_left = list(np.intersect1d(dVNC, left))
dVNC_right = list(np.intersect1d(dVNC, right))
all_sensories = ct.Celltype_Analyzer.get_skids_from_meta_meta_annotation('mw brain sensory modalities')
all_sensories_left = list(np.intersect1d(all_sensories, left))
all_sensories_right = list(np.intersect1d(all_sensories, right))
# generate wildtype graph
split_graph = pg.Analyze_Nx_G(all_edges_combined_split, graph_type='directed', split_pairs=True)
# excise edges and generate graphs
random_ipsi500_left, random_ipsi500_right, random_contra500 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 500, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi1000_left, random_ipsi1000_right, random_contra1000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 1000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi2000_left, random_ipsi2000_right, random_contra2000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 2000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi4000_left, random_ipsi4000_right, random_contra4000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 4000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi8000_left, random_ipsi8000_right, random_contra8000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 8000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
# %%
# generate and save paths
cutoff=5
# generate wildtype paths
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_wildtype'
pg.Prograph.generate_save_simple_paths(split_graph.G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=save_path)
# generate and save paths
count = 500
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra500[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 1000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra1000[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 2000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra2000[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 4000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra4000[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 8000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8000_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8000_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra8000[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
# %%
# analyze paths: total and count per # hops
def process_paths_ipsi_contra(ipsi_left_paths, ipsi_right_paths, contra_paths, count_removed):
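    '''
    Same summary as the left-to-left version above, but output filenames carry the
    to-dVNC-right tag for the left-to-right path experiment.
    '''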
ipsi_left_counts = [len(x) for x in ipsi_left_paths]
ipsi_right_counts = [len(x) for x in ipsi_right_paths]
contra_counts = [len(x) for x in contra_paths]
path_counts_data = []
for row in zip(ipsi_left_counts, [f'ipsi-left']*len(ipsi_left_counts), [count_removed]*len(ipsi_left_counts)):
path_counts_data.append(row)
for row in zip(ipsi_right_counts, [f'ipsi-right']*len(ipsi_right_counts), [count_removed]*len(ipsi_right_counts)):
path_counts_data.append(row)
for row in zip(contra_counts, [f'contra']*len(contra_counts), [count_removed]*len(contra_counts)):
path_counts_data.append(row)
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition', 'edges_removed'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph-to-dVNC-right_random-ipsi-contra_{count_removed}-removed.csv')
# count per # hops
ipsi_left_path_counts = [[len(x) for x in path] for path in ipsi_left_paths]
ipsi_right_path_counts = [[len(x) for x in path] for path in ipsi_right_paths]
contra_path_counts = [[len(x) for x in path] for path in contra_paths]
path_counts_length_data = []
for i, path_length in enumerate(ipsi_left_path_counts):
for row in zip(path_length, [f'ipsi-left']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(ipsi_right_path_counts):
for row in zip(path_length, [f'ipsi-right']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(contra_path_counts):
for row in zip(path_length, [f'contra']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'edges_removed', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'edges_removed', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph-to-dVNC-right_random-ipsi-contra_{count_removed}-removed_path-lengths.csv')
cutoff=5
n_init = 8
count_removed = 500
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 1000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 2000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 4000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 8000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
# wildtype paths
graph_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_wildtype.csv.gz')
wt_count = len(graph_paths)
path_counts_data = []
path_counts_data.append([wt_count, f'wildtype', 0])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition', 'edges_removed'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype_to-dVNC-right.csv')
path_counts_length_data = []
path_counts = [len(x) for x in graph_paths]
for row in zip(path_counts, [f'wildtype']*len(path_counts), [0]*len(path_counts), [0]*len(path_counts)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'edges_removed', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype_path_lengths_to-dVNC-right.csv')
# %%
# plot total paths per condition from left -> right paths
total_paths = pd.concat([pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph-to-dVNC-right_random-ipsi-contra_500-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph-to-dVNC-right_random-ipsi-contra_1000-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph-to-dVNC-right_random-ipsi-contra_2000-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph-to-dVNC-right_random-ipsi-contra_4000-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph-to-dVNC-right_random-ipsi-contra_8000-removed.csv', index_col=0)], axis=0)
wildtype = pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype_to-dVNC-right.csv', index_col=0)
total_paths = pd.concat([total_paths, pd.DataFrame([[wildtype['count'].values[0], 'contra', 0]], columns = total_paths.columns),
pd.DataFrame([[wildtype['count'].values[0], 'ipsi-left', 0]], columns = total_paths.columns),
pd.DataFrame([[wildtype['count'].values[0], 'ipsi-right', 0]], columns = total_paths.columns)], axis=0)
# raw plot of all paths (all lengths), after removing edges of different types
fig, ax = plt.subplots(1,1, figsize=(1.5,1.5))
sns.lineplot(data = total_paths, x='edges_removed', y='count', hue='condition', err_style='bars', linewidth=0.5, err_kws={'elinewidth':0.5}, ax=ax)
ax.set(ylim=(0, 1100000))
plt.savefig('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/path-counts_left-to-right_removing-edge-types.pdf', format='pdf', bbox_inches='tight')
# normalized plot of all paths (all lengths), after removing edges of different types
max_control_paths = total_paths[total_paths.edges_removed==0].iloc[0, 0]
total_paths.loc[:, 'count'] = total_paths.loc[:, 'count']/max_control_paths
fig, ax = plt.subplots(1,1, figsize=(2,2))
sns.lineplot(data = total_paths, x='edges_removed', y='count', hue='condition', err_style='bars', linewidth=0.75, err_kws={'elinewidth':0.75}, ax=ax)
ax.set(ylim=(0, 1.05))
plt.savefig('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/path-counts_left-to-right_removing-edge-types_normalized.pdf', format='pdf', bbox_inches='tight')
# plot total paths per path length from left -> right paths
total_paths = pd.concat([ | pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph-to-dVNC-right_random-ipsi-contra_500-removed_path-lengths.csv') | pandas.read_csv |
"""
Code for "How Is Earnings News Transmitted to Stock Prices?" by
<NAME> and <NAME>.
Python 2
The main function takes the TAS (Time and Sales) file for one exchange on one
month and extracts only the trades from daily files, creating trade files.
"""
from os import listdir
import os
import pandas as pd
from datetime import datetime
import gzip
import shutil
import hashlib
import sys
outdir = 'M:\\vgregoire\\TRTH_Trades\\'
# Checks the md5 hash to make sure the raw file is not corrupted
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
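# Hashing the file in 4 KiB chunks keeps memory use flat even for large raw TAS files.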
# This function takes the TAS (Time and Sales) file for one exchange on one
# month and extracts only the trades from daily files, creating trade files.
def process_task(exch, y, m):
mdir = 'Y:\\' + exch + '\\TAS\\' + str(y) + '\\' + str(m).zfill(2) + '\\'
# List dates in the monthly directory
ls = listdir(mdir)
ls = [fn for fn in ls if fn[15:23] == 'TAS-Data' and fn.endswith('.gz')]
ls_df = pd.DataFrame(ls, columns=['Filename'])
ls_df['Date'] = ls_df.Filename.apply(lambda x: datetime.strptime(x[4:14], '%Y-%m-%d'))
# Get all files related to each specific date.
dates_fn = {datetime.strptime(x[4:14], '%Y-%m-%d'):[] for x in ls}
for fn in ls:
dates_fn[datetime.strptime(fn[4:14], '%Y-%m-%d')].append(fn)
# Process all dates
for date in dates_fn:
fn = dates_fn[date]
trade_cols = ['#RIC', 'Date[G]', 'Time[G]', 'GMT Offset', 'Type',
'Ex/Cntrb.ID', 'Price', 'Volume', 'Market VWAP',
'Qualifiers', 'Seq. No.', 'Exch Time',
'Trd/Qte Date']
str_cols = ['Ex/Cntrb.ID', 'Exch Time', 'Trd/Qte Date']
dtypes = {x: object for x in str_cols}
dfs = []
for f in fn:
# Validate file
md5_f = md5(mdir+f)
with open(mdir+f+'.md5sum', 'r') as f_cs:
md5_check = f_cs.readline()[:32]
if md5_f != md5_check:
sys.stderr.write('Wrong checksum for ' + f)
continue
# Read the file by chunk to limit memory usage, filtering on trades.
for chunk in pd.read_csv(mdir + f, chunksize=10000, usecols=trade_cols,
dtype=dtypes):
df_trades = chunk[chunk.Type=='Trade'].copy()
del df_trades['Type']
dfs.append(df_trades)
df = | pd.concat(dfs) | pandas.concat |
import time
import numpy as np
import pandas as pd
from scipy.io import arff
from bitmap_mapper.bitmap_mapper_interface import BitmapMapperInterface
from feature_extractor.feature_extractor import FeatureExtractor
class CommonData:
def __init__(self, feature_extractor: FeatureExtractor, bitmap_mapper: BitmapMapperInterface):
self.__extractor = feature_extractor
self.__mapper = bitmap_mapper
self._class_names = None
def classes_str_to_array(self, class_str: str) -> np.ndarray:
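        '''One-hot encode a class-name string against the sorted known class names;
        raises RuntimeError for unknown classes.'''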
index = np.where(self._class_names == class_str)
res = np.zeros(self.get_class_count(), dtype=np.int64)
res[index] = 1
if sum(res) == 0:
raise RuntimeError('Unknown class')
return res
def classes_array_to_str(self, class_arr: np.ndarray) -> str:
if len(class_arr) != len(self._class_names):
raise RuntimeError('Unknown class')
index = np.where(class_arr == 1)
res = self._class_names[index]
if len(res) == 0:
raise RuntimeError('Unknown class')
return res
def _extract_features_from_path(self, path: str):
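        '''
        Load an ARFF time-series file, one-hot encode the class labels (last column),
        convert each series to a bitmap and extract its feature vector; returns
        (feature_list, mapped_classes) and sets self._class_names as a side effect.
        '''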
data = arff.loadarff(path)
df = pd.DataFrame(data[0])
classes = np.array([s[0].decode() for s in df.iloc[:, -1:].values])
self._class_names = np.unique(classes)
self._class_names.sort()
mapped_classes = np.empty((len(classes), self.get_class_count()))
for i in range(len(classes)):
mapped_classes[i] = self.classes_str_to_array(classes[i])
feature_list = np.empty((len(classes), self.__extractor.feature_count()))
self.__mapper.set_bitmap_size(30)
i = 0
for row in df.iloc[:, :-1].iterrows():
start = time.process_time_ns()
bitmap = self.__mapper.convert_series(row[1].values.tolist())
feature_list[i] = self.__extractor.calculate_features(bitmap)
end = time.process_time_ns()
print(f"Set {i + 1} converted at {(end - start) / 1e6} ms")
i += 1
return feature_list, mapped_classes
def _extract_features_from_path_without_classes(self, path: str):
data = arff.loadarff(path)
df = | pd.DataFrame(data[0]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import datetime
import yfinance as yf
from pandas_datareader import data as pdr
from flask import current_app
from stk_predictor.extensions import db
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
def get_ticker_from_yahoo(ticker, start_date, end_date):
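    '''
    Download daily close/volume for `ticker` from Yahoo Finance, merge it with the
    history stored in the `aapl` SQL table (de-duplicated by trading date), write the
    combined frame back, and return it; returns None if the combined frame is empty.
    '''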
yf.pdr_override()
try:
new_trading_df = pdr.get_data_yahoo(
ticker, start_date, end_date, interval='1d')
new_trading_df = new_trading_df.drop(
['Open', 'High', 'Low', 'Adj Close'], axis=1)
new_trading_df = new_trading_df.dropna('index')
new_trading_df = new_trading_df.reset_index()
new_trading_df.columns = ['trading_date',
'intraday_close', 'intraday_volumes']
his_trading_df = pd.read_sql('aapl', db.engine, index_col='id')
df = pd.concat([his_trading_df, new_trading_df]
).drop_duplicates('trading_date')
df = df.sort_values(by='trading_date')
df = df.reset_index(drop=True)
if len(df) > 0:
df.to_sql("aapl", db.engine, if_exists='replace', index_label='id')
return df
else:
# t = pd.read_sql('aapl', db.engine, index_col='id')
return None
except Exception as ex:
        raise RuntimeError(
            "Caught exception when retrieving data from Yahoo...") from ex
return None
def get_news_from_finviz(ticker):
"""Request news headline from finviz, according to
company ticker's name
Parameters
-----------
ticker: str
the stock ticker name
Return
----------
df : pd.DataFrame
        return the latest 2 days of news headlines.
"""
current_app.logger.info("Job >> Enter Finviz news scrape step...")
base_url = 'https://finviz.com/quote.ashx?t={}'.format(ticker)
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36'
}
parsed_news = []
try:
res = requests.get(base_url, headers=headers)
if res.status_code == 200:
texts = res.text
            soup = BeautifulSoup(texts, 'html.parser')
news_tables = soup.find(id="news-table")
for x in news_tables.findAll('tr'):
text = x.a.get_text()
date_scrape = x.td.text.split()
if len(date_scrape) == 1:
time = date_scrape[0]
else:
date = date_scrape[0]
time = date_scrape[1]
parsed_news.append([date, time, text])
# filter the recent day news
df = pd.DataFrame(parsed_news, columns=['date', 'time', 'texts'])
df['date'] = pd.to_datetime(df.date).dt.date
one_day_period = (datetime.datetime.today() -
datetime.timedelta(days=1)).date()
df_sub = df[df.date >= one_day_period]
return df_sub
else:
raise RuntimeError("HTTP response Error {}".format(
res.status_code)) from None
except Exception as ex:
        current_app.logger.info("Exception in scrape Finviz: %s", ex)
raise RuntimeError("Exception in scrape Finviz.") from ex
def prepare_trading_dataset(df):
"""Prepare the trading data set.
    Time series analysis incorporates previous data for future prediction,
    so we need to retrieve historical data to generate features.
Parameters
-----------
df: DataFrame
the stock ticker trading data, including trading-date, close-price, volumes
    (Note: this function takes no `window` argument; the rolling feature
        windows are hard-coded below as multiples of 5 trading days.)
Return
----------
array_lstm : np.array
return the array with 3 dimensions shape -> [samples, 1, features]
"""
if len(df) == 0:
        raise RuntimeError(
            "Encountered error in >>make_dataset.prepare_trading_dataset<<... \
            The input trading DataFrame is empty.") from None
else:
df['log_ret_1d'] = np.log(df['intraday_close'] / df['intraday_close'].shift(1))
df['log_ret_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).sum()
df['log_ret_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).sum()
df['log_ret_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).sum()
df['log_ret_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).sum()
df['log_ret_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).sum()
df['log_ret_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).sum()
df['log_ret_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).sum()
df['log_ret_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).sum()
df['log_ret_24w'] = pd.Series(df['log_ret_1d']).rolling(window=120).sum()
df['log_ret_28w'] = pd.Series(df['log_ret_1d']).rolling(window=140).sum()
df['log_ret_32w'] = pd.Series(df['log_ret_1d']).rolling(window=160).sum()
df['log_ret_36w'] = pd.Series(df['log_ret_1d']).rolling(window=180).sum()
df['log_ret_40w'] = pd.Series(df['log_ret_1d']).rolling(window=200).sum()
df['log_ret_44w'] = pd.Series(df['log_ret_1d']).rolling(window=220).sum()
df['log_ret_48w'] = pd.Series(df['log_ret_1d']).rolling(window=240).sum()
df['log_ret_52w'] = pd.Series(df['log_ret_1d']).rolling(window=260).sum()
df['log_ret_56w'] = pd.Series(df['log_ret_1d']).rolling(window=280).sum()
df['log_ret_60w'] = | pd.Series(df['log_ret_1d']) | pandas.Series |
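# Sketch of a condensed alternative to the ladder of rolling sums above; the week
# multiples below are an assumption mirroring the hard-coded suffixes visible in
# prepare_trading_dataset (each week = 5 trading days).
def add_rolling_log_returns(frame, weeks=(1, 2, 3, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60)):
    for w in weeks:
        frame[f'log_ret_{w}w'] = frame['log_ret_1d'].rolling(window=5 * w).sum()
    return frame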
#!/usr/bin/env python
# coding: utf-8
# # Generate Generative Model Figures
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('matplotlib', 'inline')
import os
import glob
from collections import OrderedDict
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import pandas as pd
import plotnine as p9
import seaborn as sns
import pdb
import scipy.stats as ss
# In[2]:
def get_dataframes(
result_dir, file_path,
starting_point=0,
ending_point=30,
step=5, num_of_points=4
):
"""
This function grabs the result tsv files
and loads then into a dictionary strucutre
[relationship] -> dataframe
Args:
result_dir - the directory containing all the results
file_path - the path to extract the result files
starting_point - the point to start each subgraph with in plot_graph function
ending_point - the point to end each subgraph with
step - the number to increase the middle points with
num_of_points - the number of points to plot between the start and end points
"""
    # Build up X axis by gathering relatively evenly spaced points
query_points = [starting_point]
query_points += [1 + step*index for index in range(num_of_points)]
query_points += [ending_point]
return {
# Get the head word of each file that will be parsed
os.path.splitext(os.path.basename(file))[0].split("_")[0]:
pd.read_csv(file, sep="\t")
.query("lf_num in @query_points", engine="python", local_dict={"query_points":query_points})
.assign(
lf_num=lambda x:x['lf_num'].map(lambda y: str(y) if y != ending_point else 'All')
)
for file in glob.glob(f"{result_dir}/{file_path}")
}
# In[3]:
file_tree = OrderedDict({
"DaG":
{
"DaG": "../../../disease_gene/disease_associates_gene/label_sampling_experiment/results/DaG/results",
"CtD": "../../../disease_gene/disease_associates_gene/label_sampling_experiment/results/CtD/results",
"CbG": "../../../disease_gene/disease_associates_gene/label_sampling_experiment/results/CbG/results",
"GiG": "../../../disease_gene/disease_associates_gene/label_sampling_experiment/results/GiG/results",
"All": "../../../disease_gene/disease_associates_gene/label_sampling_experiment/results/all/results",
},
"CtD":
{
"DaG": "../../../compound_disease/compound_treats_disease/label_sampling_experiment/results/DaG/results",
"CtD": "../../../compound_disease/compound_treats_disease/label_sampling_experiment/results/CtD/results",
"CbG": "../../../compound_disease/compound_treats_disease/label_sampling_experiment/results/CbG/results",
"GiG": "../../../compound_disease/compound_treats_disease/label_sampling_experiment/results/GiG/results",
"All": "../../../compound_disease/compound_treats_disease/label_sampling_experiment/results/all/results",
},
"CbG":
{
"DaG": "../../../compound_gene/compound_binds_gene/label_sampling_experiment/results/DaG/results",
"CtD": "../../../compound_gene/compound_binds_gene/label_sampling_experiment/results/CtD/results",
"CbG": "../../../compound_gene/compound_binds_gene/label_sampling_experiment/results/CbG/results",
"GiG": "../../../compound_gene/compound_binds_gene/label_sampling_experiment/results/GiG/results",
"All": "../../../compound_gene/compound_binds_gene/label_sampling_experiment/results/all/results",
},
"GiG":
{
"DaG": "../../../gene_gene/gene_interacts_gene/label_sampling_experiment/results/DaG/results",
"CtD": "../../../gene_gene/gene_interacts_gene/label_sampling_experiment/results/CtD/results",
"CbG": "../../../gene_gene/gene_interacts_gene/label_sampling_experiment/results/CbG/results",
"GiG": "../../../gene_gene/gene_interacts_gene/label_sampling_experiment/results/GiG/results",
"All": "../../../gene_gene/gene_interacts_gene/label_sampling_experiment/results/all/results",
}
})
# In[4]:
# End total of label functions for each point
end_points = {
"DaG": 30,
"CtD": 22,
"CbG": 20,
"GiG": 28,
"All": 100
}
# In[5]:
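# Two-sided 95% normal-distribution critical value (~1.96).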
critical_val = ss.norm.ppf(0.975)
# In[6]:
color_names = {
"turquoise": pd.np.array([27, 158, 119, 255])/255,
"orange": pd.np.array([217, 95, 2, 255])/255,
"purple": pd.np.array([117, 112, 179, 255])/255,
"pink": pd.np.array([231, 41, 138, 255])/255,
"light-green": | pd.np.array([102, 166, 30, 255]) | pandas.np.array |
#!/usr/bin/python
'''
Tracks colonies through time in a single imaging field
'''
import cv2
import numpy as np
import glob
import os
import warnings
import pandas as pd
from PIL import Image
from string import punctuation
def _convert_to_number(val_str):
'''
Converts val_str to an int or float or logical (in that order) if
possible
'''
# NEED UNITTEST FOR JUST THIS METHOD?
    try:
        output_val = int(val_str)
    except (TypeError, ValueError):
        try:
            output_val = float(val_str)
        except (TypeError, ValueError):
if val_str.lower() == 'true':
output_val = True
elif val_str.lower() == 'false':
output_val = False
else:
output_val = val_str
return(output_val)
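# e.g. _convert_to_number('5') -> 5, '0.5' -> 0.5, 'TRUE' -> True, 'foo' -> 'foo'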
def _process_parameter_vals(val_str):
'''
Returns val_str split by semicolon into list only if semicolon
is present
    Converts val_str, or every element of the resulting list, to an
    int/float/bool where possible
'''
# NEED UNITTEST FOR JUST THIS METHOD?
if ';' in val_str:
split_val_str = val_str.split(';')
output_val = [_convert_to_number(val) for val in split_val_str]
else:
output_val = _convert_to_number(val_str)
return(output_val)
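# e.g. _process_parameter_vals('1;2;3') -> [1, 2, 3]; _process_parameter_vals('7') -> 7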
def _unprocess_parameter_vals(val):
'''
Joins any lists with semicolons
'''
if isinstance(val, list):
val_str = ';'.join([str(x) for x in val])
else:
# don't convert to str here or nan will be written as 'nan'
# and not as blank
val_str = val
return(val_str)
def _get_global_params_from_phasewise_df(analysis_config_df_phasewise):
'''
Returns a pandas series of parameters (rows) that are identical in
all columns of analysis_config_df_phasewise, and a df with those
rows removed
'''
if len(analysis_config_df_phasewise.index)>0:
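        # A parameter counts as global when its value in every phase column equals its
        # value in the first phase column (row-wise equality across all columns).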
global_param_row_bool_ser = analysis_config_df_phasewise.eq(
analysis_config_df_phasewise.iloc[:, 0], axis=0
).all(axis=1)
if any(global_param_row_bool_ser):
global_params = \
global_param_row_bool_ser.index[global_param_row_bool_ser]
global_param_ser = analysis_config_df_phasewise.loc[
global_params, analysis_config_df_phasewise.columns[0]
]
analysis_config_df_phasewise.drop(
index = global_params, inplace = True
)
else:
global_param_ser = pd.Series(name = 'all', dtype = object)
else:
global_param_ser = pd.Series(name = 'all', dtype = object)
return(global_param_ser, analysis_config_df_phasewise)
def _separate_global_params(analysis_config_df_prelim):
'''
Separate out parameters that apply to all phases into pandas df
analysis_config_df_indiv and pandas ser global_param_ser_part
'''
# pivot analysis_config_df to have phases as columns
analysis_config_df_pivot = analysis_config_df_prelim.pivot(
index = 'Parameter', columns = 'PhaseNum', values = 'Value'
)
if 'all' in analysis_config_df_pivot.columns:
# initialize global param series
global_param_ser_part = analysis_config_df_pivot['all'].dropna()
# drop 'all' column from analysis_config_df_pivot
analysis_config_df_indiv = \
analysis_config_df_pivot.drop(
columns = ['all']
).dropna(axis = 0, how = 'all')
else:
global_param_ser_part = pd.Series(name = 'all', dtype = object)
analysis_config_df_indiv = \
analysis_config_df_pivot
# convert column names of analysis_config_df to int and use to
# specify phases
analysis_config_df_indiv.columns = analysis_config_df_indiv.columns.astype(int)
# check that parameters defined as 'all' and parameters defined
# by individual phases are mutually exclusive
indiv_phase_param_set = set(analysis_config_df_indiv.index)
global_param_set = set(global_param_ser_part.index)
double_defined_params = \
set.intersection(indiv_phase_param_set, global_param_set)
if len(double_defined_params) > 0:
raise ValueError((
'Parameters may be defined either with PhaseNum set to "all" '
'or set to individual phase number integers; the following '
'parameters were defined with both: \n{0}'
).format(str(double_defined_params)))
# Add all parameters with
# identical values across phases to global_param_ser_part, and
# remove those parameters from analysis_config_df_indiv
new_global_param_ser_part, analysis_config_df_indiv = \
_get_global_params_from_phasewise_df(analysis_config_df_indiv)
global_param_ser_part = \
global_param_ser_part.append(new_global_param_ser_part)
return(global_param_ser_part, analysis_config_df_indiv)
def write_setup_file(
setup_file_out_path,
global_param_ser,
analysis_config_df_indiv,
phase_list
):
'''
Writes csv file containing all parameters from
global_param_ser and analysis_config_df_indiv to
setup_file_out_path
'''
# make dataframe for global params
global_param_df = \
pd.DataFrame(global_param_ser, columns = ['Value'])
# convert index to 'Parameter' column
global_param_df = global_param_df.reset_index().rename(
columns = {'index':'Parameter'}
)
# add PhaseNum column
global_param_df['PhaseNum'] = 'all'
if len(analysis_config_df_indiv.index) > 0:
# make dataframe for phase-specific params
phasewise_param_df = analysis_config_df_indiv.melt(
# value_vars = phase_list,
var_name = 'PhaseNum',
value_name = 'Value',
ignore_index = False
)
# drop rows where Value is NA
phasewise_param_df.dropna(subset = ['Value'], inplace = True)
# convert index to 'Parameter' column
phasewise_param_df = phasewise_param_df.reset_index().rename(
columns = {'index':'Parameter'}
)
# combine global and phasewise dfs
combined_df_prelim = pd.concat([global_param_df, phasewise_param_df])
else:
combined_df_prelim = global_param_df
# add explanation column and reorder columns
    combined_df = pd.merge(param_description_df, combined_df_prelim)
combined_df = \
combined_df[['Parameter', 'Value', 'PhaseNum', 'Explanation']]
# convert lists in Parameter to semicolon-joined strings
combined_df.Value = combined_df.Value.map(
_unprocess_parameter_vals
)
combined_df.to_csv(setup_file_out_path, index = False)
# load dataframe of parameters and descriptions
PIE_package_path = os.path.abspath(os.path.dirname(__file__))
parameter_file = os.path.join(
PIE_package_path, 'PIE_data', 'param_descriptions.csv'
)
# convert strings to int where possible, and convert values
# separated by semicolon to lists
param_description_df = pd.read_csv(
parameter_file,
converters =
{'Default': _process_parameter_vals},
na_filter = False
)
# list fields that must be specified in analysis config
required_fields_general = \
['fluor_channel_scope_labels', 'fluor_channel_names',
'fluor_channel_thresholds', 'fluor_channel_timepoints',
'timepoint_spacing', 'hole_fill_area',
'cleanup', 'perform_registration', 'max_proportion_exposed_edge',
'cell_intensity_num', 'input_path',
'output_path', 'im_file_extension', 'label_order_list',
'max_xy_position_num', 'first_timepoint', 'max_timepoint_num',
'first_xy_position', 'extended_display_positions',
'timepoint_label_prefix', 'position_label_prefix',
'main_channel_label', 'main_channel_imagetype', 'im_format',
'linked_phase', 'max_area_pixel_decrease',
'max_area_fold_decrease', 'max_area_fold_increase',
'min_colony_area', 'max_colony_area', 'min_correlation',
'min_foldX', 'minimum_growth_time', 'growth_window_timepoints',
'min_neighbor_dist', 'max_colony_num']
required_fields_minimal = \
['fluor_channel_scope_labels', 'fluor_channel_names',
'fluor_channel_thresholds', 'fluor_channel_timepoints',
'input_path', 'first_xy_position', 'extended_display_positions',
'timepoint_label_prefix',
'output_path', 'im_file_extension', 'label_order_list',
'max_xy_position_num',
'position_label_prefix',
'im_format',
'linked_phase']
class _ImageRetriever(object):
'''
Retrieves image for current position, channel, timepoint
'''
def get_image(self, timepoint, position, channel_label):
'''
Returns single cv2 image for current timepoint, position, and
channel_label
'''
pass
class _IndivImageRetriever(_ImageRetriever):
'''
Retrieves image for current position, channel, timepoint for images
saved as individual files for every timepoint/color
'''
def get_image(self, **kwargs):
'''
Returns single cv2 image from current im_filepath
(Doesn't use timepoint or channel information passed to it)
'''
### !!! NEEDS UNITTEST
image = cv2.imread(kwargs['im_filepath'], cv2.IMREAD_ANYDEPTH)
# cv2.imread returns none if im_filepath doesn't exist
return(image)
class _StackImageRetriever(_ImageRetriever):
'''
Retrieves image for current position, channel, timepoint for images
saved as stack of images for every timepoint/color
'''
#TODO: Integrate micromanager and NIS elements imagestacks - currently can only read data saved as individual tifs
def __init__(self, im_path):
pass
def get_image(self, **kwargs):
'''
Returns single cv2 image for current timepoint, position, and
channel_label
'''
pass
class MinimalAnalysisConfig(object):
'''
Handles experimental configuration details in for experiments
without timepoints or main channel
'''
def __init__(self, phase_num, input_path, output_path, im_file_extension,
label_order_list, max_xy_position_num, position_label_prefix,
fluor_channel_df, im_format, extended_display_positions,
timepoint_label_prefix, xy_position_vector):
'''
Reads setup_file and creates analysis configuration
'''
# max timepoint number
self.max_timepoint_num = 1
# specify phase
self.phase_num = phase_num
# max xy position label
self.max_xy_position_num = int(max_xy_position_num)
# path of input images
self.input_path = input_path
# path of output image folder
self.output_path = output_path
# file extension of input images
self.im_file_extension = im_file_extension.strip(punctuation)
# labels used for xy position
self.position_label_prefix = position_label_prefix
# labels used for timepoint number
self.timepoint_label_prefix = timepoint_label_prefix
self.fluor_channel_df = fluor_channel_df
# column names: fluor_channel_label,
# fluor_channel_column_name, fluor_threshold
# save order in which time, position, channel labels are listed
self.label_order_list = label_order_list
self._check_label_order_list()
# set up folder to save outputs
self._create_output_paths()
# set up list of possible xy positions
self.xy_position_vector = xy_position_vector
# find size of images
self._find_im_size()
# set up image retriever depending on im_format
if im_format == 'individual':
self.image_retriever = _IndivImageRetriever()
else:
raise ValueError('image format ' + im_format + ' not recognized')
if isinstance(extended_display_positions, list):
self.extended_display_positions = extended_display_positions
else:
self.extended_display_positions = [int(extended_display_positions)]
def __eq__(self, other):
identical_vars = self.__dict__ == other.__dict__
identical_class = self.__class__ == other.__class__
return(identical_vars and identical_class)
def _check_fluor_channel_label_spec(self):
'''
Check whether there are fluorescent channels that should have
labels in filenames
'''
fluor_channel_labels_specified = \
len(self.fluor_channel_df.index)>0 and (
len(self.fluor_channel_df.index)>1 or
any(self.fluor_channel_df.fluor_channel_label != '') or
any(pd.notnull(self.fluor_channel_df.fluor_channel_label))
)
return(fluor_channel_labels_specified)
def _check_label_order_list(self):
'''
Check that only elements specified in label_order_list have
prefixes (i.e. are expected to be in filenames)
'''
if self.timepoint_label_prefix and \
'timepoint' not in self.label_order_list:
raise ValueError(
"'timepoint' missing from label_order_list; either include it, "
"or set timepoint_label_prefix to a blank value"
)
if self.position_label_prefix and \
'position' not in self.label_order_list:
raise ValueError(
"'position' missing from label_order_list; either include it, "
"or set position_label_prefix to a blank value"
)
fluor_channel_labels_specified = self._check_fluor_channel_label_spec()
if fluor_channel_labels_specified and \
'channel' not in self.label_order_list:
raise ValueError(
"'channel' missing from label_order_list; either include it, "
"or don't specify any fluorescent channels with "
"fluor_channel_scope_labels"
)
def _create_output_paths(self):
'''
Creates output paths for phase data, and sets output paths for
growth rate and colony property dataframes
'''
self._create_phase_output()
self.combined_gr_write_path = \
os.path.join(self.output_path, 'growth_rates_combined.csv')
self.combined_tracked_properties_write_path = \
os.path.join(self.output_path, 'colony_properties_combined.csv')
self.col_properties_output_folder = \
os.path.join(self.output_path, 'positionwise_colony_properties')
self.movie_folder = \
os.path.join(self.output_path, 'movies')
		os.makedirs(self.col_properties_output_folder, exist_ok = True)
def _create_phase_output(self):
'''
Creates folder for results of colony properties across phases,
as well as within-phase results, if they don't already exist
'''
# NEED UNITTEST FOR JUST THIS METHOD?
self.phase_output_path = os.path.join(self.output_path,
('phase_' + str(self.phase_num)))
		os.makedirs(self.phase_output_path, exist_ok = True)
self.phase_col_property_mats_output_folder = \
os.path.join(self.phase_output_path,
'positionwise_colony_property_matrices')
		os.makedirs(self.phase_col_property_mats_output_folder, exist_ok = True)
# create filename for growth rate output file for current phase
self.phase_gr_write_path = \
os.path.join(self.phase_output_path, 'growth_rates.csv')
# create filename for filtered colony output file for current
# phase
self.filtered_colony_file = \
os.path.join(self.phase_output_path, 'filtered_colonies.csv')
def _find_im_size(self):
'''
Assumes all images are the same size
'''
# find image files in self.input_path
im_files = [
f for f in os.listdir(self.input_path)
if f.endswith(self.im_file_extension)
]
		# open *some* input image; raise a clear error if none were found
		if len(im_files) == 0:
			raise FileNotFoundError(
				'No images with extension ' + self.im_file_extension +
				' found in ' + self.input_path
				)
		im_to_use = im_files[0]
self.size_ref_im = im_to_use
# NB: Pillow doesn't open jpegs saved through matlab with weird
# bitdepths, i.e. in old matlab PIE code
with Image.open(os.path.join(self.input_path,im_to_use)) as im:
self.im_width, self.im_height = im.size
def _reformat_values(self, int_to_format, max_val_num):
'''
Returns int_to_format as string, padded with 0s to match the
number of digits in max_val_num
If int_to_format is None, returns empty string
If int_to_format is special character #, returns a string with
'#' repeated the same number of times as digits in max_val_num
If int_to_format is special character *, returns a string of a
glob expression specifying 0-9 repeated the same number of
times as digits in max_val_num
'''
### !!! NEEDS UNITTEST
digit_num = np.ceil(np.log10(max_val_num+1)).astype(int)
if int_to_format is None:
formatted_string = ''
elif int_to_format == '#':
formatted_string = '#'*digit_num
elif int_to_format == '*':
formatted_string = '[0-9]'*digit_num
else:
formatted_string = '{:0>{}d}'.format(int_to_format, digit_num)
return(formatted_string)
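	# Illustrative sketch (hypothetical values) of _reformat_values behavior,
	# assuming max_val_num = 100 (i.e. three digits):
	#   self._reformat_values(7, 100)    -> '007'
	#   self._reformat_values(None, 100) -> ''
	#   self._reformat_values('#', 100)  -> '###'
	#   self._reformat_values('*', 100)  -> '[0-9][0-9][0-9]'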
def generate_filename(self, timepoint, position, channel_label):
'''
Returns filename for image file given timepoint, position,
channel_label, as well as its image label (filename without
extension)
If timepoint, position, or channel_label is None, they are not
included
If timepoint or position is special character #, time/position
digits are replaced with # repeated the appropriate number of
times
If timepoint or position is special character *, time/position
digits are replaced with a glob expression searching for a
digit present the correct number of times
'''
### !!! NEEDS UNITTEST
im_label = self.create_file_label(timepoint, position, channel_label)
im_filepath = os.path.join(self.input_path, im_label + '.' +
self.im_file_extension)
return(im_filepath, im_label)
def create_file_label(self, timepoint, position, channel_label):
'''
Creates label for image filename, concatenating formatted
timepoint, xy position, and provided channel label in the
correct order
If timepoint, position, or channel_label is None, they are not
included
If timepoint or position is special character #, time/position
digits are replaced with # repeated the appropriate number of
times
If timepoint or position is special character *, time/position
digits are replaced with a glob expression searching for a
digit present the correct number of times
'''
### !!! NEEDS UNITTEST
current_timepoint_str = str(self.timepoint_label_prefix) + \
self._reformat_values(timepoint, self.max_timepoint_num)
current_position_str = str(self.position_label_prefix) + \
self._reformat_values(position, self.max_xy_position_num)
current_point_label_dict = \
{'timepoint': current_timepoint_str,
'channel': channel_label,
'position': current_position_str}
file_label = ''
# loop through ordered list of labels and append correct info to
# filename one-by-one
for label_key in self.label_order_list:
current_label = current_point_label_dict[label_key]
# current label may be np.nan if channel not specified
if isinstance(current_label, str) and current_label != '':
				file_label = file_label + current_label
return(file_label)
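	# Illustrative sketch (hypothetical configuration): with
	# label_order_list = ['timepoint', 'position', 'channel'],
	# timepoint_label_prefix = 't', position_label_prefix = 'xy',
	# max_timepoint_num = 50, and max_xy_position_num = 100,
	#   self.create_file_label(3, 15, 'GFP') -> 't03xy015GFP'
	# and generate_filename would prepend input_path and append the
	# im_file_extension to that label.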
def get_image(self, timepoint, channel):
'''
Returns an image at current xy position for timepoint and
channel, as well as the image's 'image label' (filename without
extension) and the time (in seconds) at which it was taken
'''
### !!! NEEDS UNITTEST
im_filepath, im_label = \
self.generate_filename(timepoint, self.xy_position_idx, channel)
image = self.image_retriever.get_image(im_filepath = im_filepath,
timepoint = timepoint, channel = channel)
# get image time
if image is None or timepoint is None:
image_time = None
else:
# if timepoint dict exists, get time value from there;
# otherwise, get it from the file modification date
if self.timepoint_dict:
image_time = self.timepoint_dict[timepoint]
else:
image_time = os.path.getmtime(im_filepath)
return(image, im_label, image_time)
def set_xy_position(self, xy_position_idx):
'''
Sets the xy position to be used by the analysis config
'''
### !!! NEEDS UNITTEST
if xy_position_idx not in self.xy_position_vector:
raise IndexError('Unexpected xy position index ' + str(xy_position_idx) +
' in phase ' + str(self.phase_num))
# current position being imaged
self.xy_position_idx = xy_position_idx
		# determine whether non-essential info (threshold plot outputs,
# boundary images, etc) need to be saved for this experiment
self.save_extra_info = \
xy_position_idx in self.extended_display_positions
# create filename for tracked colony properties output file for
# current xy position
self.tracked_properties_write_path = \
os.path.join(self.col_properties_output_folder,
'xy_' + str(xy_position_idx) +
'_col_props_with_tracking_pos.parquet')
class AnalysisConfig(MinimalAnalysisConfig):
'''
Handles experimental configuration details
'''
def __init__(self, phase_num, hole_fill_area, cleanup, perform_registration,
max_proportion_exposed_edge, cell_intensity_num,
input_path, output_path, im_file_extension,
label_order_list, max_xy_position_num, first_timepoint,
max_timepoint_num, timepoint_spacing, timepoint_label_prefix,
position_label_prefix, main_channel_label, main_channel_imagetype,
fluor_channel_df, im_format, extended_display_positions,
xy_position_vector, minimum_growth_time,
growth_window_timepoints, max_area_pixel_decrease,
max_area_fold_decrease, max_area_fold_increase, min_colony_area,
max_colony_area, min_correlation, min_foldX, min_neighbor_dist, max_colony_num):
'''
Reads setup_file and creates analysis configuration
'''
# set up channel labels
self.main_channel_label = main_channel_label
super(AnalysisConfig, self).__init__(
phase_num, input_path, output_path, im_file_extension,
label_order_list, max_xy_position_num, position_label_prefix,
fluor_channel_df, im_format, extended_display_positions,
timepoint_label_prefix, xy_position_vector
)
# max timepoint number
self.max_timepoint_num = int(max_timepoint_num)
# specify image analysis parameters
self.hole_fill_area = float(hole_fill_area)
self.cleanup = bool(cleanup)
self.perform_registration = bool(perform_registration)
self.max_proportion_exposed_edge = float(max_proportion_exposed_edge)
self.cell_intensity_num = int(cell_intensity_num)
# specify growth rate analysis parameters
self.minimum_growth_time = int(minimum_growth_time)
growth_window_timepoint_int = int(growth_window_timepoints)
if growth_window_timepoint_int == 0:
self.growth_window_timepoints = self.max_timepoint_num
else:
self.growth_window_timepoints = growth_window_timepoint_int
self.max_area_pixel_decrease = float(max_area_pixel_decrease)
self.max_area_fold_decrease = float(max_area_fold_decrease)
self.max_area_fold_increase = float(max_area_fold_increase)
self.min_colony_area = float(min_colony_area)
self.max_colony_area = float(max_colony_area)
self.min_correlation = float(min_correlation)
self.min_foldX = float(min_foldX)
self.min_neighbor_dist = float(min_neighbor_dist)
self.max_colony_num = float(max_colony_num)
# set up dictionary of timepoint times
self._set_up_timevector(timepoint_spacing, first_timepoint)
# find time of first existing file
self._find_first_timepoint()
# specify type of image ('bright' or 'dark') that is in
# the main channel
self.main_channel_imagetype = main_channel_imagetype
self._run_parameter_tests()
def _check_fluor_channel_label_spec(self):
'''
Check whether there are fluorescent channels that should have
labels in filenames
'''
fluor_channel_labels_specified = \
self.main_channel_label or (
len(self.fluor_channel_df.index)>0 and (
len(self.fluor_channel_df.index)>1
or
any(self.fluor_channel_df.fluor_channel_label != '')
or
any(pd.notnull(
self.fluor_channel_df.fluor_channel_label
))
)
)
return(fluor_channel_labels_specified)
def _set_up_timevector(self, timepoint_spacing, first_timepoint):
'''
Sets up a dictionary of timepoint times, in seconds, at which
images were taken, if such information is provided
timepoint spacing can be:
a vector of seconds at each timepoint
a single number of the elapsed seconds between timepoints
None, in which case this information is taken from file
modification time
Also creates a list of timepoints
'''
### !!! NEEDS UNITTEST
self.timepoint_list = \
range(first_timepoint, (self.max_timepoint_num + 1))
if type(timepoint_spacing) is list:
self.timepoint_dict = \
dict(zip(self.timepoint_list, timepoint_spacing))
elif type(timepoint_spacing) is int or type(timepoint_spacing) is float:
timepoint_spacing_vector = \
list(np.array(self.timepoint_list, dtype = float) *
timepoint_spacing)
self.timepoint_dict = \
dict(zip(self.timepoint_list, timepoint_spacing_vector))
elif timepoint_spacing is None:
self.timepoint_dict = None
else:
raise TypeError('timepoint_spacing must be either a list of ' +
'numbers of same length as the number of timepoints in the ' +
'experiment, a single integer/float, or None')
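	# Illustrative sketch (hypothetical values): with first_timepoint = 1,
	# max_timepoint_num = 3, and timepoint_spacing = 600 (seconds),
	# self.timepoint_dict becomes {1: 600.0, 2: 1200.0, 3: 1800.0};
	# i.e. each time is the timepoint index multiplied by the spacing.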
def _find_first_timepoint(self):
'''
Finds and writes the time of the first timepoint of this imaging
phase
'''
### !!! NEEDS UNITTEST
first_timepoint_file = \
os.path.join(self.phase_output_path, 'first_timepoint_time.txt')
try:
with open(first_timepoint_file) as f:
self.first_timepoint_time = int(f.readline())
		except (FileNotFoundError, ValueError):
if self.timepoint_dict is None:
# if no timepoint dict, find the modification time of
# the first image captured in this phase
self.first_timepoint_time = np.inf
for current_file in os.listdir(self.input_path):
if current_file.endswith(self.im_file_extension):
current_time = \
os.path.getmtime(os.path.join(self.input_path,
current_file))
self.first_timepoint_time = \
np.min([self.first_timepoint_time, current_time])
else:
self.first_timepoint_time = \
self.timepoint_dict[self.timepoint_list[0]]
# write to text file
with open(first_timepoint_file, 'w') as f:
f.write('%d' % self.first_timepoint_time)
def _run_parameter_tests(self):
'''
Runs tests to ensure certain parameters have correct values
'''
if self.min_colony_area < 0:
raise ValueError('min_colony_area must be 0 or more')
def get_position_colony_data_tracked_df(self, remove_untracked = False):
'''
Reads and returns tracked colony properties for the current
xy position
If remove_untracked is true, removes any rows with missing
time_tracking_id (corresponding to colonies that weren't tracked
because e.g. they are a minor piece of a broken-up colony)
'''
pos_tracked_col_prop_df = \
pd.read_parquet(self.tracked_properties_write_path)
if remove_untracked:
pos_tracked_col_prop_df = \
pos_tracked_col_prop_df[
pos_tracked_col_prop_df.time_tracking_id.notna()]
return(pos_tracked_col_prop_df)
def get_colony_data_tracked_df(self, remove_untracked = False,
filter_by_phase = True):
'''
Reads and returns dataframe of tracked phase colony properties
output
If remove_untracked is True,
removes any rows with missing time_tracking_id (corresponding
to colonies that weren't tracked because e.g. they are a minor
piece of a broken-up colony)
'''
colony_properties_df_total = \
pd.read_csv(self.combined_tracked_properties_write_path)
if filter_by_phase:
colony_properties_df = colony_properties_df_total[
colony_properties_df_total.phase_num == self.phase_num]
else:
colony_properties_df = colony_properties_df_total
if remove_untracked and not colony_properties_df.empty:
colony_properties_df = \
colony_properties_df[
colony_properties_df.time_tracking_id.notna()]
return(colony_properties_df)
def get_gr_data(self):
'''
Reads and returns dataframe of growth rates
'''
gr_df = pd.read_csv(self.combined_gr_write_path)
return(gr_df)
def get_property_mat_path(self, col_property):
'''
Gets path to property matrix
'''
write_path = os.path.join(self.phase_col_property_mats_output_folder,
(col_property + '_property_mat.csv'))
return(write_path)
class _AnalysisConfigFileProcessor(object):
'''
Reads an analysis config csv file and creates a dictionary with an
AnalysisConfig object for each phase of the experiment; for each
phase, the 0 position in the list stored in the dictionary is the
phase analysis config, and the 1 position is the postphase analysis
config
'''
# TODO: Maybe need some safety checks to see that things you'd
# expect to be the same across all phases (e.g. position numbers)
# actually are?
def __init__(self):
# set default parameter values to be used for every phase; any
# of these that are different in the setup file will be
		# modified based on that
# don't have defaults for 'required' parameters
param_default_df = param_description_df.loc[
param_description_df.Type != 'required'
]
self._default_param_ser = pd.Series(
data = param_default_df.Default.to_list(),
index = param_default_df.Parameter.to_list()
)
def _set_global_vals(self):
'''
Check that parameters in config file related to number of
imaging positions, analysis output, and format in which input
images are saved apply across all phases, and set them as
attributes of self
'''
### !!! NEEDS UNITTEST!
required_global_params = \
param_description_df.Parameter[
param_description_df.RequiredGlobal
].to_list()
global_param_set = set(self._global_param_ser.index)
if not set(required_global_params).issubset(global_param_set):
raise ValueError((
'The following parameters must have PhaseNum set to "all",'
' or be identical across all phases: {0}\nOf these, {1} '
				'differ among phases or are missing.').format(
', '.join(required_global_params),
', '.join(list(set.difference(
set(required_global_params), global_param_set
)))
)
)
self.output_path = self._global_param_ser.output_path
self.im_format = self._global_param_ser.im_format
self.max_xy_position_num = \
self._global_param_ser.max_xy_position_num
# set up list of possible xy positions
self.xy_position_vector = \
range(self._global_param_ser.first_xy_position,
(self.max_xy_position_num + 1))
if not self._global_param_ser.extended_display_positions:
self.extended_display_positions = []
elif isinstance(
self._global_param_ser.extended_display_positions,
int):
self.extended_display_positions = [
self._global_param_ser.extended_display_positions]
else:
self.extended_display_positions = \
self._global_param_ser.extended_display_positions
def _check_phase_numbers(self, analysis_config_df_prelim):
'''
Checks that PhaseNum column contains only 'all' or integers,
throws warning about dropping any non-blanks
If PhaseNum isn't specified at all, sets it to '1' for all rows
Removes any columns where PhaseNum or Parameter not specified
correctly
'''
if not 'PhaseNum' in analysis_config_df_prelim.columns:
analysis_config_df_prelim['PhaseNum'] = str(1)
phase_num_vals = np.array([
phase.lower().strip() for phase in
analysis_config_df_prelim.PhaseNum
])
phase_num_pass_bool = np.array([
phase.isdigit() or phase=='all' for phase in phase_num_vals
])
phase_num_fail_vals = phase_num_vals[~phase_num_pass_bool]
if any(phase_num_fail_vals!=''):
drop_phases = \
list(np.unique(phase_num_fail_vals[phase_num_fail_vals!='']))
warnings.warn((
"PhaseNum may only be 'all' or an integer; dropping disallowed"
" phases {0}"
).format(str(drop_phases)), UserWarning)
# only keep rows with allowed phase num
drop_indices = analysis_config_df_prelim.index[np.logical_or(
~phase_num_pass_bool, analysis_config_df_prelim.Parameter == ''
)]
analysis_config_df_prelim.drop(index = drop_indices, inplace = True)
return(analysis_config_df_prelim)
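	# Illustrative sketch (hypothetical rows): for a PhaseNum column of
	# ['1', 'all', ' 2 ', 'three', ''], rows with '1', 'all', and '2' are
	# kept, 'three' triggers a warning and is dropped, and rows with a
	# blank PhaseNum (or a blank Parameter) are dropped without a warning.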
def _check_req_completeness(self, setup_ser, required_fields):
'''
Checks whether all parameters in setup_ser are present in
required_fields; if not, raises error
'''
missing_fields = \
set.difference(set(required_fields), set(setup_ser.index))
if len(missing_fields) > 0:
raise ValueError((
'Missing required fields {0} in PhaseNum {1}; '
'if your experiment has multiple phases, check that '
'you have specified every parameter for every phase (either '
				'individually or by marking "all" under PhaseNum)'
).format(str(missing_fields), str(setup_ser.name))
)
def _check_extra_params(self, setup_ser, required_fields):
'''
Checks whether any parameters are in setup_ser that aren't present in
required_fields; if there are, raise warning
'''
extra_fields = \
set.difference(set(setup_ser.index), set(required_fields))
if len(extra_fields) > 0:
warnings.warn(
('Unused parameters: {0}').format(str(extra_fields)),
UserWarning
)
setup_ser.drop(list(extra_fields), inplace = True)
return(setup_ser)
def _create_phase_conf_ser(
self,
template_setup_ser,
current_setup_ser,
required_fields
):
# NEED UNITTEST!!!
'''
Generates a pandas series, phase_conf_ser, that inherits
parameters from template_setup_ser unless they're also specified in
current_setup_ser, in which case the parameters in
current_setup_ser are used
'''
# take all possible fields from current_setup_ser, get missing
# ones from template_setup_ser
reqd_template_fields = \
set.difference(set(required_fields), set(current_setup_ser.index))
# create combined series from template fields that are missing
# in current_setup_ser, and current_setup_ser
template_subset_ser_to_use = \
template_setup_ser[
list(
set.intersection(
reqd_template_fields,
set(template_setup_ser.index)
)
)
]
phase_conf_ser = pd.concat(
[template_subset_ser_to_use, current_setup_ser]
)
return(phase_conf_ser)
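	# Illustrative sketch (hypothetical parameters): if template_setup_ser
	# is pd.Series({'hole_fill_area': 3000, 'cleanup': False}) and
	# current_setup_ser is pd.Series({'cleanup': True}), then with both
	# parameter names in required_fields the returned phase_conf_ser is
	# {'hole_fill_area': 3000, 'cleanup': True}: values given in
	# current_setup_ser take precedence over the template defaults.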
def _organize_config_df(self, analysis_config_df_prelim):
'''
Creates self.analysis_config_df with PhaseNum as columns, and
self._global_param_ser that contains values for global params
'''
global_param_ser_part, analysis_config_df_indiv = \
_separate_global_params(analysis_config_df_prelim)
# define phases
indiv_phases = analysis_config_df_indiv.columns.copy().to_list()
# create a subset of default parameters for phase-specific
# params only
default_param_ser_indiv = \
self._default_param_ser.drop(list(set.intersection(
set(self._default_param_ser.index),
set(global_param_ser_part.index)
)))
# for each phase, fill in missing values with defaults
analysis_config_dict = dict()
# if no linked_phase in any phases, skip over that part
if 'linked_phase' in global_param_ser_part.index:
if global_param_ser_part.linked_phase == '':
self._no_postphase = True
			elif len(analysis_config_df_indiv.columns) > 1:
raise ValueError(
'If linked_phase is specified for all phases ' +
'simultaneously, it must be left blank')
else:
self._no_postphase = False
for phase in indiv_phases:
# get only the parameters specified for the current phase
curr_phase_vals_part = analysis_config_df_indiv[phase].dropna()
if self._no_postphase or \
(('linked_phase' not in curr_phase_vals_part.index) &
(self._default_param_ser.linked_phase in ['', phase])) or \
curr_phase_vals_part.linked_phase in ['', phase]:
curr_req_fields = required_fields_general
else:
curr_req_fields = required_fields_minimal
curr_phase_vals_full = self._create_phase_conf_ser(
default_param_ser_indiv,
curr_phase_vals_part,
curr_req_fields
)
curr_phase_vals_full = \
self._check_extra_params(curr_phase_vals_full, curr_req_fields)
analysis_config_dict[phase] = curr_phase_vals_full
analysis_config_df = pd.DataFrame(analysis_config_dict)
# again, check for global parameters in analysis_config_df
new_global_param_ser_part, self.analysis_config_df = \
_get_global_params_from_phasewise_df(analysis_config_df)
# create self._global_param_ser by using default values for any
# parameters still missing from both analysis_config_df_indiv
# and from global_param_ser_part
specified_indiv_phase_default_params = list(set.intersection(
set(self.analysis_config_df.index),
set(self._default_param_ser.index)
))
self._global_param_ser = self._create_phase_conf_ser(
self._default_param_ser.drop(specified_indiv_phase_default_params),
global_param_ser_part.append(new_global_param_ser_part),
required_fields_general
)
self._global_param_ser = self._check_extra_params(
self._global_param_ser, required_fields_general
)
if len(indiv_phases)>0:
self.phases = indiv_phases
else:
self.phases = [1]
def _create_fluor_channel_df(self, phase_conf_ser, phase_num):
'''
Creates a dataframe from phase_conf_ser with info on every
fluorescent channel imaged
'''
### !!! NEED UNITTEST?
# create empty df if every fluor property is an empty string
list_of_fluor_properties = [phase_conf_ser.fluor_channel_scope_labels,
phase_conf_ser.fluor_channel_names,
phase_conf_ser.fluor_channel_thresholds,
phase_conf_ser.fluor_channel_timepoints]
if all([x == '' for x in list_of_fluor_properties]):
fluor_channel_df = pd.DataFrame(columns =
['fluor_channel_label', 'fluor_channel_column_name',
'fluor_threshold', 'fluor_timepoint'])
else:
# create df with a row for every channel
# (use np.size here, not len function, to get accurate
# lengths for single-string fluor_channel_scope_labels)
channel_num = np.size(phase_conf_ser.fluor_channel_scope_labels)
fluor_channel_df = \
pd.DataFrame({
'fluor_channel_label':
phase_conf_ser.fluor_channel_scope_labels,
'fluor_channel_column_name':
phase_conf_ser.fluor_channel_names,
'fluor_threshold':
phase_conf_ser.fluor_channel_thresholds,
'fluor_timepoint':
phase_conf_ser.fluor_channel_timepoints},
index = np.arange(0, channel_num))
# raise error if only some fluor properties are empty strings
mutually_required_fluor_properties = [
'fluor_channel_column_name',
'fluor_threshold',
'fluor_timepoint']
for prop in mutually_required_fluor_properties:
			if (fluor_channel_df[prop] == '').any():
raise ValueError(
prop +
' is not set for one of the channels in phase ' +
str(phase_num) +
'; these values must either all be left blank, or '
'all filled')
# raise error if any non-unique values in columns
unique_properties = [
'fluor_channel_label',
'fluor_channel_column_name']
for prop in unique_properties:
if not fluor_channel_df[prop].is_unique:
raise ValueError(
'Non-unique values identified in ' + prop +
					' for phase ' + str(phase_num) + ': ' +
str(fluor_channel_df[prop]))
return(fluor_channel_df)
def _create_analysis_config(self, phase_num, phase_conf_ser):
'''
Creates AnalysisConfig object based on phase_conf_ser, the
series corresponding to the Value column of the subset of
self.analysis_config_df that applies to the current phase
'''
### NEED UNITTEST FOR JUST THIS METHOD?
self._check_req_completeness(phase_conf_ser, required_fields_general)
fluor_channel_df = \
self._create_fluor_channel_df(phase_conf_ser, phase_num)
# if timepoint spacing tab is empty, set timepoint_spacing to
# None (i.e. get info from files)
if phase_conf_ser.timepoint_spacing == '':
timepoint_spacing = None
else:
timepoint_spacing = phase_conf_ser.timepoint_spacing
# create AnalysisConfig object
current_analysis_config = AnalysisConfig(
phase_num,
phase_conf_ser.hole_fill_area,
phase_conf_ser.cleanup,
phase_conf_ser.perform_registration,
phase_conf_ser.max_proportion_exposed_edge,
phase_conf_ser.cell_intensity_num,
phase_conf_ser.input_path,
phase_conf_ser.output_path,
phase_conf_ser.im_file_extension,
phase_conf_ser.label_order_list,
phase_conf_ser.max_xy_position_num,
phase_conf_ser.first_timepoint,
phase_conf_ser.max_timepoint_num,
timepoint_spacing,
phase_conf_ser.timepoint_label_prefix,
phase_conf_ser.position_label_prefix,
phase_conf_ser.main_channel_label,
phase_conf_ser.main_channel_imagetype,
fluor_channel_df,
phase_conf_ser.im_format,
self.extended_display_positions,
self.xy_position_vector,
phase_conf_ser.minimum_growth_time,
phase_conf_ser.growth_window_timepoints,
phase_conf_ser.max_area_pixel_decrease,
phase_conf_ser.max_area_fold_decrease,
phase_conf_ser.max_area_fold_increase,
phase_conf_ser.min_colony_area,
phase_conf_ser.max_colony_area,
phase_conf_ser.min_correlation,
phase_conf_ser.min_foldX,
phase_conf_ser.min_neighbor_dist,
phase_conf_ser.max_colony_num)
return(current_analysis_config)
def _create_postphase_analysis_config(self, phase_num, phase_conf_ser):
'''
Creates MinimalAnalysisConfig object based on phase_conf_ser,
the series corresponding to the Value column of the subset of
self.analysis_config_df that applies to the current phase
'''
### NEED UNITTEST FOR JUST THIS METHOD?
self._check_req_completeness(phase_conf_ser, required_fields_minimal)
fluor_channel_df = \
self._create_fluor_channel_df(phase_conf_ser, phase_num)
# create MinimalAnalysisConfig object
postphase_analysis_config = MinimalAnalysisConfig(
phase_num,
phase_conf_ser.input_path,
phase_conf_ser.output_path,
phase_conf_ser.im_file_extension,
phase_conf_ser.label_order_list,
phase_conf_ser.max_xy_position_num,
phase_conf_ser.position_label_prefix,
fluor_channel_df,
phase_conf_ser.im_format,
self.extended_display_positions,
phase_conf_ser.timepoint_label_prefix,
self.xy_position_vector
)
return(postphase_analysis_config)
def _create_analysis_config_df(self):
'''
Loops through phases and creates a pandas df of AnalysisConfig
objects
'''
# NEED UNITTEST FOR JUST THIS METHOD?
# create a pandas df for storing AnalysisConfig objects
analysis_config_obj_df = \
pd.DataFrame({'analysis_config': None,
'postphase_analysis_config': None}, index = self.phases)
# create a phase setup series containing the parameters that
# apply to all phases
for phase_num in self.phases:
# get phase setup series containing the parameters that
# apply only to the current phase_num
# safe to simply concatenate parameter series here because
# all defaults should be filled in and all global
# parameters separated out
if len(self.analysis_config_df.columns)==0 and \
len(self.analysis_config_df.index)==0:
# all parameters are global
current_phase_setup = self._global_param_ser.copy()
else:
current_phase_setup = pd.concat([
self._global_param_ser,
self.analysis_config_df[phase_num].dropna()
])
current_phase_setup.name = phase_num
# set where to store object
if self._no_postphase or current_phase_setup.linked_phase in \
['',phase_num]:
storage_phase = phase_num
config_type = 'analysis_config'
# create AnalysisConfig object from current phase setup ser
current_analysis_config = \
self._create_analysis_config(
phase_num,
current_phase_setup
)
elif not (
self.analysis_config_df.at[
'linked_phase', current_phase_setup.linked_phase
]
in ['', current_phase_setup.linked_phase]
):
# check that linked_phase doesn't have its own
# linked_phase
raise ValueError(
(
'Phase {0}\'s linked phase listed as Phase {1}, but '
'that has its own linked phase.'
).format(
str(phase_num),str(current_phase_setup.linked_phase)
)
)
else:
# treat as postphase
storage_phase = current_phase_setup.linked_phase
config_type = 'postphase_analysis_config'
# create AnalysisConfig object from current phase setup ser
current_analysis_config = \
self._create_postphase_analysis_config(
phase_num,
current_phase_setup
)
# store AnalysisConfig object in pandas df
analysis_config_obj_df.at[storage_phase, config_type] = \
current_analysis_config
# in case any phase rows are empty (because those phases are
# child phases of other phases), remove rows with all None
		analysis_config_obj_df.dropna(axis = 0, how = 'all', inplace = True)
return(analysis_config_obj_df)
def _check_file_existance(self, analysis_config_obj_df):
"""
Print expected generic input file for each phase and print
warning if no expected input files found
"""
for phase_num, row in analysis_config_obj_df.iterrows():
analysis_config = row.analysis_config
generic_file, _ = analysis_config.generate_filename(
'#', '#', analysis_config.main_channel_label
)
glob_filename, _ = analysis_config.generate_filename(
'*', '*', analysis_config.main_channel_label
)
			file_expect_str = \
				"Expected input file location and format for main channel " + \
				"(colony recognition) images from phase {phase_num} is:\n" + \
				"{generic_file}"
print(file_expect_str.format(phase_num=phase_num,generic_file=generic_file))
curr_files = glob.glob(glob_filename)
if len(curr_files)==0:
warnings.warn(
f"No expected main channel files found for phase {phase_num}"
)
def process_analysis_config_file(self, analysis_config_path):
'''
Reads csv file in analysis_config_path and creates pandas df of
AnalysisConfig objects for each phase
'''
# read in config file
# convert strings to int where possible, and convert values
# separated by semicolon to lists
analysis_config_df_prelim = pd.read_csv(
analysis_config_path,
dtype = {'PhaseNum': str},
converters =
{'Value': _process_parameter_vals},
na_filter = False
)
# check file format
if not {'Parameter','Value'}.issubset(
set(analysis_config_df_prelim.columns)
):
raise IndexError(
'Could not find columns Parameter and Value in your'
' setup file, ' + analysis_config_path + '; the most common '
'cause of this is that your file is not correctly saved in '
'comma-separated mode. You can check this by opening the file '
'in a text editor (e.g. wordpad). If unable to resolve this '
'issue, create a new setup file from scratch using PIE\'s '
'setup wizard'
)
# add PhaseNum if it's not specified
# drop rows where PhaseNum or Parameter is not specified correctly
analysis_config_df_prelim_phase_filt = \
self._check_phase_numbers(analysis_config_df_prelim)
# set up global and phase-specific parameter dfs/series
self._organize_config_df(analysis_config_df_prelim_phase_filt)
# check that phase for global parameters correctly specified,
# and set them as attributes
self._set_global_vals()
# create df of analysis config objects
analysis_config_obj_df = self._create_analysis_config_df()
# test that expected files exist
self._check_file_existance(analysis_config_obj_df)
return(analysis_config_obj_df)
class _SetupWizard(object):
'''
Queries and holds parameter values for creating setup file
'''
def __init__(self):
self.param_description_df = param_description_df.set_index('Parameter')
params_by_type_df = param_description_df[['Parameter','Type']].groupby('Type')['Parameter'].apply(list)
self._param_dict_by_type = dict(params_by_type_df)
self._type_list_optional = list(self._param_dict_by_type.keys())
self._type_list_optional.remove('required')
self._type_list_optional.remove('postphase')
self._type_list_optional_postphase = self._type_list_optional.copy()
self._type_list_optional_postphase.remove('fluorescence measurements')
required_global_params = \
param_description_df.Parameter[
param_description_df.RequiredGlobal
].to_list()
self.required_global_param_df = pd.DataFrame(
index = required_global_params,
columns = ['Value','PhaseNum']
)
self.required_global_param_df.PhaseNum = 'all'
def _get_setup_file_path(self):
'''
Query full path to setup file
Check that path is a .csv file, and no file already exists there
If necessary, create directory that setup file will be in
'''
while True:
setup_file_path = \
input(
"\n"
"Enter the full path to the setup file you want to create\n"
"Note: if your path contains backslash ('\\') characters "
"(e.g. on Windows) you will need to use a double backslash "
"instead ('\\\\') when specifying the path\n"
"Setup file path: "
)
setup_file_extension = os.path.splitext(setup_file_path)[1]
if setup_file_extension != '.csv':
print('\nSetup file must be a .csv file.')
elif os.path.exists(setup_file_path):
print('\nA file already exists at this location.')
else:
setup_file_dir = os.path.dirname(setup_file_path)
				if setup_file_dir:
					os.makedirs(setup_file_dir, exist_ok = True)
self.setup_file_path = setup_file_path
break
def _get_num_main_phases(self):
'''
Get the number of colony recognition phases in the
experiment
'''
while True:
num_main_phases = \
input(
"\nHow many independent phases of colony recognition "
"imaging (brightfield/phase contrast) does your experiment include? "
)
try:
num_main_phases = int(num_main_phases)
if num_main_phases > 0:
self.num_main_phases = int(num_main_phases)
break
				else:
					print("\nThe number of phases must be greater than 0.\n")
continue
except ValueError:
print("\nThe number of phases entered must be an integer.\n")
continue
def _query_param(self, param, skip_default):
'''
Get user input for param, using param as key to get Explanation
from self.param_description_df
If skip_default is False, offers user to press Enter for
default value
'''
explanation = self.param_description_df.at[param, 'Explanation']
default_val = self.param_description_df.at[param, 'Default']
allow_blank = self.param_description_df.at[param, 'AllowBlank']
if allow_blank:
leave_blank_statement = "type 'None' or 'NA' to leave blank\n"
else:
leave_blank_statement = ""
while True:
if skip_default:
param_input = input((
"\n{0}:\n{1}\n{2}Enter {0}: "
).format(param, explanation, leave_blank_statement))
if param_input.strip().lower() in ['na','nan','none','']:
if allow_blank:
param_val = ''
break
else:
print('\nMust enter non-blank, non-NA value for '+param)
continue
else:
param_val = param_input.strip()
break
else:
param_input = input((
"\n{0}:\n{1}\npress Enter for default value ({2}); "
"{3}Enter {0}: "
).format(
param, explanation, default_val, leave_blank_statement
))
if param_input.strip().lower() == '':
param_val = default_val
break
elif param_input.strip().lower() in ['na','nan','none']:
if allow_blank:
param_val = ''
break
else:
print('\nMust enter non-blank, non-NA value for '+param)
continue
else:
param_val = param_input.strip()
break
return(param_val)
def _yes_or_no(self, question_string):
'''
Asks a yes or no question, returns True if yes, False if no
'''
while True:
out_string = input(question_string+'\n[yes/no] ')
if out_string.strip().lower() in ['y','yes']:
out_bool = True
break
elif out_string.strip().lower() in ['n','no']:
out_bool = False
break
else:
print("\nMust enter 'yes' or 'no'")
continue
return(out_bool)
def _order_params(self, param_list):
'''
Reorder params by the order they are found in
param_description_df.Parameter
'''
sorted_params = sorted(
param_list, key = param_description_df.Parameter.to_list().index
)
return(sorted_params)
def _loop_through_param_type(
self,
param_type,
required_fields,
phase_intro_text,
skip_default = False
):
'''
Get parameter values for parameters whose Type in
self.param_description_df corresponds to param_type, and which
are in required_fields
Return dataframe with parameter values
'''
# get list of parameters to query
all_curr_param_set = \
set.intersection(
set(self._param_dict_by_type[param_type]),
set(required_fields)
)
global_params = self.required_global_param_df.index
filled_global_params = self.required_global_param_df.index[
self.required_global_param_df.Value.notnull()
]
curr_params = self._order_params(
list(all_curr_param_set - set(filled_global_params))
)
# only ask about modifying parameters if they exist and if they
# are not required to enter (i.e. skip_default is False)
if len(curr_params) > 0 and not skip_default:
curr_default_val_df = \
self.param_description_df.loc[
curr_params
][['Default']]
modify_param_question = (
"\nDo you want to modify defaults for any {0} "
"parameters?\n{1}"
# ).format(param_type, ', '.join(curr_params))
).format(param_type, str(curr_default_val_df))
modify_params = self._yes_or_no(modify_param_question)
else:
modify_params = False
if (modify_params or skip_default) and len(curr_params) > 0:
# initialize df with parameter values
current_param_df = pd.DataFrame(
columns = ['Value'],
index = list(set(curr_params)-set(global_params))
)
# query user for each parameter value
print(phase_intro_text)
for param in curr_params:
if param in global_params:
self.required_global_param_df.at[param, 'Value'] = \
self._query_param(param, skip_default)
else:
current_param_df.at[param, 'Value'] = \
self._query_param(param, skip_default)
else:
current_param_df = pd.DataFrame(columns = ['Value'])
return(current_param_df)
def _ordinal(self, num):
'''
Converts number to ordinal (e.g. 111 to '111th', etc)
'''
num_str = str(num)
if num > 9:
secondToLastDigit = num_str[-2]
if secondToLastDigit == '1':
return(num_str+'th')
lastDigit = num % 10
if (lastDigit == 1):
return(num_str+'st')
elif (lastDigit == 2):
return(num_str+'nd')
elif (lastDigit == 3):
return(num_str+'rd')
else:
return(num_str+'th')
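	# Illustrative examples of _ordinal output:
	#   self._ordinal(1)   -> '1st'
	#   self._ordinal(2)   -> '2nd'
	#   self._ordinal(3)   -> '3rd'
	#   self._ordinal(11)  -> '11th'
	#   self._ordinal(22)  -> '22nd'
	#   self._ordinal(111) -> '111th'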
def _get_global_params(self):
'''
Get parameter values that are required to be global
'''
phase_preintro_text = 'For ALL imaging phases:'
phase_intro_text = (
'\n{0}\n{1}'
).format(phase_preintro_text, '-'*len(phase_preintro_text))
required_fields_global = list(set.intersection(
set(required_fields_general),
set(self.required_global_param_df.index)
))
# don't read output param_df because it should be empty
# only self.required_global_param_df should get filled
self._loop_through_param_type(
'required',
required_fields_global,
phase_intro_text,
skip_default = True
)
for param_type in self._type_list_optional:
self._loop_through_param_type(
param_type,
required_fields_global,
phase_intro_text
)
def _get_regular_phase_params(self, mainphase_counter_ordinal):
'''
Get parameter values for regular experimental phase (with
brightfield/phase-contrast images to segment)
'''
phase_preintro_text = (
'For the {0}main imaging phase:'
).format(mainphase_counter_ordinal)
phase_intro_text = (
'\n{0}\n{1}'
).format(phase_preintro_text, '-'*len(phase_preintro_text))
param_df_req = \
self._loop_through_param_type(
'required',
required_fields_general,
phase_intro_text,
skip_default = True
)
param_df_list = [param_df_req]
for param_type in self._type_list_optional:
curr_param_df = \
self._loop_through_param_type(
param_type,
required_fields_general,
phase_intro_text
)
param_df_list.append(curr_param_df)
curr_phase_param_df = | pd.concat(param_df_list) | pandas.concat |
import sys
import pytz
import hashlib
import numpy as np
import pandas as pd
from datetime import datetime
def edit_form_link(link_text='Submit edits'):
"""Return HTML for link to form for edits"""
return f'<a href="https://docs.google.com/forms/d/e/1FAIpQLScw8EUGIOtUj994IYEM1W7PfBGV0anXjEmz_YKiKJc4fm-tTg/viewform">{link_text}</a>'
def add_google_analytics(input_html):
"""
Return HTML with Google Analytics block added
"""
ga_block = """
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-173043454-1"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-173043454-1');
</script>
"""
output_html = input_html.replace('<!-- replace with google analytics -->', ga_block)
return output_html
def add_geojson(shape_gdf, field_name, field_value, input_html):
"""
Add a GeoJSON feature as a Javascript variable to an HTML string
This variable will be used to calculate the bounds of the map
"""
shape_row = shape_gdf[shape_gdf[field_name] == field_value].copy()
shape_geo = shape_row.geometry.iloc[0]
geo_bounds = shape_geo.boundary[0].xy
output_string = '[['
for idx, value in enumerate(geo_bounds[0]):
if idx > 0:
output_string += ','
output_string += '['
x = geo_bounds[0][idx]
output_string += '{}'.format(x)
y = geo_bounds[1][idx]
output_string += ', {}'.format(y)
output_string += ']\n'
output_string += ']]'
output_html = input_html.replace('REPLACE_WITH_XY', output_string)
return output_html
def dc_coordinates():
"""Return coordinates for a DC-wide map"""
dc_longitude = -77.016243706276569
dc_latitude = 38.894858329321485
dc_zoom_level = 10.3
return dc_longitude, dc_latitude, dc_zoom_level
def anc_names(anc_id):
"""
Return formatted ANC names
"""
ancs = pd.read_csv('data/ancs.csv')
anc_upper = 'ANC' + anc_id
anc_lower = anc_upper.lower()
anc_neighborhoods = ancs[ancs['anc_id'] == anc_id]['neighborhoods'].values[0]
return anc_upper, anc_lower, anc_neighborhoods
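# Usage sketch (hypothetical ANC ID; neighborhoods come from data/ancs.csv):
#   anc_upper, anc_lower, anc_neighborhoods = anc_names('3B')
#   # anc_upper -> 'ANC3B', anc_lower -> 'anc3b'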
def assemble_divo():
"""
Return DataFrame with one row per SMD and various stats about each SMD's ranking
divo = district-votes
"""
results = pd.read_csv('data/results.csv')
districts = pd.read_csv('data/districts.csv')
votes_per_smd = pd.DataFrame(results.groupby('smd_id').votes.sum()).reset_index()
# Calculate number of SMDs in each Ward and ANC
smds_per_ward = pd.DataFrame(districts.groupby('ward').size(), columns=['smds_in_ward']).reset_index()
smds_per_anc = pd.DataFrame(districts.groupby('anc_id').size(), columns=['smds_in_anc']).reset_index()
divo = pd.merge(districts, votes_per_smd, how='inner', on='smd_id')
divo = pd.merge(divo, smds_per_ward, how='inner', on='ward')
divo = pd.merge(divo, smds_per_anc, how='inner', on='anc_id')
divo['smds_in_dc'] = len(districts)
# Rank each SMD by the number of votes recorded for ANC races within that SMD
# method = min: assigns the lowest rank when multiple rows are tied
divo['rank_dc'] = divo['votes'].rank(method='min', ascending=False)
divo['rank_ward'] = divo.groupby('ward').votes.rank(method='min', ascending=False)
divo['rank_anc'] = divo.groupby('anc_id').votes.rank(method='min', ascending=False)
# Create strings showing the ranking of each SMD within its ANC, Ward, and DC-wide
divo['string_dc'] = divo.apply(
lambda row: f"{make_ordinal(row['rank_dc'])} out of {row['smds_in_dc']} SMDs", axis=1)
divo['string_ward'] = divo.apply(
lambda row: f"{make_ordinal(row['rank_ward'])} out of {row['smds_in_ward']} SMDs", axis=1)
divo['string_anc'] = divo.apply(
lambda row: f"{make_ordinal(row['rank_anc'])} out of {row['smds_in_anc']} SMDs", axis=1)
average_votes_in_dc = divo.votes.mean()
average_votes_by_ward = divo.groupby('ward').votes.mean()
average_votes_by_anc = divo.groupby('anc_id').votes.mean()
return divo
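# Illustrative sketch (hypothetical counts): an SMD ranked 4th of 296 SMDs
# city-wide, 2nd of 40 in its ward, and 1st of 8 in its ANC would get
#   string_dc   = '4th out of 296 SMDs'
#   string_ward = '2nd out of 40 SMDs'
#   string_anc  = '1st out of 8 SMDs'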
def list_commissioners(status=None, date_point=None):
"""
Return dataframe with list of commissioners by status
Options:
status=None (all statuses returned) -- default
status='former'
status='current'
status='future'
date_point=None -- all statuses calculated from current DC time (default)
date_point=(some other datetime) -- all statuses calculated from that datetime
"""
commissioners = pd.read_csv('data/commissioners.csv')
if not date_point:
tz = pytz.timezone('America/New_York')
date_point = datetime.now(tz)
commissioners['start_date'] = pd.to_datetime(commissioners['start_date']).dt.tz_localize(tz='America/New_York')
commissioners['end_date'] = pd.to_datetime(commissioners['end_date']).dt.tz_localize(tz='America/New_York')
# Create combined field with start and end dates, showing ambiguity
commissioners['start_date_str'] = commissioners['start_date'].dt.strftime('%B %-d, %Y')
commissioners['end_date_str'] = commissioners['end_date'].dt.strftime('%B %-d, %Y')
# We don't have exact dates when these commissioners started, so show "circa 2019"
commissioners.loc[commissioners['start_date_str'] == 'January 2, 2019', 'start_date_str'] = '~2019'
# Combine start and end dates into one field
commissioners['term_in_office'] = commissioners['start_date_str'] + ' to ' + commissioners['end_date_str']
commissioners['is_former'] = commissioners.end_date < date_point
commissioners['is_current'] = (commissioners.start_date < date_point) & (date_point < commissioners.end_date)
commissioners['is_future'] = date_point < commissioners.start_date
# Test here that there is, at most, one "Current" and one "Future" commissioner per SMD.
# Multiple "Former" commissioners is allowed
smd_count = commissioners.groupby('smd_id')[['is_former', 'is_current', 'is_future']].sum().astype(int)
# smd_count.to_csv('smd_commissioner_count.csv')
if smd_count['is_current'].max() > 1 or smd_count['is_future'].max() > 1:
raise Exception('Too many commissioners per SMD')
if status:
commissioner_output = commissioners[commissioners['is_' + status]].copy()
else:
commissioner_output = commissioners.copy()
return commissioner_output
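# Usage sketch (illustrative):
#   current_commissioners = list_commissioners(status='current')
#   former_as_of_2021 = list_commissioners(
#       status='former',
#       date_point=datetime(2021, 1, 2, tzinfo=pytz.timezone('America/New_York'))
#   )
# Passing status=None returns all commissioners, with the is_former,
# is_current, and is_future flags computed relative to date_point.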
def build_results_candidate_people():
"""
Return DataFrame containing results, candidates, and people joined
"""
people = pd.read_csv('data/people.csv')
candidates = pd.read_csv('data/candidates.csv')
results = pd.read_csv('data/results.csv')
results_candidates = pd.merge(
results #[['candidate_id', 'person_id', 'smd_id']]
, candidates #[['candidate_id']]
, how='left'
, on=['candidate_id', 'smd_id']
)
rcp = pd.merge(results_candidates, people, how='left', on='person_id') # results-candidates-people
# Determine who were incumbent candidates at the time of the election
election_date = datetime(2020, 11, 3, tzinfo=pytz.timezone('America/New_York'))
commissioners = list_commissioners(status=None)
incumbents = commissioners[(commissioners.start_date < election_date) & (election_date < commissioners.end_date)]
incumbent_candidates = pd.merge(incumbents, candidates, how='inner', on='person_id')
incumbent_candidates['is_incumbent'] = True
rcp = pd.merge(rcp, incumbent_candidates[['candidate_id', 'is_incumbent']], how='left', on='candidate_id')
rcp['is_incumbent'] = rcp['is_incumbent'].fillna(False)
# Sort by SMD ascenting, Votes descending
rcp = rcp.sort_values(by=['smd_id', 'votes'], ascending=[True, False])
# Placeholder name for all write-in candidates.
# We do not know the combination of name and vote count for write-in candidates
# We only know the name of the write-in winners
rcp['full_name'] = rcp['full_name'].fillna('Write-ins combined')
rcp['write_in_winner_int'] = rcp['write_in_winner'].astype(int)
return rcp
def build_district_comm_commelect():
"""
Build DataFrame showing commissioner and commissioner-elect for every district
"""
districts = pd.read_csv('data/districts.csv')
commissioners = list_commissioners(status=None)
people = pd.read_csv('data/people.csv')
cp = pd.merge(commissioners, people, how='inner', on='person_id')
# left join to both current commissioners and commissioners-elect
cp_current = pd.merge(districts, cp.loc[cp['is_current'], ['smd_id', 'person_id', 'full_name']], how='left', on='smd_id')
cp_current = cp_current.rename(columns={'full_name': 'current_commissioner', 'person_id': 'current_person_id'})
cp_current_future = pd.merge(cp_current, cp.loc[cp['is_future'], ['smd_id', 'person_id', 'full_name']], how='left', on='smd_id')
cp_current_future = cp_current_future.rename(columns={'full_name': 'commissioner_elect', 'person_id': 'future_person_id'})
# If there is not a current commissioner for the SMD, mark the row as "vacant"
cp_current_future['current_commissioner'] = cp_current_future['current_commissioner'].fillna('(vacant)')
return cp_current_future
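# Usage sketch (illustrative):
#   dcc = build_district_comm_commelect()
#   dcc[['smd_id', 'current_commissioner', 'commissioner_elect']].head()
# Districts with no sitting commissioner show '(vacant)' in the
# current_commissioner column.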
def build_smd_html_table(list_of_smds, link_path=''):
"""
Return an HTML table with one row per district for a given list of SMDs
Contains current commissioner and all candidates with number of votes
"""
rcp = build_results_candidate_people()
# Bold the winners in this text field
# results_field = 'Candidates and Results (Winner in Bold)'
# rcp[results_field] = rcp.apply(
# lambda row:
# '<strong>{} ({:,.0f} votes)</strong>'.format(row['full_name'], row['votes'])
# if row['winner']
# else '{} ({:,.0f} votes)'.format(row['full_name'], row['votes'])
# , axis=1
# )
results_field = 'Candidates and Results'
rcp[results_field] = rcp.apply(
lambda row: '{} ({:,.0f} votes)'.format(row['full_name'], row['votes'])
, axis=1
)
# Aggregate results by SMD
district_results = rcp.groupby('smd_id').agg({
'votes': sum
, results_field: lambda x: ', '.join(x)
, 'write_in_winner_int': sum
})
total_votes_display_name = 'ANC Votes'
district_results[total_votes_display_name] = district_results['votes']
max_votes_for_bar_chart = district_results[total_votes_display_name].max()
district_comm_commelect = build_district_comm_commelect()
dcp_results = pd.merge(district_comm_commelect, district_results, how='left', on='smd_id')
display_df = dcp_results[dcp_results['smd_id'].isin(list_of_smds)].copy()
display_df['SMD'] = (
f'<a href="{link_path}' + display_df['smd_id'].str.replace('smd_','').str.lower() + '.html">'
+ display_df['smd_id'].str.replace('smd_','') + '</a>'
)
display_df['Current Commissioner'] = display_df['current_commissioner']
display_df['Commissioner-Elect'] = display_df['commissioner_elect']
# Append "write-in" to Commissioners-Elect who were write-in candidates
display_df.loc[display_df['write_in_winner_int'] == 1, 'Commissioner-Elect'] = display_df.loc[display_df['write_in_winner_int'] == 1, 'Commissioner-Elect'] + ' (write-in)'
columns_to_html = ['SMD', 'Current Commissioner']
css_uuid = hashlib.sha224(display_df[columns_to_html].to_string().encode()).hexdigest() + '_'
html = (
display_df[columns_to_html]
.fillna('')
.style
# .set_properties(
# subset=[results_field]
# , **{
# 'text-align': 'left'
# , 'width': '700px'
# , 'height': '45px'
# }
# )
# .set_properties(
# subset=[total_votes_display_name]
# , **{'text-align': 'left'}
# )
.set_properties(
subset=['Current Commissioner']
, **{'width': '230px', 'text-align': 'left'} # 230px fits the longest commissioner name on one row
) # why is the width in pixels so different between these columns?
# .format({total_votes_display_name: '{:,.0f}'})
# .bar(
# subset=[total_votes_display_name]
# , color='#cab2d6' # light purple
# , vmin=0
# , vmax=3116
# )
.set_uuid(css_uuid)
.hide_index()
.render()
)
return html
def build_smd_html_table_candidates(list_of_smds, link_path=''):
"""
Return an HTML table with one row per district for a given list of SMDs
Contains current commissioner and all candidates by status
"""
districts = pd.read_csv('data/districts.csv')
commissioners = list_commissioners(status='current')
people = pd.read_csv('data/people.csv')
candidates = pd.read_csv('data/candidates.csv')
candidate_statuses = pd.read_csv('data/candidate_statuses.csv')
dc = pd.merge(districts, commissioners, how='left', on='smd_id')
dcp = pd.merge(dc, people, how='left', on='person_id')
cp = pd.merge(candidates, people, how='inner', on='person_id')
cpd = pd.merge(cp, districts, how='inner', on='smd_id')
cpds = pd.merge(cpd, candidate_statuses, how='inner', on='candidate_status')
dcp['Current Commissioner'] = dcp['full_name'].fillna('(vacant)')
display_df = dcp[dcp['smd_id'].isin(list_of_smds)].copy()
display_df['SMD'] = (
f'<a href="{link_path}' + display_df['smd_id'].str.replace('smd_','').str.lower() + '.html">'
+ display_df['smd_id'].str.replace('smd_','') + '</a>'
)
# Number of candidates in each SMD
# todo: make this a function
cps = pd.merge(cp, candidate_statuses, how='inner', on='candidate_status')
# Only include active candidates
district_candidates = pd.merge(districts, cps[cps['count_as_candidate']].copy(), how='left', on='smd_id')
candidate_count = pd.DataFrame(district_candidates.groupby('smd_id')['candidate_id'].count()).reset_index()
candidate_count.rename(columns={'candidate_id': 'Number of Candidates'}, inplace=True)
display_df = pd.merge(display_df, candidate_count, how='inner', on='smd_id')
columns_to_html = ['SMD', 'Current Commissioner', 'Number of Candidates']
cpds['order_status'] = cpds['display_order'].astype(str) + ';' + cpds['candidate_status']
candidates_in_smds = cpds[cpds['smd_id'].isin(list_of_smds)].copy()
statuses_in_smds = sorted(candidates_in_smds['order_status'].unique())
for status in statuses_in_smds:
status_name = status[status.find(';')+1:]
columns_to_html += [status_name]
cs_df = candidates_in_smds[candidates_in_smds['order_status'] == status][['smd_id', 'full_name']].copy()
cs_smd = cs_df.groupby('smd_id').agg({'full_name': list}).reset_index()
cs_smd[status_name] = cs_smd['full_name'].apply(lambda row: ', '.join(row))
display_df = pd.merge(display_df, cs_smd, how='left', on='smd_id')
html = (
display_df[columns_to_html]
.fillna('')
.style
.set_uuid('smd_')
.hide_index()
.render()
)
return html
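def _example_build_smd_html_table_candidates():
    # Hedged usage sketch (illustration only, not part of the original site build):
    # the SMD ids follow the 'smd_' prefix format implied by the
    # str.replace('smd_','') calls above, but these particular ids and the
    # link_path are hypothetical. The function reads the data/*.csv files, so it
    # only runs from the repository root where those files live.
    example_smds = ['smd_1A01', 'smd_1A02']
    return build_smd_html_table_candidates(example_smds, link_path='ancs/districts/')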
def build_district_list(smd_id_list=None, level=0):
"""
    Bulleted list of districts and current commissioners
If smd_id_list is None, all districts are returned
If smd_id_list is a list, those SMDs are returned
link level:
0: homepage
1: ANC page
2: SMD page
"""
    districts = pd.read_csv('data/districts.csv')
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # QA queries on new CDR row suppression
#
# Verify all rows identified for suppression in the deid dataset have been set to null.
#
# (Query results: This query returned no results.)
import urllib
import pandas as pd
pd.options.display.max_rows = 120
# + tags=["parameters"]
# Parameters
project_id = ""
deid_cdr = ""
com_cdr =""
# -
# df will hold a summary of the check results at the end
df = pd.DataFrame(columns = ['query', 'result'])
# # 1 Verify all fields identified for suppression in the OBSERVATION table have been removed from the table in the deid dataset.
query = f'''
WITH df1 AS (
SELECT observation_id
FROM `{project_id}.{com_cdr}.observation`
WHERE observation_source_value LIKE '%SitePairing%'
OR observation_source_value LIKE '%ArizonaSpecific%'
OR observation_source_value LIKE 'EastSoutheastMichigan%'
OR observation_source_value LIKE 'SitePairing_%'
)
SELECT
SUM(CASE WHEN value_as_string IS NOT NULL THEN 1 ELSE 0 END) AS n_value_as_string_not_null,
SUM(CASE WHEN value_source_value IS NOT NULL THEN 1 ELSE 0 END) AS n_value_source_value_not_null,
SUM(CASE WHEN observation_source_value IS NOT NULL THEN 1 ELSE 0 END) AS n_observation_source_value_not_null
FROM `{project_id}.{deid_cdr}.observation`
WHERE observation_id IN (SELECT observation_id FROM df1)
AND ((observation_source_value IS NOT NULL)
OR (value_source_value IS NOT NULL)
OR (value_as_string IS NOT NULL))
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.loc[0].sum()==0:
    df = df.append({'query' : 'Query1 three columns suppression in observation table', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query1 observation', 'result' : ''},
ignore_index = True)
df1
# # 2 Verify all fields identified for suppression in the OBSERVATION table have been removed from the table in the deid dataset.
# ## error in new cdr
query=f'''
WITH df1 AS (
SELECT observation_id
FROM `{project_id}.{com_cdr}.observation`
WHERE observation_source_value LIKE 'PIIName_%'
OR observation_source_value LIKE 'PIIAddress_%'
OR observation_source_value LIKE 'StreetAddress_%'
OR observation_source_value LIKE 'ConsentPII_%'
OR observation_source_value LIKE 'TheBasics_CountryBornTextBox'
OR observation_source_value LIKE 'PIIContactInformation_Phone'
OR observation_source_value LIKE 'Language_SpokenWrittenLanguage'
OR observation_source_value LIKE 'SocialSecurity_%'
)
SELECT
SUM(CASE WHEN value_as_string IS NOT NULL THEN 1 ELSE 0 END) AS n_value_as_string_not_null,
SUM(CASE WHEN value_source_value IS NOT NULL THEN 1 ELSE 0 END) AS n_value_source_value_not_null,
SUM(CASE WHEN observation_source_value IS NOT NULL THEN 1 ELSE 0 END) AS n_observation_source_value_not_null
FROM `{project_id}.{deid_cdr}.observation`
WHERE observation_id IN (SELECT observation_id FROM df1)
AND ((observation_source_value IS NOT NULL)
OR (value_source_value IS NOT NULL)
OR (value_as_string IS NOT NULL))
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.loc[0].sum()==0:
df = df.append({'query' : 'Query2 observation', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query2 observation', 'result' : ''},
ignore_index = True)
df1
# +
query=f'''
WITH df1 AS (
SELECT observation_id
FROM `{project_id}.{com_cdr}.observation`
WHERE observation_source_value LIKE 'PIIName_%'
OR observation_source_value LIKE 'PIIAddress_%'
OR observation_source_value LIKE 'StreetAddress_%'
OR observation_source_value LIKE 'ConsentPII_%'
OR observation_source_value LIKE 'TheBasics_CountryBornTextBox'
OR observation_source_value LIKE 'PIIContactInformation_Phone'
OR observation_source_value LIKE 'Language_SpokenWrittenLanguage'
OR observation_source_value LIKE 'SocialSecurity_%'
)
SELECT distinct observation_source_value,value_source_value, value_as_string
FROM `{project_id}.{deid_cdr}.observation`
WHERE observation_id IN (SELECT observation_id FROM df1)
AND ((observation_source_value IS NOT NULL)
OR (value_source_value IS NOT NULL)
OR (value_as_string IS NOT NULL))
'''
df1=pd.read_gbq(query, dialect='standard')
df1
# -
# # 3 Verify all fields identified for suppression in the OBSERVATION table have been removed from the table in the deid dataset.
# ## error in new cdr
query=f'''
WITH df1 AS (
SELECT observation_id
FROM `{project_id}.{com_cdr}.observation`
WHERE observation_source_value LIKE '%_Signature'
OR observation_source_value LIKE 'ExtraConsent__%'
)
SELECT
SUM(CASE WHEN value_as_string IS NOT NULL THEN 1 ELSE 0 END) AS n_value_as_string_not_null,
SUM(CASE WHEN value_source_value IS NOT NULL THEN 1 ELSE 0 END) AS n_value_source_value_not_null,
SUM(CASE WHEN observation_source_value IS NOT NULL THEN 1 ELSE 0 END) AS n_observation_source_value_not_null
FROM `{project_id}.{deid_cdr}.observation`
WHERE observation_id IN (SELECT observation_id FROM df1)
AND ((observation_source_value IS NOT NULL)
OR (value_source_value IS NOT NULL)
OR (value_as_string IS NOT NULL))
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.loc[0].sum()==0:
df = df.append({'query' : 'Query3 observation', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query3 observation', 'result' : ''},
ignore_index = True)
df1
# +
query=f'''
WITH df1 AS (
SELECT observation_id
FROM `{project_id}.{com_cdr}.observation`
WHERE observation_source_value LIKE '%_Signature'
OR observation_source_value LIKE 'ExtraConsent__%'
)
SELECT
distinct observation_source_value,value_source_value, value_as_string
FROM `{project_id}.{deid_cdr}.observation`
WHERE observation_id IN (SELECT observation_id FROM df1)
AND ((observation_source_value IS NOT NULL)
OR (value_source_value IS NOT NULL)
OR (value_as_string IS NOT NULL))
'''
df1=pd.read_gbq(query, dialect='standard')
df1
# -
# # 4 Verify all fields identified for suppression in the OBSERVATION table have been removed from the table in the deid dataset.
query=f'''
WITH df1 AS (
SELECT observation_id
FROM `{project_id}.{com_cdr}.observation`
WHERE observation_source_value LIKE '%Specific'
OR observation_source_value LIKE '%NoneOfTheseDescribeMe%'
OR observation_source_value LIKE '%RaceEthnicityNoneOfThese_%'
OR observation_source_value LIKE 'NoneOfTheseDescribeMe%'
OR observation_source_value LIKE 'WhatTribeAffiliation_%'
)
SELECT
SUM(CASE WHEN value_as_string IS NOT NULL THEN 1 ELSE 0 END) AS n_value_as_string_not_null,
SUM(CASE WHEN value_source_value IS NOT NULL THEN 1 ELSE 0 END) AS n_value_source_value_not_null,
SUM(CASE WHEN observation_source_value IS NOT NULL THEN 1 ELSE 0 END) AS n_observation_source_value_not_null
FROM `{project_id}.{deid_cdr}.observation`
WHERE observation_id IN (SELECT observation_id FROM df1)
AND ((observation_source_value IS NOT NULL)
OR (value_source_value IS NOT NULL)
OR (value_as_string IS NOT NULL))
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.loc[0].sum()==0:
df = df.append({'query' : 'Query4 observation', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query4 observation', 'result' : ''},
ignore_index = True)
df1
# # 5 Verify all fields identified for suppression in the OBSERVATION table have been removed from the table in the deid dataset.
# ## error in new cdr
query=f'''
WITH df1 AS (
SELECT observation_id
FROM `{project_id}.{com_cdr}.observation`
WHERE observation_source_value LIKE '%Gender%'
OR observation_source_value LIKE '%Sexuality%'
OR observation_source_value LIKE '%SexAtBirthNoneOfThese_%'
)
SELECT
SUM(CASE WHEN value_as_string IS NOT NULL THEN 1 ELSE 0 END) AS n_value_as_string_not_null,
SUM(CASE WHEN value_source_value IS NOT NULL THEN 1 ELSE 0 END) AS n_value_source_value_not_null,
SUM(CASE WHEN observation_source_value IS NOT NULL THEN 1 ELSE 0 END) AS n_observation_source_value_not_null
FROM `{project_id}.{deid_cdr}.observation`
WHERE observation_id IN (SELECT observation_id FROM df1)
AND ((observation_source_value IS NOT NULL)
OR (value_source_value IS NOT NULL)
OR (value_as_string IS NOT NULL))
'''
df1 = pd.read_gbq(query, dialect='standard')
#preprocessing script to binned and imputed final data to apply on simple baselines..
import pandas as pd
import numpy as np
import sys
def bin_and_impute(data, bin_width=60, variable_start_index=5):
    result = []  # list of per-patient dataframes
#set of variables to process:
variables = np.array(list(data.iloc[:,variable_start_index:]))
#create resample parameter string:
bin_width = str(bin_width)+'min'
#all distinct icustay ids:
id_s = data['icustay_id'].unique() # get unique ids
#loop over patients:
for icustay_id in id_s:
print(f'Processing ID: {icustay_id} ....')
pat = data.query( "icustay_id == @icustay_id" ) #select subset of dataframe featuring current icustay_id
pat_i = pat.set_index('chart_time', inplace=False)
#resampling needs datetime or timedelta format, create dummy timestamp from relative hour:
#pat_i.index = pd.to_datetime(pat_i.index, unit='D') #unit='s'
        pat_i.index = pd.to_timedelta(pat_i.index, unit='h')
import pandas as pd
from pandas.testing import assert_frame_equal
from sklearn.pipeline import make_pipeline
import pytest
from sklego.preprocessing import ColumnSelector
@pytest.fixture()
def df():
return pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [10, 9, 8, 7, 6, 5],
"c": ["a", "b", "a", "b", "c", "c"],
"d": ["b", "a", "a", "b", "a", "b"],
"e": [0, 1, 0, 1, 0, 1],
}
)
def test_select_two(df):
result_df = ColumnSelector(["d", "e"]).fit_transform(df)
expected_df = pd.DataFrame(
{"d": ["b", "a", "a", "b", "a", "b"], "e": [0, 1, 0, 1, 0, 1]}
)
assert_frame_equal(result_df, expected_df)
def test_select_one(df):
result_df = ColumnSelector(["e"]).fit_transform(df)
expected_df = pd.DataFrame({"e": [0, 1, 0, 1, 0, 1]})
assert_frame_equal(result_df, expected_df)
def test_select_all(df):
result_df = ColumnSelector(["a", "b", "c", "d", "e"]).fit_transform(df)
assert_frame_equal(result_df, df)
def test_select_none(df):
with pytest.raises(ValueError):
ColumnSelector([]).fit_transform(df)
def test_select_not_in_frame(df):
with pytest.raises(KeyError):
ColumnSelector(["f"]).fit_transform(df)
def test_select_one_in_pipeline(df):
pipe = make_pipeline(ColumnSelector(["d"]))
result_df = pipe.fit_transform(df)
expected_df = pd.DataFrame({"d": ["b", "a", "a", "b", "a", "b"]})
    assert_frame_equal(result_df, expected_df)
from pycox.datasets import metabric, nwtco, support, gbsg, flchain
from sklearn.preprocessing import KBinsDiscretizer, LabelEncoder, StandardScaler
import numpy as np
import pandas as pd
import pdb
from .utils import LabelTransform
def load_data(config):
'''load data, return updated configuration.
'''
data = config['data']
horizons = config['horizons']
assert data in ["metabric", "nwtco", "support", "gbsg", "flchain", "seer",], "Data Not Found!"
get_target = lambda df: (df['duration'].values, df['event'].values)
if data == "metabric":
# data processing, transform all continuous data to discrete
df = metabric.read_df()
# evaluate the performance at the 25th, 50th and 75th event time quantile
times = np.quantile(df["duration"][df["event"]==1.0], horizons).tolist()
cols_categorical = ["x4", "x5", "x6", "x7"]
cols_standardize = ['x0', 'x1', 'x2', 'x3', 'x8']
df_feat = df.drop(["duration","event"],axis=1)
df_feat_standardize = df_feat[cols_standardize]
df_feat_standardize_disc = StandardScaler().fit_transform(df_feat_standardize)
df_feat_standardize_disc = pd.DataFrame(df_feat_standardize_disc, columns=cols_standardize)
        # categorical features must come before the numerical features!
df_feat = pd.concat([df_feat[cols_categorical], df_feat_standardize_disc], axis=1)
vocab_size = 0
for _,feat in enumerate(cols_categorical):
df_feat[feat] = LabelEncoder().fit_transform(df_feat[feat]).astype(float) + vocab_size
vocab_size = df_feat[feat].max() + 1
        # get the largest duration time
max_duration_idx = df["duration"].argmax()
df_test = df_feat.drop(max_duration_idx).sample(frac=0.3)
df_train = df_feat.drop(df_test.index)
df_val = df_train.drop(max_duration_idx).sample(frac=0.1)
df_train = df_train.drop(df_val.index)
# assign cuts
labtrans = LabelTransform(cuts=np.array([df["duration"].min()]+times+[df["duration"].max()]))
labtrans.fit(*get_target(df.loc[df_train.index]))
y = labtrans.transform(*get_target(df)) # y = (discrete duration, event indicator)
df_y_train = pd.DataFrame({"duration": y[0][df_train.index], "event": y[1][df_train.index], "proportion": y[2][df_train.index]}, index=df_train.index)
df_y_val = pd.DataFrame({"duration": y[0][df_val.index], "event": y[1][df_val.index], "proportion": y[2][df_val.index]}, index=df_val.index)
# df_y_test = pd.DataFrame({"duration": y[0][df_test.index], "event": y[1][df_test.index], "proportion": y[2][df_test.index]}, index=df_test.index)
df_y_test = pd.DataFrame({"duration": df['duration'].loc[df_test.index], "event": df['event'].loc[df_test.index]})
elif data == "support":
df = support.read_df()
times = np.quantile(df["duration"][df["event"]==1.0], horizons).tolist()
cols_categorical = ["x1", "x2", "x3", "x4", "x5", "x6"]
cols_standardize = ['x0', 'x7', 'x8', 'x9', 'x10', 'x11', 'x12', 'x13']
df_feat = df.drop(["duration","event"],axis=1)
df_feat_standardize = df_feat[cols_standardize]
df_feat_standardize_disc = StandardScaler().fit_transform(df_feat_standardize)
df_feat_standardize_disc = pd.DataFrame(df_feat_standardize_disc, columns=cols_standardize)
df_feat = pd.concat([df_feat[cols_categorical], df_feat_standardize_disc], axis=1)
vocab_size = 0
for i,feat in enumerate(cols_categorical):
df_feat[feat] = LabelEncoder().fit_transform(df_feat[feat]).astype(float) + vocab_size
vocab_size = df_feat[feat].max() + 1
        # get the largest duration time
max_duration_idx = df["duration"].argmax()
df_test = df_feat.drop(max_duration_idx).sample(frac=0.3)
df_train = df_feat.drop(df_test.index)
df_val = df_train.drop(max_duration_idx).sample(frac=0.1)
df_train = df_train.drop(df_val.index)
# assign cuts
# labtrans = LabTransDiscreteTime(cuts=np.array([0]+times+[df["duration"].max()]))
labtrans = LabelTransform(cuts=np.array([0]+times+[df["duration"].max()]))
labtrans.fit(*get_target(df.loc[df_train.index]))
# y = labtrans.fit_transform(*get_target(df)) # y = (discrete duration, event indicator)
y = labtrans.transform(*get_target(df)) # y = (discrete duration, event indicator)
df_y_train = pd.DataFrame({"duration": y[0][df_train.index], "event": y[1][df_train.index], "proportion":y[2][df_train.index]}, index=df_train.index)
df_y_val = pd.DataFrame({"duration": y[0][df_val.index], "event": y[1][df_val.index], "proportion":y[2][df_val.index]}, index=df_val.index)
# df_y_test = pd.DataFrame({"duration": y[0][df_test.index], "event": y[1][df_test.index], "proportion":y[2][df_test.index]}, index=df_test.index)
df_y_test = pd.DataFrame({"duration": df['duration'].loc[df_test.index], "event": df['event'].loc[df_test.index]})
elif data == "seer":
PATH_DATA = "./data/seer_processed.csv"
df = pd.read_csv(PATH_DATA)
times = np.quantile(df["duration"][df["event_breast"]==1.0], horizons).tolist()
event_list = ["event_breast", "event_heart"]
cols_categorical = ["Sex", "Year of diagnosis", "Race recode (W, B, AI, API)", "Histologic Type ICD-O-3",
"Laterality", "Sequence number", "ER Status Recode Breast Cancer (1990+)",
"PR Status Recode Breast Cancer (1990+)", "Summary stage 2000 (1998-2017)",
"RX Summ--Surg Prim Site (1998+)", "Reason no cancer-directed surgery", "First malignant primary indicator",
"Diagnostic Confirmation", "Median household income inflation adj to 2019"]
cols_standardize = ["Regional nodes examined (1988+)", "CS tumor size (2004-2015)", "Total number of benign/borderline tumors for patient",
"Total number of in situ/malignant tumors for patient",]
df_feat = df.drop(["duration","event_breast", "event_heart"],axis=1)
df_feat_standardize = df_feat[cols_standardize]
df_feat_standardize_disc = StandardScaler().fit_transform(df_feat_standardize)
df_feat_standardize_disc = pd.DataFrame(df_feat_standardize_disc, columns=cols_standardize)
df_feat = pd.concat([df_feat[cols_categorical], df_feat_standardize_disc], axis=1)
vocab_size = 0
for i,feat in enumerate(cols_categorical):
df_feat[feat] = LabelEncoder().fit_transform(df_feat[feat]).astype(float) + vocab_size
vocab_size = df_feat[feat].max() + 1
        # get the largest duration time
max_duration_idx = df["duration"].argmax()
df_test = df_feat.drop(max_duration_idx).sample(frac=0.3)
df_train = df_feat.drop(df_test.index)
df_val = df_train.drop(max_duration_idx).sample(frac=0.1)
df_train = df_train.drop(df_val.index)
# assign cuts
labtrans = LabelTransform(cuts=np.array([0]+times+[df["duration"].max()]))
get_target = lambda df,event: (df['duration'].values, df[event].values)
        # this dataset has two competing events!
        df_y_train = pd.DataFrame({"duration": df["duration"][df_train.index]})
import os
import locale
import codecs
import nose
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas import date_range, Index
import pandas.util.testing as tm
from pandas.tools.util import cartesian_product, to_numeric
CURRENT_LOCALE = locale.getlocale()
LOCALE_OVERRIDE = os.environ.get('LOCALE_OVERRIDE', None)
class TestCartesianProduct(tm.TestCase):
def test_simple(self):
x, y = list('ABC'), [1, 22]
result = cartesian_product([x, y])
expected = [np.array(['A', 'A', 'B', 'B', 'C', 'C']),
np.array([1, 22, 1, 22, 1, 22])]
assert_equal(result, expected)
def test_datetimeindex(self):
# regression test for GitHub issue #6439
# make sure that the ordering on datetimeindex is consistent
x = date_range('2000-01-01', periods=2)
result = [Index(y).day for y in cartesian_product([x, x])]
expected = [np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])]
assert_equal(result, expected)
class TestLocaleUtils(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestLocaleUtils, cls).setUpClass()
cls.locales = tm.get_locales()
if not cls.locales:
raise nose.SkipTest("No locales found")
tm._skip_if_windows()
@classmethod
def tearDownClass(cls):
super(TestLocaleUtils, cls).tearDownClass()
del cls.locales
def test_get_locales(self):
# all systems should have at least a single locale
assert len(tm.get_locales()) > 0
def test_get_locales_prefix(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test filtering locale prefixes")
first_locale = self.locales[0]
assert len(tm.get_locales(prefix=first_locale[:2])) > 0
def test_set_locale(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test setting another locale")
if LOCALE_OVERRIDE is not None:
lang, enc = LOCALE_OVERRIDE.split('.')
else:
lang, enc = 'it_CH', 'UTF-8'
enc = codecs.lookup(enc).name
new_locale = lang, enc
if not tm._can_set_locale(new_locale):
with tm.assertRaises(locale.Error):
with tm.set_locale(new_locale):
pass
else:
with tm.set_locale(new_locale) as normalized_locale:
new_lang, new_enc = normalized_locale.split('.')
new_enc = codecs.lookup(enc).name
normalized_locale = new_lang, new_enc
self.assertEqual(normalized_locale, new_locale)
current_locale = locale.getlocale()
self.assertEqual(current_locale, CURRENT_LOCALE)
class TestToNumeric(tm.TestCase):
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
with tm.assertRaises(ValueError):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
with tm.assertRaises(ValueError):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
def test_type_check(self):
# GH 11776
df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']})
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df, errors=errors)
def test_scalar(self):
self.assertEqual(pd.to_numeric(1), 1)
self.assertEqual(pd.to_numeric(1.1), 1.1)
self.assertEqual(pd.to_numeric('1'), 1)
self.assertEqual(pd.to_numeric('1.1'), 1.1)
with tm.assertRaises(ValueError):
to_numeric('XX', errors='raise')
self.assertEqual(to_numeric('XX', errors='ignore'), 'XX')
self.assertTrue(np.isnan(to_numeric('XX', errors='coerce')))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
        tm.assert_numpy_array_equal(res, exp)
import pandas as pd
import os
from os.path import join, abspath, dirname, isfile
from google_trans_new import google_translator
from sklearn.model_selection import train_test_split
import nlpaug.augmenter.word as naw
import spacy
from nltk.stem import SnowballStemmer
from utils import downloader, exploration, normalization
# from preprocessing import get_stopwords
def text_comp19_to_df():
"""
Returns a pandas Dataframe object with
the data of the TextComplexityDE19 dataset
"""
# Path to relevant csv file
csv_path = join(
dirname(dirname(dirname(abspath(__file__)))),
"data",
"TextComplexityDE19/ratings.csv",
)
# read in csv file
print("Check for TextComplexityDE19/ratings.csv")
if isfile(csv_path):
print("Reading in TextComplexityDE19/ratings.csv")
corpus = pd.read_csv(csv_path, encoding="windows-1252")
else:
print("Downloading TextComplexityDE19 Dataset")
downloader.download_TextComplexityDE19()
print("Reading in TextComplexityDE19/ratings.csv")
        corpus = pd.read_csv(csv_path, encoding="windows-1252")
from datetime import datetime
import pytest
from pandas import DataFrame
from evidently import ColumnMapping
from evidently.analyzers.data_quality_analyzer import DataQualityAnalyzer
from evidently.dashboard.widgets.data_quality_features_widget import DataQualityFeaturesWidget
from evidently.options import OptionsProvider
def sample_data(feature1, feature2, feature3):
return [{'feature1': t[0], 'feature2': t[1], 'feature3': t[2]} for t in zip(feature1, feature2, feature3)]
@pytest.mark.parametrize(
"reference, current, column_mapping",
[
(sample_data([1, 1], [1, 1], [1, 1]), sample_data([1, 1], [1, 1], [1, 1]), ColumnMapping()),
(sample_data(["1", "1"], [1, 1], [1, 1]), sample_data(["1", "1"], [1, 1], [1, 1]), ColumnMapping()),
(sample_data([True, True], [1, 1], [1, 1]), sample_data([True, True], [1, 1], [1, 1]), ColumnMapping()),
])
def test_data_profile_widget_no_exceptions(reference, current, column_mapping):
analyzer = DataQualityAnalyzer()
analyzer.options_provider = OptionsProvider()
results = analyzer.calculate(DataFrame(reference), DataFrame(current), column_mapping)
widget = DataQualityFeaturesWidget("test")
widget.options_provider = OptionsProvider()
    widget.calculate(DataFrame(reference), DataFrame(current), column_mapping, {DataQualityAnalyzer: results})
import os
import torch
from torch.utils.tensorboard import SummaryWriter
import pandas as pd
import numpy as np
# Timing utility
from timeit import default_timer as timer
from utils.utilities import parse_args, parse_yaml, make_dir
import data_loader as dl
from transformations import transforms as trfs
from models.model_zoo import get_model
#from utils.averager import Averager
from metrics.metrics import calculate_image_precision
def train_model(
train_data_loader,
valid_data_loader,
model,
optimizer,
num_epochs,
path_save_model,
writer,
lr_scheduler=None
):
"""
"""
scores_dict_train = get_empty_scores_dict(train_data_loader)
scores_dict_valid = get_empty_scores_dict(valid_data_loader)
history = []
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
cpu_device = torch.device('cpu')
model.to(device)
overall_start = timer()
n_train_batches = len(train_data_loader)
# Main loop
for epoch in range(num_epochs):
# Keep track of training loss and validation MAP each epoch
train_loss = 0.0
# Set to training
model.train()
start = timer()
for ii, (images, targets, image_ids) in enumerate(train_data_loader):
print(f"\nEpoch #{epoch} Train Batch #{ii}/{n_train_batches}")
images = list(image.to(device) for image in images)
targets = [
{k: v.to(device) for k, v in t.items()} for t in targets
]
loss_dict = model(images, targets)
losses = sum(loss for loss in loss_dict.values())
loss_value = losses.item()
# Track train loss by multiplying average loss by number of
# examples in batch
train_loss += loss_value * len(images)
optimizer.zero_grad()
losses.backward()
optimizer.step()
# update the learning rate
if lr_scheduler is not None:
lr_scheduler.step()
print(f"\nEpoch #{epoch}: {timer() - start:.2f} seconds elapsed.")
scores_dict_train, train_map = predict_data_set(
model, train_data_loader, scores_dict_train, epoch, device,
cpu_device, 'Train'
)
scores_dict_valid, valid_map = predict_data_set(
model, valid_data_loader, scores_dict_valid, epoch, device,
cpu_device, 'Validation'
)
# Calculate average losses
train_loss = train_loss / len(train_data_loader.dataset)
writer.add_scalar("Train Loss", train_loss, epoch)
writer.add_scalar("Train MAP", train_map, epoch)
writer.add_scalar("Valid MAP", valid_map, epoch)
history.append([train_loss, train_map, valid_map])
# End of training
total_time = timer() - overall_start
print(
f"{total_time:.2f} total seconds elapsed. "
f"{(total_time / num_epochs):.2f} seconds per epoch"
)
torch.save(model.state_dict(), path_save_model)
df_history = pd.DataFrame(
history,
columns=['train_loss', 'train_map', 'valid_map']
)
    df_scores_train = pd.DataFrame(scores_dict_train)
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
filename_diferent = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
filename_diferent,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
            [39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
#!/usr/bin/python
"""functions to create the figures for publication
"""
import seaborn as sns
import math
import pyrtools as pt
import neuropythy as ny
import os.path as op
import warnings
import torch
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
import pandas as pd
import re
import itertools
from sklearn import linear_model
from . import summary_plots
from . import analyze_model
from . import plotting
from . import model
from . import utils
from . import first_level_analysis
from . import style
def create_precision_df(paths, summary_func=np.mean,
df_filter_string='drop_voxels_with_mean_negative_amplitudes,drop_voxels_near_border'):
"""Create dataframe summarizing subjects' precision
When combining parameter estimates into an 'overall' value, we want
to use the precision of each subject's data. To do that, we take the
first level summary dfs (using regex to extract the subject,
session, and task from the path) and call `summary_func` on the
`precision` column. This gives us a single number, which we'll use
when performing the precision-weighted mean
df_filter_string can be used to filter the voxels we examine, so
that we look only at those voxels that the model was fit to
Parameters
----------
paths : list
list of strings giving the paths to the first level summary
dfs.
summary_func : callable, optional
function we use to summarize the precision. Must take an array
as its first input, not require any other inputs, and return a
single value
df_filter_string : str or None, optional
a str specifying how to filter the voxels in the dataset. see
the docstrings for sfp.model.FirstLevelDataset and
sfp.model.construct_df_filter for more details. If None, we
won't filter. Should probably use the default, which is what all
models are trained using.
Returns
-------
df : pd.DataFrame
dataframe containing one row per (subject, session) pair, giving
the precision for that scanning session. used to weight
bootstraps
"""
regex_names = ['subject', 'session', 'task']
regexes = [r'(sub-[a-z0-9]+)', r'(ses-[a-z0-9]+)', r'(task-[a-z0-9]+)']
df = []
for p in paths:
tmp = pd.read_csv(p)
if df_filter_string is not None:
df_filter = model.construct_df_filter(df_filter_string)
tmp = df_filter(tmp).reset_index()
val = summary_func(tmp.precision.values)
if hasattr(val, '__len__') and len(val) > 1:
raise Exception(f"summary_func {summary_func} returned more than one value!")
data = {'precision': val}
for n, regex in zip(regex_names, regexes):
res = re.findall(regex, p)
if len(set(res)) != 1:
raise Exception(f"Unable to infer {n} from path {p}!")
data[n] = res[0]
df.append(pd.DataFrame(data, [0]))
return pd.concat(df).reset_index(drop=True)
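def _example_create_precision_df():
    """Hedged usage sketch (illustration only): the glob pattern below is
    hypothetical. Paths must embed sub-*, ses-*, and task-* so the regexes above
    can parse them, and each csv needs a 'precision' column; df_filter_string=None
    skips the voxel filtering for this sketch.
    """
    import glob
    paths = glob.glob('derivatives/first_level/sub-*_ses-*_task-sfprescaled_summary.csv')
    return create_precision_df(paths, df_filter_string=None)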
def existing_studies_df():
"""create df summarizing earlier studies
there have been a handful of studies looking into this, so we want
to summarize them for ease of reference. Each study is measuring
preferred spatial frequency at multiple eccentricities in V1 using
fMRI (though how exactly they determine the preferred SF and the
stimuli they use vary)
This dataframe contains the following columns:
- Paper: the reference for this line
- Eccentricity: the eccentricity (in degrees) that they measured
preferred spatial frequency at
- Preferred spatial frequency (cpd): the preferred spatial frequency
measured at this eccentricity (in cycles per degree)
- Preferred period (deg): the preferred period measured at this
eccentricity (in degrees per cycle); this is just the inverse of
the preferred spatial frequency
The eccentricity / preferred spatial frequency were often not
reported in a manner that allowed for easy extraction of the data,
so the values should all be taken as approximate, as they involve me
attempting to read values off of figures / colormaps.
Papers included (and their reference in the df):
- Sasaki (2001): <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>., & <NAME>. (2001). Local and global
attention are mapped retinotopically in human occipital
cortex. Proceedings of the National Academy of Sciences, 98(4),
2077–2082.
- Henriksson (2008): <NAME>., <NAME>., Hyv\"arinen,
Aapo, & <NAME>. (2008). Spatial frequency tuning in human
retinotopic visual areas. Journal of Vision, 8(10),
5. http://dx.doi.org/10.1167/8.10.5
- Kay (2011): <NAME>. (2011). Understanding Visual Representation
By Developing Receptive-Field Models. Visual Population Codes:
Towards a Common Multivariate Framework for Cell Recording and
Functional Imaging, (), 133–162.
- Hess (dominant eye, 2009): <NAME>., <NAME>., <NAME>.,
<NAME>., & <NAME>. (2009). Selectivity as well as
sensitivity loss characterizes the cortical spatial frequency
deficit in amblyopia. Human Brain Mapping, 30(12),
4054–4069. http://dx.doi.org/10.1002/hbm.20829 (this paper reports
spatial frequency separately for dominant and non-dominant eyes in
amblyopes, only the dominant eye is reported here)
- D'Souza (2016): <NAME>., <NAME>., <NAME>., Strasburger,
H., & <NAME>. (2016). Dependence of chromatic responses in v1
on visual field eccentricity and spatial frequency: an fmri
study. JOSA A, 33(3), 53–64.
- Farivar (2017): <NAME>., <NAME>., <NAME>.,
<NAME>., & <NAME>. (2017). Non-uniform phase sensitivity
in spatial frequency maps of the human visual cortex. The Journal
of Physiology, 595(4),
1351–1363. http://dx.doi.org/10.1113/jp273206
- Olsson (pilot, model fit): line comes from a model created by <NAME> in the Winawer lab, fit to pilot data collected by
<NAME> (so note that this is not data). Never ended up
in a paper, but did show in a presentation at VSS 2017: <NAME>,
<NAME>, <NAME>, <NAME> (2017) An anatomically-defined
template of BOLD response in
V1-V3. J. Vis. 17(10):585. DOI:10.1167/17.10.585
Returns
-------
df : pd.DataFrame
Dataframe containing the optimum spatial frequency at multiple
eccentricities from the different papers
"""
data_dict = {
'Paper': ['Sasaki (2001)',]*7,
'Preferred spatial frequency (cpd)': [1.25, .9, .75, .7, .6, .5, .4],
'Eccentricity': [0, 1, 2, 3, 4, 5, 12]
}
data_dict['Paper'].extend(['Henriksson (2008)', ]*5)
data_dict['Preferred spatial frequency (cpd)'].extend([1.2, .68, .46, .40, .18])
data_dict['Eccentricity'].extend([1.7, 4.7, 6.3, 9, 19])
# This is only a single point, so we don't plot it
# data_dict['Paper'].extend(['Kay (2008)'])
# data_dict['Preferred spatial frequency (cpd)'].extend([4.5])
# data_dict['Eccentricity'].extend([ 2.9])
data_dict['Paper'].extend(['Kay (2011)']*5)
data_dict['Preferred spatial frequency (cpd)'].extend([4, 3, 10, 10, 2])
data_dict['Eccentricity'].extend([2.5, 4, .5, 1.5, 7])
data_dict['Paper'].extend(["Hess (dominant eye, 2009)"]*3)
data_dict['Preferred spatial frequency (cpd)'].extend([2.25, 1.9, 1.75])
data_dict['Eccentricity'].extend([2.5, 5, 10])
data_dict['Paper'].extend(["D'Souza (2016)"]*3)
data_dict['Preferred spatial frequency (cpd)'].extend([2, .95, .4])
data_dict['Eccentricity'].extend([1.4, 4.6, 9.8])
data_dict['Paper'].extend(['Farivar (2017)']*2)
data_dict['Preferred spatial frequency (cpd)'].extend([3, 1.5,])
data_dict['Eccentricity'].extend([.5, 3])
# model fit and never published, so don't include.
# data_dict['Paper'].extend(['Olsson (pilot, model fit)']*10)
# data_dict['Preferred spatial frequency (cpd)'].extend([2.11, 1.76, 1.47, 2.75, 1.24, 1.06, .88, .77, .66, .60])
# data_dict['Eccentricity'].extend([2, 3, 4, 1, 5, 6, 7, 8, 9, 10])
# these values gotten using web plot digitizer and then rounded to 2
# decimal points
data_dict["Paper"].extend(['Aghajari (2020)']*9)
data_dict['Preferred spatial frequency (cpd)'].extend([2.24, 1.62, 1.26,
1.09, 0.88, 0.75,
0.78, 0.75, 0.70])
data_dict['Eccentricity'].extend([0.68, 1.78, 2.84, 3.90, 5.00, 6.06, 7.16,
8.22, 9.28])
# Predictions of the scaling hypothesis -- currently unused
# ecc = np.linspace(.01, 20, 50)
# fovea_cutoff = 0
# # two possibilities here
# V1_RF_size = np.concatenate([np.ones(len(ecc[ecc<fovea_cutoff])),
# np.linspace(1, 2.5, len(ecc[ecc>=fovea_cutoff]))])
# V1_RF_size = .2 * ecc
df = pd.DataFrame(data_dict)
df = df.sort_values(['Paper', 'Eccentricity'])
df["Preferred period (deg)"] = 1. / df['Preferred spatial frequency (cpd)']
return df
def _demean_df(df, y='cv_loss', extra_cols=[]):
"""demean a column of the dataframe
Calculate the mean of `y` across the values in the 'subject' and
'loss_func' columns, then demean `y` and return df with several new
columns:
- `demeaned_{y}`: each y with `{y}_mean` subtracted off
- `{y}_mean`: the average of y per subject per loss_func
- `{y}_mean_overall`: the average of `{y}_mean` per loss_func
- `remeaned_{y}`: the `demeaned_{y}` with `{y}_mean_overall` added
back to it
If you use this with the defaults, the overall goal of this is to
enable us to look at how the cv_loss varies across models, because
the biggest effect is the difference in cv_loss across
subjects. Demeaning the cv_loss on a subject-by-subject basis
enables us to put all the subjects together so we can look for
patterns across models. For example, we can then compute error bars
that only capture the variation across models, but not across
subjects. Both remeaned or demeaned will capture this, the question
is what values to have on the y-axis. If you use demeaned, you'll
have negative loss, which might be confusing. If you use remeaned,
the y-axis values will be the average across subjects, which might
be easier to interpret.
Parameters
----------
df : pd.DataFrame
dataframe to demean
y : str, optional
the column to demean
extra_cols : list, optionla
list of columns to de/remean using the mean from `y`. for
example, you might want to de/remean the noise_ceiling using the
mean from the cross-validation loss
Returns
-------
df : pd.DataFrame
dataframe with new, demeaned column
"""
gb_cols = ['subject', 'loss_func']
df = df.set_index(gb_cols)
y_mean = df.groupby(gb_cols)[y].mean()
df[f'{y}_mean'] = y_mean
# here we take the average over the averages. we do this so that we weight
# all of the groups the same. For example, if gb_cols=['subject'] and one
# subject had twice as many rows (because it had two sessions in df, for
# example), then this ensures that subject isn't twice as important when
# computing the mean (which would be the case if we used
# df[f'{y}_mean'].mean() instead). We do, however, want to do this
# separately for each loss function, since they'll probably have different
# means
df = df.reset_index()
df = df.set_index('loss_func')
df[f'{y}_mean_overall'] = y_mean.reset_index().groupby('loss_func')[y].mean()
df[f'demeaned_{y}'] = df[y] - df[f'{y}_mean']
df[f'remeaned_{y}'] = df[f'demeaned_{y}'] + df[f'{y}_mean_overall']
for col in extra_cols:
df[f'demeaned_{col}'] = df[col] - df[f'{y}_mean']
df[f'remeaned_{col}'] = df[f'demeaned_{col}'] + df[f'{y}_mean_overall']
return df.reset_index()
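def _example_demean_df():
    """Toy illustration (added for clarity, not used elsewhere) of _demean_df.
    Two subjects with different average cv_loss: demeaning removes the subject
    offset (demeaned_cv_loss is [-.5, .5, -.5, .5]) and remeaning adds back the
    overall mean of 3.5 so the values stay on an interpretable scale. The
    loss_func label here is just a placeholder.
    """
    toy = pd.DataFrame({'subject': ['s1', 's1', 's2', 's2'],
                        'loss_func': ['example_loss'] * 4,
                        'cv_loss': [1.0, 2.0, 5.0, 6.0]})
    return _demean_df(toy)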
def prep_df(df, task, groupaverage=False):
"""prepare the dataframe by restricting to the appropriate subset
The dataframe created by earlier analysis steps contains all
scanning sessions and potentially multiple visual areas. for our
figures, we just want to grab the relevant scanning sessions and
visual areas (V1), so this function helps do that. If df has the
'frequency_type' column (i.e., it's summarizing the 1d tuning
curves), we also restrict to the "local_sf_magnitude" rows (rather
than "frequency_space")
Parameters
----------
df : pd.DataFrame
dataframe that will be used for plotting figures. contains some
summary of (either 1d or 2d) model information across sessions.
    task : {'task-sfprescaled', 'task-sfpconstant'}
        this determines which task we'll grab: task-sfprescaled or
        task-sfpconstant. task-sfp also exists, but we consider that
        a pilot task and so do not allow it for the creation of figures
(the stimuli were not contrast-rescaled).
groupaverage : bool, optional
whether to grab only the groupaverage subjects (if True) or
every other subject (if False). Note that we'll grab/drop both
i-linear and i-nearest if they're both present
Returns
-------
df : pd.DataFrame
The restricted dataframe.
"""
if task not in ['task-sfprescaled', 'task-sfpconstant']:
raise Exception("Only task-sfprescaled and task-sfpconstant are allowed!")
df = df.query("task==@task")
if 'frequency_type' in df.columns:
df = df.query("frequency_type=='local_sf_magnitude'")
if 'varea' in df.columns:
df = df.query("varea==1")
if 'fit_model_type' in df.columns:
df.fit_model_type = df.fit_model_type.map(dict(zip(plotting.MODEL_ORDER,
plotting.MODEL_PLOT_ORDER)))
if 'subject' in df.columns:
df.subject = df.subject.map(dict(zip(plotting.SUBJECT_ORDER,
plotting.SUBJECT_PLOT_ORDER)))
return df
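def _example_prep_df():
    """Toy illustration (added for clarity): prep_df only touches the columns it
    finds, so a minimal frame with just a 'task' column is simply restricted to
    the requested task. Real inputs come from the earlier analysis steps.
    """
    toy = pd.DataFrame({'task': ['task-sfprescaled', 'task-sfpconstant'],
                        'cv_loss': [0.1, 0.2]})
    return prep_df(toy, 'task-sfprescaled')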
def prep_model_df(df):
"""prepare models df for plotting
For plotting purposes, we want to rename the model parameters from
their original values (e.g., sf_ecc_slope, abs_mode_cardinals) to
those we use in the equation (e.g., a, p_1). We do that by simply
remapping the names from those given at plotting.ORIG_PARAM_ORDER to
those in plotting.PLOT_PARAM_ORDER. we additionally add a new
column, param_category, which we use to separate out the three types
of parameters: sigma, the effect of eccentricity, and the effect of
orientation / retinal angle.
Parameters
----------
df : pd.DataFrame
models dataframe, that is, the dataframe that summarizes the
parameter values for a variety of models
Returns
-------
df : pd.DataFrame
The remapped dataframe.
"""
rename_params = dict((k, v) for k, v in zip(plotting.ORIG_PARAM_ORDER,
plotting.PLOT_PARAM_ORDER))
df = df.set_index('model_parameter')
df.loc['sigma', 'param_category'] = 'sigma'
df.loc[['sf_ecc_slope', 'sf_ecc_intercept'], 'param_category'] = 'eccen'
df.loc[['abs_mode_cardinals', 'abs_mode_obliques', 'rel_mode_cardinals', 'rel_mode_obliques',
'abs_amplitude_cardinals', 'abs_amplitude_obliques', 'rel_amplitude_cardinals',
'rel_amplitude_obliques'], 'param_category'] = 'orientation'
df = df.reset_index()
df['model_parameter'] = df.model_parameter.map(rename_params)
return df
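def _example_prep_model_df():
    """Toy illustration (added for clarity), assuming plotting.ORIG_PARAM_ORDER
    lists exactly the original parameter names used in the .loc calls above. The
    returned frame has the plot-friendly parameter names and a param_category
    column separating the sigma / eccen / orientation parameters.
    """
    toy = pd.DataFrame({'model_parameter': plotting.ORIG_PARAM_ORDER,
                        'fit_value': np.arange(len(plotting.ORIG_PARAM_ORDER))})
    return prep_model_df(toy)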
def append_precision_col(df, col='preferred_period',
gb_cols=['subject', 'session', 'varea', 'stimulus_superclass', 'eccen']):
"""append column giving precision of another column and collapse
this function gives the precision of the value found in a single
column (across the columns that are NOT grouped-by) and collapses
across those columns. The intended use case is to determine the
precision of a parameter estimate across bootstraps for each
(subject, session) (for the 2d model) or for each (subject, session,
stimulus_superclass, eccen) (for the 1d model).
precision is the inverse of the variance, so let :math:`c` be the
68% confidence interval of the column value, then precision is
:math:`\frac{1}{(c/2)^2}`
finally, we collapse across gb_cols, returning the median and
precision of col for each combination of values from those columns.
Parameters
----------
df : pd.DataFrame
the df that contains the values we want the precision for
col : str, optional
the name of the column that contains the values we want the
precision for
gb_cols : list, optional
list of strs containing the columns we want to groupby. we will
compute the precision separately for each combination of values
here.
Returns
-------
df : pd.DataFrame
the modified df, containing the median and precision of col
(also contains the medians of the other values in the original
df, but not their precision)
"""
gb = df.groupby(gb_cols)
df = df.set_index(gb_cols)
df[f'{col}_precision'] = gb[col].apply(first_level_analysis._precision_dist)
df = df.reset_index()
return df.groupby(gb_cols).median().reset_index()
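def _example_precision_formula():
    """Worked illustration (added for clarity) of the precision definition in the
    docstring above: with c the width of the 68% confidence interval across
    bootstraps, precision = 1 / (c / 2)**2. The percentile-based estimate of c is
    an assumption about what first_level_analysis._precision_dist computes; the
    values below are synthetic.
    """
    bootstrapped_vals = np.random.normal(loc=2, scale=.5, size=1000)
    lo, hi = np.percentile(bootstrapped_vals, [16, 84])
    c = hi - lo
    # for scale=.5, c is roughly 1, so precision is roughly 1 / .5**2 = 4
    return 1 / (c / 2)**2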
def precision_weighted_bootstrap(df, seed, n_bootstraps=100, col='preferred_period',
gb_cols=['varea', 'stimulus_superclass', 'eccen'],
precision_col='preferred_period_precision'):
"""calculate the precision-weighted bootstrap of a column
to combine across subjects, we want to use a precision-weighted
average, rather than a regular average, because we are trying to
summarize the true value across the population and our uncertainty
in it. Therefore, we down-weight subjects whose estimate is
noisier. Similar to append_precision_col(), we groupby over some of
the columns to combine info across them (gb_cols here should be a
subset of those used for append_precision_col())
You should plot the values here with scatter_ci_dist() or something
similar to draw the 68% CI of the distribution here (not sample it
to draw the CI)
Parameters
----------
df : pd.DataFrame
the df that we want to bootstrap (must already have precision
column, i.e., this should be the df returned by
append_precision_col())
seed : int
seed for numpy's RNG
n_bootstraps : int, optional
the number of independent bootstraps to draw
col : str, optional
the name of the column that contains the values we want to draw
bootstraps for
gb_cols : list, optional
list of strs containing the columns we want to groupby. we will
compute the bootstraps for each combination of values here.
precision_col : str, optional
name of the column that contains the precision, used in the
precision-weighted mean
Returns
-------
df : pd.DataFrame
the df containing the bootstraps of precision-weighted
mean. this will only contain the following columns: col,
*gb_cols, and bootstrap_num
"""
np.random.seed(seed)
if type(gb_cols) != list:
raise Exception("gb_cols must be a list!")
bootstraps = []
for n, g in df.groupby(gb_cols):
# n needs to be a list of the same length as gb_cols for the
# dict(zip()) call to work, but if len(gb_cols) == 1, then it
# will be a single str (or int or float or whatever), so we
# convert it to a list real quick
if len(gb_cols) == 1:
n = [n]
tmp = dict(zip(gb_cols, n))
for j in range(n_bootstraps):
t = g.sample(len(g), replace=True)
tmp[col] = np.average(t[col], weights=t[precision_col])
tmp['bootstrap_num'] = j
bootstraps.append(pd.DataFrame(tmp, [0]))
bootstraps = pd.concat(bootstraps).reset_index(drop=True)
if 'subject' in df.columns and 'subject' not in gb_cols:
bootstraps['subject'] = 'all'
return bootstraps
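def _example_precision_weighted_bootstrap():
    """Toy illustration (added for clarity): two estimates at the same
    eccentricity, one twice as precise as the other, so the bootstrapped
    precision-weighted means are pulled toward the more precise estimate (1.0
    here). Column names match the defaults of the function above; the
    stimulus_superclass label is just a placeholder.
    """
    toy = pd.DataFrame({'varea': [1, 1],
                        'stimulus_superclass': ['circular', 'circular'],
                        'eccen': [2, 2],
                        'preferred_period': [1.0, 2.0],
                        'preferred_period_precision': [2.0, 1.0]})
    return precision_weighted_bootstrap(toy, seed=0, n_bootstraps=10)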
def _summarize_1d(df, reference_frame, y, row, col, height, facetgrid_legend,
**kwargs):
"""helper function for pref_period_1d and bandwidth_1d
since they're very similar functions.
"eccen" is always plotted on the x-axis, and hue is always
"stimulus_type" (unless overwritten with kwargs)
Parameters
----------
df : pd.DataFrame
pandas DataFrame summarizing all the 1d tuning curves, as
created by the summarize_tuning_curves.py script. If you want
confidence intervals, this should be the "full" version of that
df (i.e., including the fits to each bootstrap).
y : str
which column of the df to plot on the y-axis
reference_frame : {'relative', 'absolute'}
whether the data contained here is in the relative or absolute
reference frame. this will determine both the palette used and
the hue_order
row : str
which column of the df to facet the plot's rows on
col : str
which column of the df to facet the plot's column on
    height : float
        height of each plot facet
    facetgrid_legend : bool
        whether to use the FacetGrid's legend (if True, we also set its
        title to "Stimulus class" after plotting)
kwargs :
all passed to summary_plots.main() (most of these then get
passed to sns.FacetGrid, see the docstring of summary_plots.main
for more info)
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
pal = plotting.stimulus_type_palette(reference_frame)
hue_order = plotting.get_order('stimulus_type', reference_frame)
col_order, row_order = None, None
if col is not None:
col_order = plotting.get_order(col, col_unique=df[col].unique())
if row is not None:
row_order = plotting.get_order(row, col_unique=df[row].unique())
kwargs.setdefault('xlim', (0, 12))
g = summary_plots.main(df, row=row, col=col, y=y, eccen_range=(0, 11),
hue_order=hue_order, height=height,
plot_func=[plotting.plot_median_fit, plotting.plot_median_fit,
plotting.scatter_ci_dist],
# these three end up being kwargs passed to the
# functions above, in order
x_jitter=[None, None, .2],
x_vals=[(0, 10.5), None, None],
linestyle=['--', None, None],
palette=pal, col_order=col_order,
row_order=row_order,
facetgrid_legend=facetgrid_legend, **kwargs)
g.set_xlabels('Eccentricity (deg)')
if facetgrid_legend:
g._legend.set_title("Stimulus class")
return g
def pref_period_1d(df, context='paper', reference_frame='relative',
row='session', col='subject', col_wrap=None, **kwargs):
"""Plot the preferred period of the 1d model fits.
Note that we do not restrict the input dataframe in any way, so we
will plot all data contained within it. If this is not what you want
(e.g., you only want to plot some of the tasks), you'll need to do
    the restrictions yourself before passing df to this function.
The only difference between this and the bandwidth_1d function is
what we plot on the y-axis, and how we label it.
Parameters
----------
df : pd.DataFrame
pandas DataFrame summarizing all the 1d tuning curves, as
created by the summarize_tuning_curves.py script. If you want
confidence intervals, this should be the "full" version of that
df (i.e., including the fits to each bootstrap).
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
reference_frame : {'relative', 'absolute'}, optional
whether the data contained here is in the relative or absolute
reference frame. this will determine both the palette used and
the hue_order
row : str, optional
which column of the df to facet the plot's rows on
    col : str, optional
        which column of the df to facet the plot's columns on
    col_wrap : int or None, optional
        how many facet columns to allow before wrapping onto a new row
        (passed through to the underlying FacetGrid); if not None, we use
        the full figure width
kwargs :
passed to sfp.figures._summarize_1d
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
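
    Examples
    --------
    A hypothetical usage sketch (``full_df`` is an illustrative name for the
    "full" 1d summary dataframe described above)::

        g = pref_period_1d(full_df, context='paper', col='subject',
                           col_wrap=3)
        g.fig.savefig('pref_period_1d.svg', bbox_inches='tight')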
"""
# if we're wrapping columns, then we need this to take up the full width in
# order for it to be readable
if col_wrap is not None:
fig_width = 'full'
else:
fig_width = 'half'
params, fig_width = style.plotting_style(context, figsize=fig_width)
if col_wrap is not None:
fig_width /= col_wrap
# there is, as of seaborn 0.11.0, a bug that interacts with our xtick
# label size and height (see
# https://github.com/mwaskom/seaborn/issues/2293), which causes an
# issue if col_wrap == 3. this manual setting is about the same size
# and fixes it
if col_wrap == 3:
fig_width = 2.23
elif col is not None:
fig_width /= df[col].nunique()
plt.style.use(params)
if context == 'paper':
facetgrid_legend = False
kwargs.setdefault('xlim', (0, 11.55))
kwargs.setdefault('ylim', (0, 2.1))
else:
kwargs.setdefault('ylim', (0, 4))
facetgrid_legend = True
g = _summarize_1d(df, reference_frame, 'preferred_period', row, col,
fig_width, facetgrid_legend, col_wrap=col_wrap, **kwargs)
g.set_ylabels('Preferred period (deg)')
yticks = [i for i in range(4) if i <= kwargs['ylim'][1]]
g.set(yticks=yticks)
if context != 'paper':
g.fig.suptitle("Preferred period of 1d tuning curves in each eccentricity band")
g.fig.subplots_adjust(top=.85)
else:
if len(g.axes) == 1:
# remove title if there's only one plot (otherwise it tells us which
# subject is which)
g.axes.flatten()[0].set_title('')
for ax in g.axes.flatten():
ax.axhline(color='gray', linestyle='--')
ax.axvline(color='gray', linestyle='--')
ax.set(xticks=[0, 2, 4, 6, 8, 10])
g.fig.subplots_adjust(wspace=.05, hspace=.15)
return g
def bandwidth_1d(df, context='paper', reference_frame='relative',
row='session', col='subject', units='octaves', **kwargs):
"""plot the bandwidth of the 1d model fits
Note that we do not restrict the input dataframe in any way, so we
will plot all data contained within it. If this is not what you want
(e.g., you only want to plot some of the tasks), you'll need to do
    the restrictions yourself before passing df to this function.
The only difference between this and the pref_period_1d function is
what we plot on the y-axis, and how we label it.
Parameters
----------
df : pd.DataFrame
pandas DataFrame summarizing all the 1d tuning curves, as
created by the summarize_tuning_curves.py script. If you want
confidence intervals, this should be the "full" version of that
df (i.e., including the fits to each bootstrap).
    units : {'octaves', 'degrees'}, optional
Whether to plot this data in octaves (in which case we expect it to be
flat with eccentricity) or degrees (in which case we expect it to scale
with eccentricity)
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
reference_frame : {'relative', 'absolute'}, optional
whether the data contained here is in the relative or absolute
reference frame. this will determine both the palette used and
the hue_order
row : str, optional
which column of the df to facet the plot's rows on
col : str, optional
which column of the df to facet the plot's column on
kwargs :
passed to sfp.figures._summarize_1d
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
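
    Examples
    --------
    A hypothetical sketch, plotting bandwidth in octaves (the default), with
    ``full_df`` the same "full" 1d summary dataframe used by
    pref_period_1d::

        g = bandwidth_1d(full_df, context='paper', units='octaves')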
"""
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
if context == 'paper':
facetgrid_legend = False
kwargs.setdefault('xlim', (0, 11.55))
else:
facetgrid_legend = True
if units == 'degrees':
if 'tuning_curve_bandwidth_degrees' not in df.columns:
df['tuning_curve_bandwidth_degrees'] = df.apply(utils._octave_to_degrees, 1)
y = 'tuning_curve_bandwidth_degrees'
elif units == 'octaves':
y = 'tuning_curve_bandwidth'
kwargs.setdefault('ylim', (0, 8))
g = _summarize_1d(df, reference_frame, y, row, col,
fig_width, facetgrid_legend, **kwargs)
g.set_ylabels(f'Tuning curve FWHM ({units})')
if context != 'paper':
g.fig.suptitle("Full-Width Half-Max of 1d tuning curves in each eccentricity band")
g.fig.subplots_adjust(top=.85)
elif len(g.axes) == 1:
# remove title if there's only one plot (otherwise it tells us which
# subject is which)
g.axes.flatten()[0].set_title('')
return g
def existing_studies_figure(df, y="Preferred period (deg)", legend=True, context='paper'):
"""Plot the results from existing studies
See the docstring for figures.existing_studies_df() for more
details on the information displayed in this figure.
Parameters
----------
df : pd.DataFrame
The existing studies df, as returned by the function
figures.existing_studies_df().
y : {'Preferred period (deg)', 'Preferred spatial frequency (cpd)'}
Whether to plot the preferred period or preferred spatial
frequency on the y-axis. If preferred period, the y-axis is
linear; if preferred SF, the y-axis is log-scaled (base 2). The
ylims will also differ between these two
legend : bool, optional
Whether to add a legend or not
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
g : sns.FacetGrid
The FacetGrid containing the plot
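
    Examples
    --------
    A minimal sketch, assuming existing_studies_df() can be called with its
    defaults to build the input::

        g = existing_studies_figure(existing_studies_df(),
                                    y='Preferred period (deg)', legend=True)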
"""
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
fig_height = fig_width / 1.2
pal = sns.color_palette('Set2', df.Paper.nunique())
pal = dict(zip(df.Paper.unique(), pal))
if 'Current study' in df.Paper.unique():
pal['Current study'] = (0, 0, 0)
g = sns.FacetGrid(df, hue='Paper', height=fig_height, aspect=1.2, palette=pal)
if y == "Preferred period (deg)":
g.map(plt.plot, 'Eccentricity', y, marker='o')
g.ax.set_ylim((0, 6))
elif y == "Preferred spatial frequency (cpd)":
g.map(plt.semilogy, 'Eccentricity', y, marker='o', basey=2)
g.ax.set_ylim((0, 11))
g.ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(plotting.myLogFormat))
g.ax.set_xlim((0, 20))
if context == 'poster':
g.ax.set(xticks=[0, 5, 10, 15, 20])
g.ax.set_title("Summary of human V1 fMRI results")
if legend:
g.add_legend()
# facetgrid doesn't let us set the title fontsize directly, so need to do
# this hacky work-around
g.fig.legends[0].get_title().set_size(mpl.rcParams['legend.title_fontsize'])
g.ax.set_xlabel('Eccentricity of receptive field center (deg)')
return g
def input_schematic(context='paper', prf_loc=(250, 250), prf_radius=100,
stim_freq=(.01, .03)):
"""Schematic to explain 2d model inputs.
This schematic explains the various inputs of our 2d model:
eccentricity, retinotopic angle, spatial frequency, and
orientation. It does this with a little diagram of a pRF with a
local stimulus, with arrows and labels.
The location and size of the pRF, as well as the frequency of the
stimulus, are all modifiable, and the labels and arrows will update
themselves. The arrows should behave appropriately, but it's hard to
guarantee that the labels will always look good (their positioning
is relative, so it will at least be close). You are restricted to
placing the pRF inside the first quadrant, which helps make the
possibilities more reasonable.
Parameters
----------
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
prf_loc : tuple, optional
2-tuple of floats, location of the prf. Both numbers must lie
between 0 and 500 (i.e., we require this to be in the first
quadrant). Max value on both x and y axes is 500.
prf_radius : float, optional
radius of the prf, in pixels. the local stimulus will have half
this radius
stim_freq : tuple, optional
2-tuple of floats, the (x_freq, y_freq) of the stimulus, in
cycles per pixel
Returns
-------
fig : plt.Figure
Figure containing the schematic
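
    Examples
    --------
    The arguments shown are just the defaults; moving the pRF or changing
    the stimulus frequency updates the arrows and labels automatically::

        fig = input_schematic(context='paper', prf_loc=(250, 250),
                              prf_radius=100, stim_freq=(.01, .03))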
"""
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
figsize = (fig_width, fig_width)
fig, ax = plt.subplots(1, 1, figsize=figsize)
def get_xy(distance, angle, origin=(500, 500)):
return [o + distance * func(angle) for o, func in
zip(origin, [np.cos, np.sin])]
pal = sns.color_palette('deep', 2)
if (np.array(prf_loc) > 500).any() or (np.array(prf_loc) < 0).any():
raise Exception("the coordinates of prf_loc must be between 0 and 500, but got "
f"value {prf_loc}!")
# prf_loc is in coordinates relative to the center, so we convert that here
abs_prf_loc = [500 + i for i in prf_loc]
mask = utils.create_circle_mask(*abs_prf_loc, prf_radius/2, 1001)
mask[mask==0] = np.nan
stim = mask * utils.create_sin_cpp(1001, *stim_freq)
plotting.im_plot(stim, ax=ax, origin='lower')
ax.axhline(500, c='.5')
ax.axvline(500, c='.5')
ax.set(xlim=(450, 1001), ylim=(450, 1001))
for s in ax.spines.keys():
ax.spines[s].set_visible(False)
prf = mpl.patches.Circle(abs_prf_loc, prf_radius, fc='none', ec='k', linewidth=2,
linestyle='--', zorder=10)
ax.add_artist(prf)
prf_ecc = np.sqrt(np.square(prf_loc).sum())
prf_angle = np.arctan2(*prf_loc[::-1])
e_loc = get_xy(prf_ecc/2, prf_angle + np.pi/13)
plotting.draw_arrow(ax, (500, 500), abs_prf_loc, arrowprops={'connectionstyle': 'arc3',
'arrowstyle': '<-',
'color': pal[1]})
ax.text(*e_loc, r'$r_v$')
ax.text(600, 500 + 100*np.sin(prf_angle/2), r'$\theta_v$')
angle = mpl.patches.Arc((500, 500), 200, 200, 0, 0, np.rad2deg(prf_angle),
fc='none', ec=pal[1], linestyle='-')
ax.add_artist(angle)
# so that this is the normal vector, the 7000 is just an arbitrary
# scale factor to make the vector a reasonable length
normal_len = 7000 * np.sqrt(np.square(stim_freq).sum())
normal_angle = np.arctan2(*stim_freq[::-1])
omega_loc = get_xy(normal_len, normal_angle, abs_prf_loc)
plotting.draw_arrow(ax, abs_prf_loc, omega_loc, r'$\omega_l$', {'connectionstyle': 'arc3',
'arrowstyle': '<-',
'color': pal[0]})
angle = mpl.patches.Arc(abs_prf_loc, 1.2*normal_len, 1.2*normal_len, 0, 0,
# small adjustment appears to be necessary for some
# reason -- but really only for some spatial
# frequencies.
np.rad2deg(normal_angle)-3,
fc='none', ec=pal[0], linestyle='-')
ax.add_artist(angle)
plotting.draw_arrow(ax, (abs_prf_loc[0] + normal_len, abs_prf_loc[1]), abs_prf_loc,
arrowprops={'connectionstyle': 'angle3', 'arrowstyle': '-', 'color': '.5',
'linestyle': ':'})
theta_loc = get_xy(1.3*normal_len/2, normal_angle/2, abs_prf_loc)
ax.text(*theta_loc, r'$\theta_l$')
return fig
def model_schematic(context='paper'):
"""Create model schematic.
In order to better explain the model, its predictions, and the
effects of its parameters, we create a model schematic that shows
the effects of the different p parameters (those that control the
effect of stimulus orientation and retinotopic angle on preferred
period).
This creates only the polar plots (showing the preferred period contours),
and doesn't have a legend; it's intended that you call
compose_figures.add_legend to add the graphical one (and a space has been
left for it)
Parameters
----------
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
fig : plt.Figure
Figure containing the schematic
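
    Examples
    --------
    A minimal sketch; note that the legend is added separately (see
    compose_figures.add_legend, as mentioned above)::

        fig = model_schematic(context='paper')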
"""
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
figsize = (fig_width, fig_width/3)
if context == 'paper':
orientation = np.linspace(0, np.pi, 4, endpoint=False)
elif context == 'poster':
orientation = np.linspace(0, np.pi, 2, endpoint=False)
abs_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
abs_mode_cardinals=.4, abs_mode_obliques=.1)
rel_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
rel_mode_cardinals=.4, rel_mode_obliques=.1)
full_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
abs_mode_cardinals=.4, abs_mode_obliques=.1,
rel_mode_cardinals=.4, rel_mode_obliques=.1)
# we can't use the plotting.feature_df_plot / feature_df_polar_plot
# functions because they use FacetGrids, each of which creates a
# separate figure and we want all of this to be on one figure.
fig, axes = plt.subplots(1, 3, figsize=figsize,
subplot_kw={'projection': 'polar'})
labels = [r'$p_1>p_2>0$', r'$p_3>p_4>0$',
# can't have a newline in a raw string, so have to combine them
# in the last label here
r'$p_1=p_3>$'+'\n'+r'$p_2=p_4>0$']
for i, (m, ax) in enumerate(zip([abs_model, rel_model, full_model], axes)):
plotting.model_schematic(m, [ax], [(-.1, 3)], False,
orientation=orientation)
if i != 0:
ax.set(ylabel='')
if i != 1:
ax.set(xlabel='')
else:
# want to move this closer
ax.set_xlabel(ax.get_xlabel(), labelpad=-10)
ax.set_title(labels[i])
ax.set(xticklabels=[], yticklabels=[])
fig.subplots_adjust(wspace=.075)
return fig
def model_schematic_large(context='paper'):
"""Create larger version of model schematic.
In order to better explain the model, its predictions, and the
effects of its parameters, we create a model schematic that shows
the effects of the different p parameters (those that control the
effect of stimulus orientation and retinotopic angle on preferred
period).
Note that this includes both linear and polar plots, and will probably be
way too large
Parameters
----------
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
fig : plt.Figure
Figure containing the schematic
"""
if context == 'paper':
orientation = np.linspace(0, np.pi, 4, endpoint=False)
size_scale = 1
elif context == 'poster':
size_scale = 1.5
orientation = np.linspace(0, np.pi, 2, endpoint=False)
abs_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
abs_mode_cardinals=.4, abs_mode_obliques=.1)
rel_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
rel_mode_cardinals=.4, rel_mode_obliques=.1)
full_model = model.LogGaussianDonut('full', sf_ecc_slope=.2, sf_ecc_intercept=.2,
abs_mode_cardinals=.4, abs_mode_obliques=.1,
rel_mode_cardinals=.4, rel_mode_obliques=.1)
# we can't use the plotting.feature_df_plot / feature_df_polar_plot
# functions because they use FacetGrids, each of which creates a
# separate figure and we want all of this to be on one figure.
fig = plt.figure(figsize=(size_scale*15, size_scale*15))
gs = mpl.gridspec.GridSpec(figure=fig, ncols=3, nrows=3)
projs = ['rectilinear', 'polar']
labels = [r'$p_1>p_2>0$', r'$p_3>p_4>0$', r'$p_1=p_3>p_2=p_4>0$']
axes = []
for i, m in enumerate([abs_model, rel_model, full_model]):
model_axes = [fig.add_subplot(gs[i, j], projection=projs[j]) for j in range(2)]
if i == 0:
title = True
else:
title = False
model_axes = plotting.model_schematic(m, model_axes[:2], [(-.1, 4.2), (-.1, 3)], title,
orientation=orientation)
if i != 2:
[ax.set(xlabel='') for ax in model_axes]
model_axes[0].text(size_scale*-.25, .5, labels[i], rotation=90,
transform=model_axes[0].transAxes, va='center',
fontsize=1.5*mpl.rcParams['font.size'])
axes.append(model_axes)
# this needs to be created after the model plots so we can grab
# their axes
legend_axis = fig.add_subplot(gs[1, -1])
legend_axis.legend(*axes[1][1].get_legend_handles_labels(), loc='center left')
legend_axis.axis('off')
return fig
def _catplot(df, x='subject', y='cv_loss', hue='fit_model_type', height=8, aspect=.9,
ci=68, plot_kind='strip', x_rotate=False, legend='full', orient='v', **kwargs):
"""wrapper around seaborn.catplot
several figures call seaborn.catplot and are pretty similar, so this
function bundles a bunch of the stuff we do:
1. determine the proper order for hue and x
2. determine the proper palette for hue
3. always use np.median as estimator and 'full' legend
4. optionally rotate x-axis labels (and add extra room if so)
5. add a horizontal line at the x-axis if we have both negative and
positive values
Parameters
----------
df : pd.DataFrame
pandas DataFrame
x : str, optional
which column of the df to plot on the x-axis
y : str, optional
which column of the df to plot on the y-axis
hue : str, optional
which column of the df to facet as the hue
height : float, optional
height of each plot facet
aspect : float, optional
aspect ratio of each facet
ci : int, optional
size of the confidence intervals (ignored if plot_kind=='strip')
plot_kind : {'point', 'bar', 'strip', 'swarm', 'box', 'violin', or 'boxen'}, optional
        type of plot to make, i.e., sns.catplot's kind argument. see
        that function's docstring for more details. only 'point' and
        'strip' are expected; other values might do strange things
x_rotate : bool or int, optional
whether to rotate the x-axis labels or not. if True, we rotate
by 25 degrees. if an int, we rotate by that many degrees. if
False, we don't rotate. If labels are rotated, we'll also shift
the bottom of the plot up to avoid cutting off the bottom.
legend : str or bool, optional
the legend arg to pass through to seaborn.catplot, see its
docstrings for more details
orient : {'h', 'v'}, optional
orientation of plot (horizontal or vertical)
kwargs :
passed to sns.catplot
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
hue_order = plotting.get_order(hue, col_unique=df[hue].unique())
if 'order' in kwargs.keys():
order = kwargs.pop('order')
else:
order = plotting.get_order(x, col_unique=df[x].unique())
pal = plotting.get_palette(hue, col_unique=df[hue].unique(),
doubleup='doubleup' in x)
if plot_kind == 'strip':
# want the different hues to be in a consistent order on the
# x-axis, which requires this
kwargs.update({'jitter': False, 'dodge': True})
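    # for horizontal plots, swap x and y (and invert the aspect ratio) so
    # that the categorical variable ends up on the y-axis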
if orient == 'h':
x_copy = x
x = y
y = x_copy
aspect = 1/aspect
kwargs['sharex'] = False
else:
kwargs['sharey'] = False
if 'dodge' not in kwargs.keys():
kwargs['dodge'] = 0
# facetgrid seems to ignore the defaults for these, but we want to use them
# so its consistent with other figures
gridspec_kws = {k: mpl.rcParams[f'figure.subplot.{k}']
for k in ['top', 'bottom', 'left', 'right']}
g = sns.catplot(x, y, hue, data=df, hue_order=hue_order, legend=legend, height=height,
kind=plot_kind, aspect=aspect, order=order, palette=pal, ci=ci,
estimator=np.median, orient=orient, facet_kws={'gridspec_kws': gridspec_kws},
**kwargs)
for ax in g.axes.flatten():
if x_rotate:
if x_rotate is True:
x_rotate = 25
labels = ax.get_xticklabels()
if labels:
ax.set_xticklabels(labels, rotation=x_rotate, ha='right')
if orient == 'v':
if (df[y] < 0).any() and (df[y] > 0).any():
ax.axhline(color='grey', linestyle='dashed')
else:
if (df[x] < 0).any() and (df[x] > 0).any():
ax.axvline(color='grey', linestyle='dashed')
if x_rotate:
if x == 'subject':
g.fig.subplots_adjust(bottom=.15)
else:
g.fig.subplots_adjust(bottom=.2)
return g
def cross_validation_raw(df, seed, noise_ceiling_df=None, orient='v', context='paper'):
"""plot raw cross-validation loss
This does no pre-processing of the df and plots subjects on the
x-axis, model type as hue. (NOTE: this means if there are multiple
scanning sessions for each subject, the plot will combine them,
which is probably NOT what you want)
Parameters
----------
df : pd.DataFrame
dataframe containing the output of the cross-validation
analyses, combined across sessions (i.e., the output of
combine_model_cv_summaries snakemake rule)
seed : int
seed for numpy's RNG
noise_ceiling_df : pd.DataFrame
dataframe containing the results of the noise ceiling analyses
for all subjects (i.e., the output of the
noise_ceiling_monte_carlo_overall rule)
orient : {'h', 'v'}, optional
orientation of plot (horizontal or vertical)
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
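
    Examples
    --------
    A hypothetical sketch (``cv_df`` and ``nc_df`` are illustrative names
    for the outputs of the snakemake rules named above)::

        g = cross_validation_raw(cv_df, seed=0, noise_ceiling_df=nc_df,
                                 orient='v', context='paper')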
"""
np.random.seed(seed)
height = 8
aspect = .9
s = 5
if context == 'poster':
height *= 2
aspect = 1
s *= 2
if noise_ceiling_df is not None:
merge_cols = ['subject', 'mat_type', 'atlas_type', 'session', 'task', 'vareas', 'eccen']
df = pd.merge(df, noise_ceiling_df, 'outer', on=merge_cols, suffixes=['_cv', '_noise'])
g = _catplot(df.query('loss_func in ["weighted_normed_loss", "normed_loss", "cosine_distance_scaled"]'),
legend=False, height=height, s=s, x_rotate=True, orient=orient,
col='loss_func')
if noise_ceiling_df is not None:
g.map_dataframe(plotting.plot_noise_ceiling, 'subject', 'loss')
g.fig.suptitle("Cross-validated loss across subjects")
if orient == 'v':
g.set(ylabel="Cross-validated loss", xlabel="Subject")
elif orient == 'h':
g.set(xlabel="Cross-validated loss", ylabel="Subject")
g.add_legend()
g._legend.set_title("Model type")
ylims = [(0, .06), (0, .0022), (0, .0022)]
for i, ax in enumerate(g.axes.flatten()):
ax.set(ylim=ylims[i])
return g
def cross_validation_demeaned(df, seed, remeaned=False, orient='v', context='paper'):
"""plot demeaned cross-validation loss
This function demeans the cross-validation loss on a
subject-by-subject basis, then plots subjects on the x-axis, model
type as hue. (NOTE: this means if there are multiple scanning
sessions for each subject, the plot will combine them, which is
probably NOT what you want)
Parameters
----------
df : pd.DataFrame
dataframe containing the output of the cross-validation
analyses, combined across sessions (i.e., the output of
combine_model_cv_summaries snakemake rule)
seed : int
seed for numpy's RNG
remeaned : bool, optional
whether to use the demeaned cross-validation loss or the
remeaned one. Remeaned has the mean across subjects added back
to it, so that there won't be any negative y-values. This will
only affect the values on the y-axis; the relative placements of
the points will all be the same.
orient : {'h', 'v'}, optional
orientation of plot (horizontal or vertical)
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
"""
np.random.seed(seed)
height = 8
aspect = .9
if context == 'poster':
height *= 2
aspect = 1
df = _demean_df(df)
if remeaned:
name = 'remeaned'
else:
name = 'demeaned'
g = _catplot(df, y=f'{name}_cv_loss', height=height, aspect=aspect, x_rotate=True,
orient=orient, col='loss_func')
g.fig.suptitle(f"{name.capitalize()} cross-validated loss across subjects")
if orient == 'v':
g.set(ylabel=f"Cross-validated loss ({name} by subject)", xlabel="Subject")
elif orient == 'h':
g.set(xlabel=f"Cross-validated loss ({name} by subject)", ylabel="Subject")
g._legend.set_title("Model type")
return g
def cross_validation_model(df, seed, plot_kind='strip', remeaned=False, noise_ceiling_df=None,
orient='v', sort=False, doubleup=False, context='paper'):
"""plot demeaned cross-validation loss, as function of model type
This function demeans the cross-validation loss on a
subject-by-subject basis, then plots model type on the x-axis,
subject as hue. (NOTE: this means if there are multiple scanning
sessions for each subject, the plot will combine them, which is
probably NOT what you want)
Parameters
----------
df : pd.DataFrame
dataframe containing the output of the cross-validation
analyses, combined across sessions (i.e., the output of
combine_model_cv_summaries snakemake rule)
seed : int
seed for numpy's RNG
plot_kind : {'strip', 'point'}, optional
whether to create a strip plot (each subject as a separate
point) or a point plot (combine across subjects, plotting the
median and bootstrapped 68% CI)
remeaned : bool, optional
whether to use the demeaned cross-validation loss or the
remeaned one. Remeaned has the mean across subjects added back
to it, so that there won't be any negative y-values. This will
only affect the values on the y-axis; the relative placements of
the points (and the size of the error bars if
`plot_kind='point'`) will all be the same.
noise_ceiling_df : pd.DataFrame
dataframe containing the results of the noise ceiling analyses
for all subjects (i.e., the output of the
noise_ceiling_monte_carlo_overall rule)
orient : {'h', 'v'}, optional
orientation of plot (horizontal or vertical)
sort : bool, optional
whether to sort the models by the median loss of the
weighted_normed_loss or show them in numbered order
doubleup : bool, optional
whether to "double-up" models so that we plot two models on the same
        row if they're identical except for fitting A3/A4. this then shows
        the version fitting A3/A4 in a fainter shade of the color used for
        the version that doesn't.
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
g : sns.FacetGrid
seaborn FacetGrid object containing the plot
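
    Examples
    --------
    A hypothetical sketch, comparing model types with each subject as a
    separate point and the noise ceiling overlaid (``cv_df`` and ``nc_df``
    as in cross_validation_raw)::

        g = cross_validation_model(cv_df, seed=0, plot_kind='strip',
                                   noise_ceiling_df=nc_df, sort=True)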
"""
kwargs = {}
np.random.seed(seed)
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
if doubleup:
height = fig_width * .855
else:
height = fig_width
aspect = 1
if noise_ceiling_df is not None:
merge_cols = ['subject', 'mat_type', 'atlas_type', 'session', 'task', 'vareas', 'eccen']
noise_ceiling_df = noise_ceiling_df.groupby(merge_cols).median().reset_index()
df = pd.merge(df, noise_ceiling_df, 'inner', on=merge_cols, suffixes=['_cv', '_noise'])
extra_cols = ['loss']
else:
extra_cols = []
df = _demean_df(df, extra_cols=extra_cols)
if plot_kind == 'strip':
hue = 'subject'
legend_title = "Subject"
legend = 'full'
elif plot_kind == 'point':
hue = 'fit_model_type'
legend = False
if remeaned:
name = 'remeaned'
else:
name = 'demeaned'
if sort:
gb = df.query("loss_func == 'weighted_normed_loss'").groupby('fit_model_type')
kwargs['order'] = gb[f'{name}_cv_loss'].median().sort_values(ascending=False).index
if doubleup:
df['fit_model_doubleup'] = df.fit_model_type.map(dict(zip(plotting.MODEL_PLOT_ORDER,
plotting.MODEL_PLOT_ORDER_DOUBLEUP)))
x = 'fit_model_doubleup'
if noise_ceiling_df is not None:
nc_map = {k: k for k in range(1, 8)}
nc_map.update({10: 8, 12: 9})
df['fit_model_nc'] = df.fit_model_doubleup.map(nc_map)
else:
x = 'fit_model_type'
if noise_ceiling_df is not None:
df['fit_model_nc'] = df.fit_model_type
g = _catplot(df, x=x, y=f'{name}_cv_loss', hue=hue,
col='loss_func', plot_kind=plot_kind, height=height,
aspect=aspect, orient=orient, legend=legend, **kwargs)
title = f"{name.capitalize()} cross-validated loss across model types"
if noise_ceiling_df is not None:
g.map_dataframe(plotting.plot_noise_ceiling, 'fit_model_nc', f'{name}_loss', ci=0,
orient=orient)
title += "\n Median noise ceiling shown as blue line"
if orient == 'v':
g.set(ylabel=f"Cross-validated loss ({name} by subject)", xlabel="Model type")
elif orient == 'h':
g.set(xlabel=f"Cross-validated loss ({name} by subject)", ylabel="")
# if plot_kind=='point', then there is no legend, so the following
# would cause an error
if plot_kind == 'strip':
g._legend.set_title(legend_title)
# don't want title in the paper version
if context != 'paper':
g.fig.suptitle(title)
else:
if orient == 'h':
# also want to remove the y axis, since it's duplicating the one from
# the other figure
for ax in g.axes.flatten():
ax.yaxis.set_visible(False)
ax.spines['left'].set_visible(False)
if plot_kind == 'point':
# this way, the ylims line up whether or not we plotted the
# noise ceiling line
if doubleup:
ax.set_ylim((8.5, -0.5))
else:
ax.set_ylim((13.5, -0.5))
return g
def model_types(context='paper', palette_type='model', annotate=False,
order=None, doubleup=False):
"""Create plot showing which model fits which parameters.
We have 11 different parameters, which might seem like a lot, so we
do cross-validation to determine whether they're all necessary. This
plot shows which parameters are fit by each model, in a little
table.
Parameters
----------
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
palette_type : {'model', 'simple', 'simple_r', seaborn palette name}, optional
palette to use for this plot. if 'model', the parameter each
model fits is shown in its color (as used in other plots). If
'simple' or 'simple_r', we'll use a white/black colormap with
either black (if 'simple') or white (if 'simple_r') showing the
parameter is fit. Else, should be a str giving a seaborn palette
name, i.e., an arg that can be passed to seaborn.color_palette.
annotate : bool, optional
whether to annotate the schematic with info on the parameter
categories (e.g., period/amplitude, eccentricity/orientation,
etc)
    order : pandas index or None, optional
        If None, we plot the models in the default order. Else, should be an
        index object that gives the order to plot them in (from top to bottom).
    doubleup : bool, optional
        whether to "double-up" models, so that two models are shown on the
        same row if they're identical except for fitting A3/A4 (as in
        cross_validation_model)
Returns
-------
fig : plt.Figure
The figure with the plot on it
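
    Examples
    --------
    A minimal sketch, showing each model's parameters in that model's color
    and annotating the parameter categories::

        fig = model_types(context='paper', palette_type='model',
                          annotate=True)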
"""
params, fig_width = style.plotting_style(context, figsize='half')
# these ticks don't add anything and are confusing
params['xtick.bottom'] = False
params['ytick.left'] = False
plt.style.use(params)
figsize = (fig_width, fig_width)
extra_space = 0
model_names = plotting.MODEL_PLOT_ORDER
parameters = plotting.PLOT_PARAM_ORDER
model_variants = np.zeros((len(model_names), len(parameters)))
if palette_type == 'model':
pal = plotting.get_palette('fit_model_type', col_unique=model_names,
doubleup=doubleup)
try:
pal = pal.tolist()
except AttributeError:
# then it's already a list
pass
pal = [(1, 1, 1)] + pal
fill_vals = dict(zip(range(len(model_names)), range(1, len(model_names)+1)))
else:
if palette_type.startswith('simple'):
black, white = [(0, 0, 0), (1, 1, 1)]
if palette_type.endswith('_r'):
pal = [black, white]
else:
pal = [white, black]
else:
pal = sns.color_palette(palette_type, 2)
fill_vals = dict(zip(range(len(model_names)), len(model_names) * [True]))
if not doubleup:
model_variants[0, [0, 2]] = fill_vals[0]
model_variants[1, [0, 1]] = fill_vals[1]
model_variants[2, [0, 1, 2]] = fill_vals[2]
model_variants[3, [0, 1, 2, 3, 4]] = fill_vals[3]
model_variants[4, [0, 1, 2, 5, 6]] = fill_vals[4]
model_variants[5, [0, 1, 2, 3, 4, 5, 6]] = fill_vals[5]
model_variants[6, [0, 1, 2, 7, 8]] = fill_vals[6]
model_variants[7, [0, 1, 2, 9, 10]] = fill_vals[7]
model_variants[8, [0, 1, 2, 7, 8, 9, 10]] = fill_vals[8]
model_variants[9, [0, 1, 2, 3, 4, 7, 8]] = fill_vals[9]
model_variants[10, [0, 1, 2, 5, 6, 9, 10]] = fill_vals[10]
model_variants[11, [0, 1, 2, 3, 4, 5, 6, 7, 8]] = fill_vals[11]
model_variants[12, [0, 1, 2, 3, 4, 5, 6, 9, 10]] = fill_vals[12]
model_variants[13, :] = fill_vals[13]
# while in theory, we want square to be True here too, we messed with
# all the size in such a way that it works with it set to False
square = False
else:
model_variants[0, [0, 2]] = fill_vals[0]
model_variants[1, [0, 1]] = fill_vals[1]
model_variants[2, [0, 1, 2]] = fill_vals[2]
model_variants[3, [0, 1, 2, 3, 4]] = fill_vals[3]
model_variants[4, [0, 1, 2, 5, 6]] = fill_vals[4]
model_variants[5, [0, 1, 2, 3, 4, 5, 6]] = fill_vals[5]
model_variants[6, [0, 1, 2, 7, 8]] = fill_vals[6]
model_variants[2, [9, 10]] = fill_vals[7]
model_variants[6, [9, 10]] = fill_vals[8]
model_variants[9, [0, 1, 2, 3, 4, 7, 8]] = fill_vals[9]
model_variants[4, [9, 10]] = fill_vals[10]
model_variants[11, [0, 1, 2, 3, 4, 5, 6, 7, 8]] = fill_vals[11]
model_variants[5, [9, 10]] = fill_vals[12]
model_variants[11, [9, 10]] = fill_vals[13]
# drop the rows that are all 0s
model_variants = model_variants[~(model_variants==0).all(1)]
warnings.warn("when doubling-up, we just use sequential numbers for models "
"(the numbers therefore have a different meaning than for "
"non-doubled-up version)")
model_names = np.arange(1, model_variants.shape[0]+1)
square = True
model_variants = pd.DataFrame(model_variants, model_names, parameters)
if order is not None:
model_variants = model_variants.reindex(order)
fig = plt.figure(figsize=figsize)
ax = sns.heatmap(model_variants, cmap=pal, cbar=False, square=square)
ax.set_yticklabels(model_variants.index, rotation=0)
ax.set_ylabel("Model type")
# we want the labels on the top here, not the bottom
ax.tick_params(labelbottom=False, labeltop=True, pad=-2)
if annotate:
arrowprops = {'connectionstyle': 'bar', 'arrowstyle': '-', 'color': '0'}
        # abbreviated labels: Eccentricity, Absolute, Relative, Absolute, Relative
        text = ['Ecc', 'Abs', 'Rel', 'Abs', 'Rel']
for i, pos in enumerate(range(1, 10, 2)):
plotting.draw_arrow(ax, ((pos+.5)/11, 1.08+extra_space),
((pos+1.5)/11, 1.08+extra_space), arrowprops=arrowprops,
xycoords='axes fraction', textcoords='axes fraction')
ax.text((pos+1)/11, 1.11+extra_space, text[i], transform=ax.transAxes,
ha='center', va='bottom')
arrowprops['connectionstyle'] = f'bar,fraction={.3/5}'
plotting.draw_arrow(ax, (1.5/11, 1.17+extra_space), (6.5/11, 1.17+extra_space),
arrowprops=arrowprops,
xycoords='axes fraction', textcoords='axes fraction')
ax.text(4/11, 1.22+extra_space, 'Period', transform=ax.transAxes,
ha='center', va='bottom')
arrowprops['connectionstyle'] = f'bar,fraction={.3/3}'
plotting.draw_arrow(ax, (7.5/11, 1.17+extra_space), (10.5/11, 1.17+extra_space),
arrowprops=arrowprops,
xycoords='axes fraction', textcoords='axes fraction')
ax.text(9/11, 1.22+extra_space, 'Amplitude', transform=ax.transAxes,
ha='center', va='bottom')
return fig
def model_parameters(df, plot_kind='point', visual_field='all', fig=None, add_legend=True,
context='paper', **kwargs):
"""plot model parameter values, across subjects
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects. note that this should first have gone through
prep_model_df, which renames the values of the model_parameter
columns so they're more pleasant to look at on the plot and adds
a column, param_category, which enables us to break up the
figure into three subplots
plot_kind : {'point', 'strip', 'dist'}, optional
What type of plot to make. If 'point' or 'strip', it's assumed
that df contains only the fits to the median data across
bootstraps (thus, one value per subject per parameter); if
'dist', it's assumed that df contains the fits to all bootstraps
(thus, 100 values per subject per parameter). this function
should run if those are not true, but it will look weird:
- 'point': point plot, so show 68% CI across subjects
- 'strip': strip plot, so show each subject as a separate point
        - 'dist': distribution, show each subject as a separate
point with their own 68% CI across bootstraps
visual_field : str, optional
in addition to fitting the model across the whole visual field,
we also fit the model to some portions of it (the left half,
right half, etc). this arg allows us to easily modify the title
of the plot to make it clear which portion of the visual field
we're plotting. If 'all' (the default), we don't modify the
title at all, otherwise we append "in {visual_field} visual
field" to it.
fig : plt.Figure or None, optional
the figure to plot on. If None, we create a new figure. Intended
use case for this is to plot the data from multiple sessions on
the same axes (with different display kwargs), in order to
directly compare how parameter values change.
add_legend : bool, optional
whether to add a legend or not. If True, will add just outside
the right-most axis
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
kwargs :
Passed directly to the plotting function, which depends on the
value of plot_kind
Returns
-------
fig : plt.Figure
        Figure containing the plot
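
    Examples
    --------
    A hypothetical sketch (``param_df`` and ``param_df_2`` are illustrative
    names for dataframes that have already gone through prep_model_df)::

        fig = model_parameters(param_df, plot_kind='point')
        # overlay a second session on the same axes, without a second legend
        fig = model_parameters(param_df_2, plot_kind='point', fig=fig,
                               add_legend=False)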
"""
params, fig_width = style.plotting_style(context, figsize='full')
plt.style.use(params)
# in order to make the distance between the hues appear roughly
# equivalent, need to set the ax_xlims in a particular way
n_ori_params = df.query("param_category=='orientation'").model_parameter.nunique()
ax_xlims = [[-.5, .5], [-.5, 1.5], [-.5, n_ori_params - .5]]
yticks = [[0, .5, 1, 1.5, 2, 2.5], [0, .1, .2, .3, .4], [-.03, 0, .03, .06, .09]]
axhline = [2]
if fig is None:
fig, axes = plt.subplots(1, 3, figsize=(fig_width, fig_width/2),
gridspec_kw={'width_ratios': [.12, .25, .63],
'wspace': .3})
else:
axes = fig.axes
order = plotting.get_order('model_parameter', col_unique=df.model_parameter.unique())
if plot_kind == 'point':
pal = plotting.get_palette('model_parameter', col_unique=df.model_parameter.unique(),
as_dict=True)
elif plot_kind == 'strip':
# then we're showing this across subjects
if 'subject' in df.columns and df.subject.nunique() > 1:
hue = 'subject'
# this is sub-groupaverage
else:
hue = 'groupaverage_seed'
pal = plotting.get_palette(hue, col_unique=df[hue].unique(), as_dict=True)
hue_order = plotting.get_order(hue, col_unique=df[hue].unique())
elif plot_kind == 'dist':
# then we're showing this across subjects
if 'subject' in df.columns and df.subject.nunique() > 1:
pal = plotting.get_palette('subject', col_unique=df.subject.unique(), as_dict=True)
hue_order = plotting.get_order('subject', col_unique=df.subject.unique())
gb_col = 'subject'
# copied from how seaborn's stripplot handles this, by looking
# at lines 368 and 1190 in categorical.py (version 0.9.0)
dodge = np.linspace(0, .8 - (.8 / df.subject.nunique()), df.subject.nunique())
dodge -= dodge.mean()
yticks = [[0, .5, 1, 1.5, 2, 2.5, 3.0],
[-.1, 0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1],
[-.2, -.1, 0, .1, .2, .3]]
ax_xlims = [[-1, 1], [-1, 2], [-.75, n_ori_params-.5]]
axhline += [1]
# else we've combined across all subjects
else:
pal = plotting.get_palette('model_parameter', col_unique=df.model_parameter.unique(),
as_dict=True)
gb_col = 'model_parameter'
dodge = np.zeros(df.model_parameter.nunique())
for i, ax in enumerate(axes):
cat = ['sigma', 'eccen', 'orientation'][i]
tmp = df.query("param_category==@cat")
ax_order = [i for i in order if i in tmp.model_parameter.unique()]
if plot_kind == 'point':
sns.pointplot('model_parameter', 'fit_value', 'model_parameter', data=tmp,
estimator=np.median, ax=ax, order=ax_order, palette=pal, ci=68, **kwargs)
elif plot_kind == 'strip':
# want to make sure that the different hues end up in the
# same order everytime, which requires doing this with
# jitter and dodge
sns.stripplot('model_parameter', 'fit_value', hue, data=tmp, ax=ax,
order=ax_order, palette=pal, hue_order=hue_order, jitter=False,
dodge=True, **kwargs)
elif plot_kind == 'dist':
handles, labels = [], []
for j, (n, g) in enumerate(tmp.groupby(gb_col)):
dots, _, _ = plotting.scatter_ci_dist('model_parameter', 'fit_value', data=g,
label=n, ax=ax, color=pal[n],
x_dodge=dodge[j], x_order=ax_order, **kwargs)
handles.append(dots)
labels.append(n)
ax.set(xlim=ax_xlims[i], yticks=yticks[i])
ax.tick_params(pad=0)
if ax.legend_:
ax.legend_.remove()
if i == 2:
if add_legend:
if plot_kind == 'dist':
legend = ax.legend(handles, labels, loc='lower center', ncol=3,
borderaxespad=0, frameon=False,
bbox_to_anchor=(.49, -.3), bbox_transform=fig.transFigure)
else:
legend = ax.legend(loc=(1.01, .3), borderaxespad=0, frameon=False)
# explicitly adding the legend artist allows us to add a
# second legend if we want
ax.add_artist(legend)
if i in axhline:
ax.axhline(color='grey', linestyle='dashed')
if i == 0:
ax.set(ylabel='Parameter value')
fig.text(.5, 0, "Parameter", ha='center')
if context != 'paper':
# don't want title in paper context
suptitle = "Model parameters"
if visual_field != 'all':
suptitle += f' in {visual_field} visual field'
fig.suptitle(suptitle)
fig.subplots_adjust(top=.85)
return fig
def model_parameters_pairplot(df, drop_outlier=False):
"""plot pairwise distribution of model parameters
There's one very obvious outlier (sub-wlsubj007, ses-04, bootstrap
41), where the $a$ parameter (sf_ecc_slope) is less than 0 (other
parameters are also weird). If you want to drop that, set
drop_outlier=True
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects. note that this should first have gone through
prep_model_df, which renames the values of the model_parameter
columns so they're more pleasant to look at on the plot
drop_outlier : bool, optional
whether to drop the outlier or not (see above)
Returns
-------
g : sns.PairGrid
the PairGrid containing the plot
"""
pal = plotting.get_palette('subject', col_unique=df.subject.unique())
pal = dict(zip(df.subject.unique(), pal))
df = pd.pivot_table(df, index=['subject', 'bootstrap_num'], columns='model_parameter',
values='fit_value').reset_index()
# this is a real outlier: one subject, one bootstrap (see docstring)
if drop_outlier:
df = df[df.get('$a$') > 0]
g = sns.pairplot(df, hue='subject', vars=plotting.PLOT_PARAM_ORDER, palette=pal)
for ax in g.axes.flatten():
ax.axhline(color='grey', linestyle='dashed')
ax.axvline(color='grey', linestyle='dashed')
return g
def model_parameters_compare_plot(df, bootstrap_df):
"""plot comparison of model parameters from bootstrap vs median fits
we have two different ways of fitting the data: to all of the
bootstraps or just to the median across bootstraps. if we compare
the resulting parameter values, they shouldn't be that different,
which is what we do here.
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects. note that this should first have gone through
prep_model_df, which renames the values of the model_parameter
columns so they're more pleasant to look at on the plot
bootstrap_df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects and bootstraps. note that this should first have gone
through prep_model_df, which renames the values of the
model_parameter columns so they're more pleasant to look at on
the plot
Returns
-------
g : sns.FacetGrid
the FacetGrid containing the plot
"""
pal = plotting.get_palette('subject', col_unique=df.subject.unique(), as_dict=True)
order = plotting.get_order('subject', col_unique=df.subject.unique())
compare_cols = ['model_parameter', 'subject', 'session', 'task']
compare_df = df[compare_cols + ['fit_value']]
tmp = bootstrap_df[compare_cols + ['fit_value']].rename(columns={'fit_value': 'fit_value_bs'})
compare_df = pd.merge(tmp, compare_df, on=compare_cols)
compare_df = compare_df.sort_values(compare_cols)
g = sns.FacetGrid(compare_df, col='model_parameter', hue='subject', col_wrap=4, sharey=False,
aspect=2.5, height=3, col_order=plotting.PLOT_PARAM_ORDER, hue_order=order,
palette=pal)
g.map_dataframe(plotting.scatter_ci_dist, 'subject', 'fit_value_bs')
g.map_dataframe(plt.scatter, 'subject', 'fit_value')
for ax in g.axes.flatten():
ax.set_xticklabels(ax.get_xticklabels(), rotation=25, ha='right')
return g
def training_loss_check(df, hue='test_subset', thresh=.2):
"""check last epoch training loss
in order to check that one of the models didn't get stuck in a local
optimum in, e.g., one of the cross-validation folds or bootstraps,
we here plot the loss for each subject and model, with median and
68% CI across batches. they should hopefully look basically all the
same
Parameters
----------
df : pd.DataFrame
dataframe with the last epoch loss, as created by
`analyze_model.collect_final_loss`
hue : str, optional
which df column to use as the hue arg for the FacetGrid
thresh : float, optional
the loss threshold for getting stuck in local optima. we
annotate the plot with any training sessions whose median
training loss on the last epoch is above this value
Returns
-------
g : sns.FacetGrid
the FacetGrid containing the plot
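
    Examples
    --------
    A hypothetical sketch (``loss_df`` is an illustrative name for the
    output of analyze_model.collect_final_loss)::

        g = training_loss_check(loss_df, hue='test_subset', thresh=.2)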
"""
# to make sure we show the full dataframe below, from
# https://stackoverflow.com/a/42293737
pd.set_option('display.max_columns', None)
    # from https://stackoverflow.com/a/25352191; newer pandas versions want
    # None (rather than the deprecated -1) to mean "no limit"
    pd.set_option('display.max_colwidth', None)
df.fit_model_type = df.fit_model_type.map(dict(zip(plotting.MODEL_ORDER,
plotting.MODEL_PLOT_ORDER_FULL)))
order = plotting.get_order('fit_model_type', col_unique=df.fit_model_type.unique())
col_order = plotting.get_order('subject', col_unique=df.subject.unique())
g = sns.FacetGrid(df, col='subject', hue=hue, col_wrap=4, sharey=False,
aspect=2.5, height=3, col_order=col_order)
g.map_dataframe(plotting.scatter_ci_dist, 'fit_model_type', 'loss', x_jitter=True,
x_order=order)
for ax in g.axes.flatten():
ax.set_xticklabels(ax.get_xticklabels(), rotation=25, ha='right')
if ax.get_ylim()[1] > thresh:
ax.hlines(thresh, 0, len(df.fit_model_type.unique())-1, 'gray', 'dashed')
# find those training sessions with loss above the threshold
above_thresh = df.groupby(['subject', 'fit_model_type', hue]).loss.median()
above_thresh = above_thresh.reset_index().query('loss > @thresh')
if len(above_thresh) > 0:
g.fig.text(1.01, .5, ("Probable local optima (median last epoch training loss > "
f"{thresh}):\n" + str(above_thresh)))
g.fig.suptitle("Last epoch median training loss (with 68% CI across batches) on each CV fold")
g.fig.subplots_adjust(top=.92)
return g
def feature_df_plot(df, avg_across_retinal_angle=False, reference_frame='relative',
feature_type='pref-period', visual_field='all', context='paper',
col_wrap=None, scatter_ref_pts=False, **kwargs):
"""plot model predictions based on parameter values
This function is used to create plots showing the preferred period
as a function of eccentricity, as given by the model. Right now, it
always plots each subject separately, and will plot confidence
intervals based on bootstraps if possible (i.e., if df contains the
column 'bootstrap_num'). You can optionally average over the
retinotopic angles or keep them separate, and you can plot the
predictions for stimuli in the relative or absolute reference frame.
    This function converts the model parameter value df into the
feature_df by calling analyze_model.create_feature_df.
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects.
avg_across_retinal_angle : bool, optional
whether to average across the different retinotopic angles
(True) or plot each of them on separate subplots (False). only
relevant if feature_type=='pref-period' (others all plot
something as function of retinotopic angle on polar plots)
reference_frame : {'relative', 'absolute'}, optional
whether the you want to plot the predictions for stimuli in the
relative or absolute reference frame (i.e., annuli and pinwheels
or constant gratings).
feature_type : {'pref-period', 'pref-period-contour', 'iso-pref-period', 'max-amp'}
what type of feature to create the plot for:
- pref-period: plot preferred period as a function of
eccentricity (on a Cartesian plot)
- pref-period-contour: plot preferred period as a function of
retinotopic angle at several different eccentricities (on a
polar plot)
- iso-pref-period: plot iso-preferred period lines as a function
of retinotopic angle, for several different preferred periods
(on a polar plot)
- max-amp: plot max amplitude as a function of retinotopic angle
(on a polar plot)
visual_field : str, optional
in addition to fitting the model across the whole visual field,
we also fit the model to some portions of it (the left half,
right half, etc). this arg allows us to easily modify the title
of the plot to make it clear which portion of the visual field
we're plotting. If 'all' (the default), we don't modify the
title at all, otherwise we append "in {visual_field} visual
field" to it.
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
col_wrap : int or None, optional
col_wrap argument to pass through to seaborn FacetGrid
scatter_ref_pts : bool, optional
if True, we plot black points every 45 degrees on the polar plots to
serve as a reference (only used in paper context). if False, do
nothing.
kwargs :
Passed to plotting.feature_df_plot
Returns
-------
g : sns.FacetGrid
the FacetGrid containing the plot
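
    Examples
    --------
    A hypothetical sketch, plotting preferred period as a function of
    eccentricity, averaged across retinotopic angle (``param_df`` is an
    illustrative name for the dataframe of model parameter values across
    subjects)::

        g = feature_df_plot(param_df, avg_across_retinal_angle=True,
                            reference_frame='relative',
                            feature_type='pref-period')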
"""
aspect = 1
params, fig_width = style.plotting_style(context, figsize='full')
plt.style.use(params)
kwargs.setdefault('top', .9)
axes_titles = True
title_kwargs = {}
adjust_kwargs = {}
if df.bootstrap_num.nunique() > 1 or 'groupaverage_seed' in df.columns:
# then we have each subject's bootstraps or the groupaverage
# subject (which has also already been bootstrapped), so we use
# scatter_ci_dist to plot across them
plot_func = plotting.scatter_ci_dist
kwargs.update({'draw_ctr_pts': False, 'ci_mode': 'fill', 'join': True})
else:
plot_func = sns.lineplot
# in this case, we have the individual fits
if 'groupaverage_seed' not in df.columns:
gb_cols = ['subject', 'bootstrap_num']
col = 'subject'
pre_boot_gb_cols = ['subject', 'reference_frame', 'Stimulus type', 'bootstrap_num',
'Eccentricity (deg)']
# in this case, we have the sub-groupaverage
else:
gb_cols = ['groupaverage_seed']
col = None
pre_boot_gb_cols = ['reference_frame', 'Stimulus type', 'groupaverage_seed',
'Eccentricity (deg)']
# if we're faceting over something, need to separate it out when creating
# the feature df
if 'hue' in kwargs.keys():
gb_cols += [kwargs['hue']]
pre_boot_gb_cols += [kwargs['hue']]
if col is None or df.subject.nunique() == 1:
facetgrid_legend = False
suptitle = False
axes_titles = False
split_oris = True
col = 'orientation_type'
ori_map = {k: ['cardinals', 'obliques'][i%2] for i, k in
enumerate(np.linspace(0, np.pi, 4, endpoint=False))}
pre_boot_gb_cols += [col]
if feature_type == 'pref-period':
kwargs.setdefault('height', (fig_width/2) / aspect)
else:
# the polar plots have two subplots, so they're half the height of the
# pref-period one in order to get the same width
kwargs.setdefault('height', (fig_width/4) / aspect)
else:
if context != 'paper':
facetgrid_legend = True
suptitle = True
else:
facetgrid_legend = False
suptitle = False
split_oris = False
if col_wrap is not None:
# there is, as of seaborn 0.11.0, a bug that interacts with our
# xtick label size and height (see
# https://github.com/mwaskom/seaborn/issues/2293), which causes an
# issue if col_wrap == 3. this manual setting is about the same
# size and fixes it
if col_wrap == 3:
kwargs.setdefault('height', 2.23)
else:
kwargs.setdefault('height', (fig_width / col_wrap) / aspect)
if feature_type == 'pref-period':
if context == 'poster':
aspect = 1.3
else:
kwargs.setdefault('ylim', (0, 2.1))
kwargs.setdefault('xlim', (0, 11.55))
if avg_across_retinal_angle:
pre_boot_gb_func = 'mean'
row = None
else:
pre_boot_gb_func = None
row = 'Retinotopic angle (rad)'
if split_oris:
orientation = np.linspace(0, np.pi, 2, endpoint=False)
else:
orientation = np.linspace(0, np.pi, 4, endpoint=False)
df = analyze_model.create_feature_df(df, reference_frame=reference_frame, gb_cols=gb_cols,
orientation=orientation)
if split_oris:
df['orientation_type'] = df['Orientation (rad)'].map(ori_map)
g = plotting.feature_df_plot(df, col=col, row=row, pre_boot_gb_func=pre_boot_gb_func,
plot_func=plot_func, aspect=aspect,
pre_boot_gb_cols=pre_boot_gb_cols, col_wrap=col_wrap,
facetgrid_legend=facetgrid_legend, **kwargs)
else:
kwargs.update({'all_tick_labels': ['r'], })
if context == 'paper':
orientation = np.linspace(0, np.pi, 4, endpoint=False)
kwargs.update({'ylabelpad': 10, 'theta_ticklabels': [], 'wspace': .1,
'hspace': .1})
elif context == 'poster':
orientation = np.linspace(0, np.pi, 2, endpoint=False)
kwargs.update({'top': .76, 'r_ticks': [.25, .5, .75, 1], 'wspace': .3,
'r_ticklabels': ['', .5, '', 1], 'ylabelpad': 60,
'hspace': .3})
if feature_type == 'pref-period-contour':
rticks = np.arange(.25, 1.5, .25)
if context == 'paper':
rticklabels = ['' for i in rticks]
else:
rticklabels = [j if j == 1 else '' for i, j in enumerate(rticks)]
if not split_oris:
# there's a weird interaction where if we set the rticks before
# calling scatter (which we do when split_oris is True), it
            # completely messes up the plot. unsure why.
kwargs.update({'r_ticks': rticks, 'r_ticklabels': rticklabels})
df = analyze_model.create_feature_df(df, reference_frame=reference_frame,
eccentricity=[5], orientation=orientation,
retinotopic_angle=np.linspace(0, 2*np.pi, 49),
gb_cols=gb_cols)
if split_oris:
df['orientation_type'] = df['Orientation (rad)'].map(ori_map)
kwargs['ylim'] = (0, 1.25)
row = 'Eccentricity (deg)'
if df[row].nunique() == 1:
row = None
r = 'Preferred period (deg)'
g = plotting.feature_df_polar_plot(df, col=col, row=row,
r=r, plot_func=plot_func, col_wrap=col_wrap,
aspect=aspect,
pre_boot_gb_cols=pre_boot_gb_cols,
facetgrid_legend=facetgrid_legend, **kwargs)
if context == 'paper':
for axes in g.axes:
axes[0].set_ylabel('Preferred\nperiod (deg)')
elif feature_type == 'iso-pref-period':
if context == 'poster':
kwargs.update({'r_ticks': list(range(1, 9)),
'r_ticklabels': [i if i%2==0 else '' for i in range(1, 9)]})
if split_oris:
df['orientation_type'] = df['Orientation (rad)'].map(ori_map)
df = analyze_model.create_feature_df(df, 'preferred_period_contour', period_target=[1],
reference_frame=reference_frame,
orientation=orientation, gb_cols=gb_cols)
r = 'Eccentricity (deg)'
row = 'Preferred period (deg)'
if df[row].nunique() == 1:
row = None
g = plotting.feature_df_polar_plot(df, col=col, r=r, row=row,
plot_func=plot_func, aspect=aspect,
title='ISO-preferred period contours',
pre_boot_gb_cols=pre_boot_gb_cols,
col_wrap=col_wrap,
facetgrid_legend=facetgrid_legend, **kwargs)
elif feature_type == 'max-amp':
rticks = np.arange(.25, 1.5, .25)
if context == 'paper':
rticklabels = ['' for i in rticks]
else:
rticklabels = [j if j == 1 else '' for i, j in enumerate(rticks)]
if not split_oris:
# there's a weird interaction where if we set the rticks before
# calling scatter (which we do when split_oris is True), it
            # completely messes up the plot. unsure why.
kwargs.update({'r_ticks': rticks, 'r_ticklabels': rticklabels})
df = analyze_model.create_feature_df(df, 'max_amplitude', orientation=orientation,
reference_frame=reference_frame, gb_cols=gb_cols)
if split_oris:
df['orientation_type'] = df['Orientation (rad)'].map(ori_map)
kwargs['ylim'] = (0, 1.15)
r = 'Max amplitude'
g = plotting.feature_df_polar_plot(df, col=col, r=r,
aspect=aspect, plot_func=plot_func,
title='Relative amplitude', col_wrap=col_wrap,
pre_boot_gb_cols=pre_boot_gb_cols,
facetgrid_legend=facetgrid_legend, **kwargs)
ylabel = 'Relative amplitude'
# doesn't look good with multiple rows
if context == 'paper' and col_wrap is None:
# the location argument here does nothing, since we over-ride
# it with the bbox_to_anchor and bbox_transform arguments. the
# size and size_vertical values here look weird because they're
# in polar units (so size is in theta, size_vertical is in r)
asb = AnchoredSizeBar(g.axes[0, 0].transData, 0, '1', 'center',
frameon=False, size_vertical=1,
bbox_to_anchor=(.52, 1),
sep=5,
bbox_transform=g.fig.transFigure)
g.axes[0, 0].add_artist(asb)
ylabel = ylabel.replace(' ', '\n')
for axes in g.axes:
axes[0].set_ylabel(ylabel)
else:
raise Exception(f"Don't know what to do with feature_type {feature_type}!")
if split_oris:
th = np.linspace(0, 2*np.pi, 8, endpoint=False)
r_val = 1 # df[r].mean()
if scatter_ref_pts:
for ax in g.axes.flatten():
ax.scatter(th, len(th)*[r_val], c='k',
s=mpl.rcParams['lines.markersize']**2 / 2)
# for some reason, can't call the set_rticks until after all
# scatters have been called, or they get messed up
for ax in g.axes.flatten():
ax.set_yticks(rticks)
ax.set_yticklabels(rticklabels)
else:
adjust_kwargs.update({'wspace': -.1, 'hspace': .15})
if context == 'paper':
for ax in g.axes.flatten():
if ax.get_xlabel():
ax.set_xlabel(ax.get_xlabel(), labelpad=-5)
# remove the xlabel from one of them and place the remaining one in
# between the two subplots, because it's redundant
g.axes[0, 0].set_xlabel('')
# this can have its xlabel removed, since it will be above another plot which has one
if feature_type == 'pref-period-contour':
g.axes[0, 1].set_xlabel('')
else:
g.axes[0, 1].set_xlabel(g.axes.flatten()[1].get_xlabel(), x=-.05,
ha='center', labelpad=-5)
title_kwargs['pad'] = -13
if visual_field != 'all':
g.fig._suptitle.set_text(g.fig._suptitle.get_text() + f' in {visual_field} visual field')
if not suptitle:
g.fig.suptitle('')
if not axes_titles:
for ax in g.axes.flatten():
ax.set_title('')
else:
g.set_titles(col_template="{col_name}", **title_kwargs)
g.tight_layout()
g.fig.subplots_adjust(**adjust_kwargs)
return g
def existing_studies_with_current_figure(df, seed=None, precision_df=None, y="Preferred period (deg)",
context='paper'):
"""Plot results from existing studies with our results
This is the same plot as `existing_studies_figure()`, with the
results from our study plotted as a black line (so see that figure
for more details).
Note that the `df` argument here is the dataframe containing results
from this study, NOT the results from previous studies (we call the
`existing_studies_df()` function here)
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects.
seed : int or None
seed for numpy's RNG. can only be None if precision_df is None
precision_df : pd.DataFrame or None, optional
dataframe containing the precision for each scanning session in
df. If None, we won't do any bootstrapping, and so assume this
already has only one subject
y : {'Preferred period (deg)', 'Preferred spatial frequency (cpd)'}
Whether to plot the preferred period or preferred spatial
frequency on the y-axis. If preferred period, the y-axis is
linear; if preferred SF, the y-axis is log-scaled (base 2). The
ylims will also differ between these two
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
g : sns.FacetGrid
The FacetGrid containing the plot
"""
# this gets us the median parameter value for each subject and fit
# model type
df = df.groupby(['subject', 'model_parameter', 'fit_model_type']).median().reset_index()
if precision_df is not None:
df = df.merge(precision_df, on=['subject'])
df = precision_weighted_bootstrap(df, seed, 100, 'fit_value', ['model_parameter', 'fit_model_type'],
'precision')
gb_cols = [c for c in ['subject', 'bootstrap_num'] if c in df.columns]
df = analyze_model.create_feature_df(df, reference_frame='relative', gb_cols=gb_cols)
df = df.groupby(['subject', 'reference_frame', 'Eccentricity (deg)', 'bootstrap_num']).agg('mean').reset_index()
df['Preferred spatial frequency (cpd)'] = 1 / df['Preferred period (deg)']
g = existing_studies_figure(existing_studies_df(), y, False, context)
_, line, _ = plotting.scatter_ci_dist('Eccentricity (deg)', y, data=df,
color='k', join=True, ax=g.ax,
linewidth=1.5*plt.rcParams['lines.linewidth'],
ci=68, estimator=np.median,
draw_ctr_pts=False, ci_mode='fill');
data = g._legend_data.copy()
data['Current study'] = line[0]
g.add_legend(data, label_order=g.hue_names + ['Current study'])
# facetgrid doesn't let us set the title fontsize directly, so need to do
# this hacky work-around
g.fig.legends[0].get_title().set_size(mpl.rcParams['legend.title_fontsize'])
return g
def mtf(mtf_func, df=None, context='paper'):
"""Plot the MTF as a function of spatial frequencies
This plots the function we use to invert the display MTF when constructing
our stimuli. We plot a semilogx plot, from 1/512 to 1/2 cycles per pixel,
labeled as pixels per period (the reciprocal of spatial frequency), with
y-values going from .5 to 1
Parameters
----------
mtf_func : function
python function that takes array of spatial frequencies as its only
argument and returns the MTF at those spatial frequencies.
df : pd.DataFrame or None, optional
If not None, the data used to fit this function, which we'll plot as
points on the figure.
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
Returns
-------
fig : plt.figure
Figure containing the MTF plot
"""
sfs = np.linspace(0, .5)
params, fig_width = style.plotting_style(context, figsize='half')
plt.style.use(params)
fig, ax = plt.subplots(1, 1, figsize=(fig_width, fig_width*.65))
ax.semilogx(sfs, mtf_func(sfs), 'C0', basex=2)
if df is not None:
ax.semilogx(df.display_freq, df.corrected_contrast, 'C0o', basex=2)
ticks = [512, 128, 32, 8, 2]
ax.set(xticks=[1/i for i in ticks], xticklabels=ticks, xlabel='Pixels per period',
ylabel='Michelson contrast', yticks=[.5, .75, 1])
fig.tight_layout()
return fig
def sigma_interpretation(df):
"""Generate string interpreting relative size of a, b, and sigma.
This function returns a string (meant to be printed or saved to txt file)
that describes the preferred period at 0, the standard deviation, and how
many degrees you'd have to move in order to shift your preferred period by
a single standard deviation.
Parameters
----------
df : pd.DataFrame
dataframe containing all the model parameter values, across
subjects. note that this should first have gone through
prep_model_df, which renames the values of the model_parameter
columns.
Returns
-------
result : str
string containing the description discussed above
"""
# get the median value of the parameters we're interested
median_params = df.groupby('model_parameter').fit_value.median()
a = median_params['$a$']
b = median_params['$b$']
sigma = median_params['$\sigma$']
n_degrees = (b * (2**sigma - 1)) / a
pref_period_there = b + n_degrees * a
# as described on the wiki page for FWHM:
# https://en.wikipedia.org/wiki/Full_width_at_half_maximum. That's for a
# regular Gaussian, but the same calculation works here, just in octave
# units (as equivalent to $\log_2(SF_{.5H} / SF_{.5L})$, where those SFs
# are the spatial frequency where the curve reaches half-max above and
# below the peak, respectively)
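# worked out explicitly (illustrative only; the sigma value here is assumed,
# not taken from the data): FWHM = 2*sqrt(2*ln(2))*sigma ~= 2.3548*sigma, so
# e.g. sigma = 1 octave gives FWHM ~= 2.355 octaves, i.e. half-max is reached
# ~1.18 octaves above and below the peak spatial frequency.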
fwhm = 2*np.sqrt(2*np.log(2)) * sigma
result = (
f"Preferred period at 0 degrees is {b:.03f}, with slope {a:.03f}.\n"
f"Standard deviation of the log-Gaussian is {sigma:.03f} octaves (equivalent to FWHM of {fwhm:.03f} octaves).\n"
f"Therefore, you'd need to move to {n_degrees:.03f} degrees eccentricity to move by a std dev.\n"
f"At that eccentricity, preferred period is {pref_period_there:.03f}.\n"
"All this is calculated using the median across bootstraps, average across polar angle and orientations."
)
return result
def compare_cv_models(first_level_df, targets, predictions, model_names, loss_func='normed_loss',
df_filter_string='drop_voxels_with_mean_negative_amplitudes,drop_voxels_near_border',
context='paper', voxel_n_check=9):
"""Create plots to help understand differences in model performance.
This creates several plots to compare the predictions of different models.
We make pairwise comparisons between each of them:
1. Plot pairwise difference in loss as a function of eccentricity (each
comparison on a separate row) (1 plot).
2. Plot the `voxel_n_check` voxels that are the best for each model in each
pairwise comparison (2 plots per pairwise comparison). We plot the voxel
response as a function of spatial frequency, and then curves for each
model. This means that we're collapsing across stimulus orientation
(variation in those responses just shown as confidence intervals).
Because we're only plotting response as a function of spatial frequency
(and not of stimulus orientation), this is really only sufficient for
comparing models 1 to 3, those models whose responses are isotropic.
Modification to this would be necessary to make informative plots for the
other models.
Parameters
----------
first_level_df : pd.DataFrame
DataFrame containing the responses of each voxel to each stimulus. Note
that we only use the median response, so the summary dataframe (vs
full, which includes separate bootstraps) should be used.
targets : torch.tensor
tensor containing the targets for the model, i.e., the responses and
precision of the voxels-to-fit, as saved out by
sfp.analyze_model.calc_cv_error
predictions : list
list of tensors containing the predictions for each model, as saved out
by sfp.analyze_model.calc_cv_error
model_names : list
list of strings containing the names (for plotting purposes) of each
model, in same order as predictions.
loss_func : str, optional
The loss function to compute. One of: {'weighted_normed_loss',
'crosscorrelation', 'normed_loss', 'explained_variance_score',
'cosine_distance', 'cosine_distance_scaled'}.
df_filter_string : str or None, optional
a str specifying how to filter the voxels in the dataset. see
the docstrings for sfp.model.FirstLevelDataset and
sfp.model.construct_df_filter for more details. If None, we
won't filter. Should probably use the default, which is what all
models are trained using.
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in
seaborn's set_context function). if poster, will scale things up
voxel_n_check : int, optional
Number of voxels to plot in second plot type. As you get farther away
from default value (9), more likely that plot will look weird.
Returns
-------
figs : list
List containing the created figures
"""
params, fig_width = style.plotting_style(context, figsize='full')
plt.style.use(params)
if df_filter_string is not None:
df_filter = model.construct_df_filter(df_filter_string)
first_level_df = df_filter(first_level_df)
voxels = first_level_df.drop_duplicates('voxel')
voxels['voxel_new'] = np.arange(len(voxels))
tmp = first_level_df.set_index('voxel')
tmp['voxel_new'] = voxels['voxel_new']
first_level_df = tmp.reset_index()
for name, pred in zip(model_names, predictions):
loss = analyze_model._calc_loss(pred, targets, loss_func, False)
voxels[f'{name}_loss'] = loss
# this is the number of combinations of the values in model names with
# length 2. for some reason, itertools objects don't have len()
n_combos = int(math.factorial(len(model_names)) / 2 /
math.factorial(len(model_names)-2))
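# i.e., n_combos = C(n, 2) = n*(n-1)/2; e.g., 3 model names give 3 pairwise
# comparisons and 4 give 6 (one subplot row per comparison below).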
fig, axes = plt.subplots(n_combos, 2, squeeze=False,
figsize=(fig_width, n_combos/2*fig_width))
predictions = dict(zip(model_names, predictions))
voxel_comp_figs = []
for i, (name_1, name_2) in enumerate(itertools.combinations(model_names, 2)):
loss_name = f'{name_1}_loss - {name_2}_loss'
voxels[loss_name] = voxels[f'{name_1}_loss'] - voxels[f'{name_2}_loss']
ymax = voxels[loss_name].max() + voxels[loss_name].max() / 10
ymin = voxels[loss_name].min() + voxels[loss_name].min() / 10
sns.scatterplot(x='eccen', y=loss_name, data=voxels, ax=axes[i, 0])
axes[i, 0].set_ylim(ymin, ymax)
sns.regplot(x='eccen', y=loss_name, data=voxels, ax=axes[i, 1],
x_estimator=np.median, x_bins=50)
axes[i, 1].set(ylabel='')
axes[i, 0].hlines(0, voxels.eccen.min(), voxels.eccen.max(), linestyles='dashed')
axes[i, 1].hlines(0, voxels.eccen.min(), voxels.eccen.max(), linestyles='dashed')
vox_idx = voxels[loss_name].values.argsort()
vox_idx = np.concatenate([vox_idx[-voxel_n_check:], vox_idx[:voxel_n_check]])
tmp = first_level_df.query(f"voxel_new in @vox_idx")
data = []
for j, v in enumerate(vox_idx):
d = {}
for name in model_names:
pred = predictions[name]
val = pred[v]
# need to normalize predictions for comparison
val = val / val.norm(2, -1, True)
d[name] = val.detach()
d['voxel_new'] = v
d['stimulus_class'] = np.arange(48)
d['better_model'] = {True: name_2, False: name_1}[j < voxel_n_check]
data.append(pd.DataFrame(d))
t = pd.concat(data)
tmp = tmp.merge(t, 'left', on=['voxel_new', 'stimulus_class'],
validate='1:1', )
tmp = tmp.rename(columns={'amplitude_estimate_median_normed':
'voxel_response'})
tmp = pd.melt(tmp, ['voxel_new', 'local_sf_magnitude', 'stimulus_class',
'better_model', 'eccen'],
value_vars=['voxel_response'] + model_names,
var_name='model', value_name='response')
for name, other_name in zip([name_1, name_2], [name_2, name_1]):
g = sns.relplot(x='local_sf_magnitude', y='response',
data=tmp.query(f"better_model=='{name}'"),
hue='model', col='voxel_new', kind='line',
col_wrap=3, height=fig_width/3)
g.fig.suptitle(f'better_model = {name} (vs {other_name})')
if voxel_n_check > 6:
g.fig.subplots_adjust(top=.9)
elif voxel_n_check > 3:
g.fig.subplots_adjust(top=.85)
else:
g.fig.subplots_adjust(top=.75)
g.set(xscale='log')
for ax in g.axes.flatten():
vox_id = int(re.findall('\d+', ax.get_title())[0])
ax.set_title(ax.get_title() + f",\neccen = {tmp.query('voxel_new==@vox_id').eccen.unique()[0]:.02f}")
voxel_comp_figs.append(g.fig)
fig.tight_layout()
return [fig] + voxel_comp_figs
def theory_background_figure(context):
"""Create figure with some small info on background theory.
Parameters
----------
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in seaborn's
set_context function). if poster, will scale things up (but only paper
has been tested)
Returns
-------
fig : plt.figure
Figure containing this plot
"""
einstein_path = op.join(op.dirname(op.realpath(__file__)), '..', 'reports', 'figures',
'einstein.pgm')
einstein = plt.imread(einstein_path)
einstein = einstein / einstein.max()
params, fig_width = style.plotting_style(context, figsize='full')
params['axes.titlesize'] = '8'
params['axes.labelsize'] = '8'
params['legend.fontsize'] = '8'
warnings.warn("We adjust the font size for axes titles, labels, and legends down to "
"8pts (so this will probably look wrong if context is not paper)!")
plt.style.use(params)
fig = plt.figure(figsize=(fig_width, fig_width/2))
gs = fig.add_gridspec(4, 4, hspace=.65)
fig.add_subplot(gs[:2, 0])
fig.add_subplot(gs[2:, 0])
fig.add_subplot(gs[1:3, 1])
fig.add_subplot(gs[1:3, -2])
fig.add_subplot(gs[:2, -1])
fig.add_subplot(gs[2:, -1])
axes = np.array(fig.axes).flatten()
for ax in axes[:2]:
ax.axis('off')
pt.imshow((einstein+.5)/1.5, ax=axes[0], zoom=110/256, title=None,
vrange=(0, 1))
pt.imshow((einstein+.5)/1.5, ax=axes[1], zoom=110/256, title=None,
vrange=(0, 1))
axes[0].set_title(r'SF preferences $\bf{constant}$'+'\nacross visual field')
axes[1].set_title(r'SF preferences $\bf{scale}$'+'\nwith eccentricity')
ecc = np.linspace(.01, 20, 50)
V1_pRF_size = 0.063485 * ecc
constant_hyp = 2*np.ones(len(ecc))
pal = sns.color_palette('Dark2', n_colors=2)
for i, ax in enumerate(axes[2:4].flatten()):
if i == 0:
ax.semilogy(ecc, 1./V1_pRF_size, '-', label='scaling',
linewidth=2, basey=2, c=pal[0])
ax.set_ylim((.25, 10))
ax.plot(ecc, constant_hyp, c=pal[1], linewidth=2, label='constant')
ax.set(xticks=[], yticks=[], ylabel='Preferred SF (cpd)',
xlabel='Eccentricity')
elif i == 1:
ax.plot(ecc, V1_pRF_size, linewidth=2, label='scaling', c=pal[0])
ax.plot(ecc, 1./constant_hyp, c=pal[1], linewidth=2, label='constant')
ax.set(xlabel='Eccentricity', xticks=[], yticks=[],
ylabel='Preferred period (deg)')
axes[3].legend(frameon=False, bbox_to_anchor=(-.1, -.1), loc='upper center')
axes[3].annotate('', xy=(.5, 1), xytext=(-.65, 1), xycoords='axes fraction',
arrowprops={'arrowstyle': '<->', 'color': 'k',
'connectionstyle': 'arc3,rad=-.3'})
axes[3].text(-.075, 1.2, r'$\frac{1}{f(x)}$', ha='center', va='bottom',
transform=axes[3].transAxes)
# from Eero, this is about what it should be
V1_RF_size = .2 * ecc
V1_pRF_size_slope = 0.063485
V1_pRF_size_offset = 0
V1_pRF_size_error = 0.052780
for i, ax in enumerate(axes[4:].flatten()):
ax.fill_between(ecc, (V1_pRF_size_slope - V1_pRF_size_error/2.)*ecc + V1_pRF_size_offset,
(V1_pRF_size_slope + V1_pRF_size_error/2.)*ecc + V1_pRF_size_offset,
alpha=.1, color=pal[0])
ax.plot(ecc, V1_pRF_size_slope*ecc+V1_pRF_size_offset, linewidth=2, label='scaling', c=pal[0])
if i == 0:
for e in [1,5,10,15,20]:
ax.plot([0, 20], [V1_pRF_size_slope*e+V1_pRF_size_offset,
V1_pRF_size_slope*e+V1_pRF_size_offset], '--', c='k',
linewidth=1)
ax.set(title="Full-field gratings", xticks=[], yticks=[])
if i == 1:
for j in [-1, -.5, 0, .5, 1]:
ax.plot(ecc, (V1_pRF_size_slope + j*V1_pRF_size_error/2.)*ecc + V1_pRF_size_offset,
linestyle='--', c='k', linewidth=1)
ax.set(xlabel='Eccentricity', xticks=[], yticks=[], title='Scaled gratings')
ax.set_ylabel("Preferred period (deg)")
return fig
def voxel_exclusion(df, context='paper'):
"""Create plot showing how many voxels were excluded from model fitting.
WARNING: Currently this is not context-compliant -- the figure ends up much
wider than allowed. If we want to use this in paper, will change that.
Parameters
----------
df : pd.DataFrame
dataframe containing the voxel exclusion info, as created by the
snakemake rule voxel_exclusion_df
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in seaborn's
set_context function). if poster, will scale things up (but only paper
has been tested)
Returns
-------
g : sns.FacetGrid
FacetGrid containing the plot
"""
params, fig_width = style.plotting_style(context, figsize='full')
plt.style.use(params)
if 'ecc in 1-12,drop_voxels_with_any_negative_amplitudes' in df.columns:
arg_str = 'any'
elif 'ecc in 1-12,drop_voxels_with_mean_negative_amplitudes' in df.columns:
arg_str = 'mean'
neg = df['ecc in 1-12'] - df[f'ecc in 1-12,drop_voxels_with_{arg_str}_negative_amplitudes']
border = df['ecc in 1-12'] - df['ecc in 1-12,drop_voxels_near_border']
df[f'ecc in 1-12,drop_voxels_with_{arg_str}_negative_amplitudes,drop_voxels_near_border - independent'] = df['ecc in 1-12'] - (neg + border)
neg_prop = dict(zip(df.subject, neg / df['ecc in 1-12']))
neg = dict(zip(df.subject, neg))
map_dict = {'total_voxels': 0,
'ecc in 1-12': 1,
f'ecc in 1-12,drop_voxels_with_{arg_str}_negative_amplitudes': 2,
'ecc in 1-12,drop_voxels_near_border': 3,
f'ecc in 1-12,drop_voxels_with_{arg_str}_negative_amplitudes,drop_voxels_near_border': 4,
f'ecc in 1-12,drop_voxels_with_{arg_str}_negative_amplitudes,drop_voxels_near_border - independent': 5}
id_vars = [c for c in df.columns if c not in map_dict.keys()]
df = pd.melt(df, id_vars, value_name='number_of_voxels')
df['exclusion_criteria'] = df.variable.map(map_dict)
col_order = plotting.get_order('subject', col_unique=df.subject.unique())
g = sns.catplot(x='exclusion_criteria', y='number_of_voxels', data=df,
col='subject', kind='point', col_wrap=6, aspect=.5,
height=(1/.5)*(2*fig_width/6), col_order=col_order)
for ijk, data in g.facet_data():
ax = g.axes[ijk[1]]
ax.scatter(4, data.query('exclusion_criteria==4').number_of_voxels, c='r', zorder=100)
txt = '\n'.join([f'{v}: {k}' for k,v in map_dict.items()])
g.fig.text(1, .75, txt, va='center')
txt = '\n'.join([f'{s}: {neg[s]} ({neg_prop[s]:.3f})' for s in col_order])
txt = "Number of voxels dropped because of negative amplitude (proportion on stimuli)\n\n" + txt
g.fig.text(1, .25, txt, va='center')
return g
def _create_model_prediction_df(df, trained_model, voxel_label,
for_relative_plot=False,
extend_sf=False):
"""Create df containing model predictions for a single voxel
Will contain 48 rows, with the following columns: model_predictions (normed
predictions of trained_model to the spatial frequency seen by this voxel),
voxel (voxel_label), stimulus_class (0 to 47, giving the stimulus label),
peak_sf (if for_relative_plot is True, this gives the preferred spatial frequency
of this voxel, at each observed orientation).
Parameters
----------
df : pd.DataFrame
DataFrame containing the responses of a single voxel to stimuli. Should
only have one response per stimulus (thus, the summary df), and must
have columns eccen, angle, local_sf_magnitude, and local_sf_xy_direction.
trained_model : sfp.model.LogGaussianDonut
Trained model whose responses we want to get.
voxel_label : str
The label for this voxel.
for_relative_plot : bool, optional
If True, will add a column giving the peak spatial frequency for this
voxel at each observed orientation and evaluate the model at 36
frequencies log-spaced from two decades below to two decades above the
peak (rather than the presented frequencies), at the four main
orientations.
extend_sf : bool, optional
If True, we instead generate predictions for local spatial frequencies
from .01 to 100 cpd (logspaced, 36 samples), for the four main angles.
Cannot be True if for_relative_plot is True.
Returns
-------
data : pd.DataFrame
DataFrame containing the above info
"""
data = {}
assert df.eccen.nunique() == 1 and df.angle.nunique() == 1, "_create_model_prediction_df must be called on the df with responses to a single voxel!"
sfs = df.drop_duplicates('stimulus_class')[['local_sf_magnitude',
'local_sf_xy_direction']]
sfs = torch.tensor(sfs.values)
prf_loc = torch.tensor(df[['eccen', 'angle']].values)
predictions = trained_model.evaluate(sfs[:, 0], sfs[:, 1], prf_loc[:, 0], prf_loc[:, 1])
predictions_norm = predictions.norm(2, -1, True)
if extend_sf:
if for_relative_plot:
raise Exception("At most one of for_relative_plot and extend_sf can be true, but both were true!")
# get the 4 main orientations
angles = np.linspace(0, 2*np.pi, 8, endpoint=False)
angles = df.query('freq_space_angle in @angles').drop_duplicates('freq_space_angle')
angles = angles.local_sf_xy_direction.values
n_samps = 36
freqs = []
for a in angles:
freqs.extend(np.logspace(-2, 2, n_samps))
sfs = torch.tensor([freqs, np.concatenate([n_samps*[a] for a in angles])]).transpose(0, 1)
data['local_sf_magnitude'] = sfs[:, 0].detach().numpy()
# we use the same norm as before, in order to make sure things line up correctly
predictions = trained_model.evaluate(sfs[:, 0], sfs[:, 1],
prf_loc[0, 0], prf_loc[0, 1])
elif for_relative_plot:
# get the 4 main orientations
angles = np.linspace(0, 2*np.pi, 8, endpoint=False)
angles = df.query('freq_space_angle in @angles').drop_duplicates('freq_space_angle')
angles = angles.local_sf_xy_direction.values
peak_sf = []
freqs = []
n_samps = 36
for a in angles:
peak_sf.append(trained_model.preferred_sf(a, prf_loc[0, 0], prf_loc[0, 1]).item())
freqs.extend(np.logspace(np.log10(peak_sf[-1]/100), np.log10(peak_sf[-1]*100), n_samps))
sfs = torch.tensor([freqs, np.concatenate([n_samps*[a] for a in angles])]).transpose(0, 1)
peak_sf = np.concatenate([n_samps*[p] for p in peak_sf])
data['peak_sf'] = peak_sf
data['local_sf_magnitude'] = sfs[:, 0].detach().numpy()
# we use the same norm as before, in order to make sure things line up correctly
predictions = trained_model.evaluate(sfs[:, 0], sfs[:, 1],
prf_loc[0, 0], prf_loc[0, 1])
else:
data['stimulus_class'] = np.arange(48)
data['model_predictions'] = (predictions / predictions_norm).detach().squeeze()
data['voxel'] = voxel_label
return pd.DataFrame(data)
def _remap_frequencies(df, freq_mag_col='local_sf_magnitude'):
"""Create plotting_sf column in df
for each voxel, our stimuli have several orientations. ideally, these
orientations would all have the exact same spatial frequency, but they
don't (the w_r/w_a parameters needed to be integers in order to avoid
obvious artifacts at polar angle 0). for plotting purposes, this is
confusing, so we map those values such that they are identical, and the
binning that gets done later on then makes more sense.
This adds a column, plotting_sf, which contains this info.
Parameters
----------
df : pd.DataFrame
first level DataFrame containing the amplitude responses for a single
subject and session. Must be the summary version (only has median across
bootstraps).
freq_mag_col : str, optional
Name of the column with the spatial frequencies to remap.
Returns
-------
df : pd.DataFrame
the dataframe with plotting_sf column added.
"""
canonical_freqs = [f for f in df.freq_space_distance.unique() if f == int(f)]
canonical_freq_mapper = {f: min(canonical_freqs, key=lambda x: abs(x-f))
for f in df.freq_space_distance.unique()}
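# e.g. (hypothetical values): with canonical integer freqs [6, 8, 11], a raw
# freq_space_distance of 6.13 maps to 6 and 10.9 maps to 11; each voxel then
# gets its median local SF at that canonical frequency as plotting_sf below.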
freq_mapper = df.groupby(['voxel', 'freq_space_distance'])[freq_mag_col].median().to_dict()
df['plotting_sf'] = df.apply(lambda x: freq_mapper[x.voxel,
canonical_freq_mapper[x.freq_space_distance]],
axis=1)
return df
def _merge_model_response_df(df, model_predictions):
"""Merge dfs with model predictions and voxel responses.
Parameters
----------
df : pd.DataFrame
first level DataFrame containing the amplitude responses for a single
subject and session. Must be the summary version (only has median across
bootstraps).
model_predictions : pd.DataFrame
DataFrame containing the model predictions for each voxel in df.
Returns
-------
df : pd.Dataframe
The merged dataframe
"""
try:
df = df.merge(model_predictions, 'left', on=['voxel', 'stimulus_class'],
validate='1:1', )
df = df.rename(columns={'amplitude_estimate_median_normed':
'voxel_response'})
df = pd.melt(df, ['voxel', 'stimulus_class', 'eccen', 'freq_space_angle',
'local_sf_magnitude', 'plotting_sf'],
value_vars=['voxel_response', 'model_predictions'],
var_name='model', value_name='Response (a.u.)')
except KeyError:
# in this case, we're combining the relative ones, so model_predictions
# doesn't have a stimulus_class column (and they're evaluated at
# different frequencies)
df = df[['voxel', 'local_sf_magnitude', 'amplitude_estimate_median_normed',
'peak_sf', 'subject']]
df['model'] = 'voxel_response'
df = df.rename(columns={'amplitude_estimate_median_normed': 'Response (a.u.)'})
model_predictions = model_predictions.rename(columns={'model_predictions':
'Response (a.u.)'})
model_predictions['model'] = 'model_predictions'
df = pd.concat([df, model_predictions], sort=False)
return df
def _voxel_responses_and_predictions(*args, label='', n_bins=10, plot_type='reg', **kwargs):
"""Plot voxel responses and model predictions.
If label=voxel_response, we use sns.regplot (if plot_type=='reg', with
n_bins bins on the x axis) or sns.histplot (if plot_type='hist', logscaling
the x-axis). Else, we use sns.lineplot
"""
if label == 'voxel_response':
if plot_type == 'reg':
# there are 22 unique frequencies (freq_space_distance in the
# dataframe), but only 10 "real" ones, the others are just off by a
# little bit (because w_a/w_r needed to be whole numbers)
return sns.regplot(*args, x_bins=n_bins,
fit_reg=False, label=label,
scatter_kws={'s': 10}, **kwargs)
elif plot_type == 'hist':
to_return = sns.histplot(*args, label=label,
log_scale=(True, False),
# rasterize to decrease size
rasterized=True,
**kwargs)
# set xscale back to linear because apparently sns.histplot sets it
# for all axes, and we want the next facet to have linear xscale
# for when sns.lineplot is called
plt.xscale('linear')
return to_return
else:
return sns.lineplot(*args, label=label, **kwargs, zorder=10)
def example_voxels(df, trained_model, voxel_idx=[2310, 2957, 1651],
extend_sf=False, context='paper'):
"""Plot some example voxel data and their model fit.
For some voxels and a trained model, plot some comparisons between the
measured voxel responses and the model's predictions. Each voxel gets its
own column. Nothing is done here to choose the voxels, so that must be done
externally.
Parameters
----------
df : pd.DataFrame
first level DataFrame containing the amplitude responses for a single
subject and session. Must be the summary version (only has median across
bootstraps).
trained_model : sfp.model.LogGaussianDonut
Trained model whose responses we want to show.
voxel_idx : list, optional
List of voxel ids (i.e., values from the 'voxel' column of df) to show.
Should be selected somehow in order to make sure they're reasonably
nice. The default values are for sub-wlsubj001, ses-04, and are roughly
foveal, parafoveal, and peripheral, all reasonably well fit by the full
model. Regardless of how many are here, we'll have 3 columns per row.
extend_sf : bool, optional
If True, we instead generate predictions for local spatial frequencies
from .01 to 100 cpd (logspaced, 36 samples), for the four main angles.
context : {'paper', 'poster'}, optional
plotting context that's being used for this figure (as in seaborn's
set_context function). if poster, will scale things up (but only paper
has been tested)
Returns
-------
g : sns.FacetGrid
FacetGrid containing the plot
"""
params, fig_width = style.plotting_style(context, figsize='full')
plt.style.use(params)
ax_height = (fig_width / 4) / .75
df = df.query("voxel in @voxel_idx")
data = []
voxel = df.drop_duplicates('voxel')
eccen_order = voxel.sort_values('eccen').voxel.values
for i, v in enumerate(voxel_idx):
data.append(_create_model_prediction_df(df.query('voxel==@v'),
trained_model, v,
extend_sf=extend_sf))
data = | pd.concat(data) | pandas.concat |
# This code reads the results of WQ samplings from a lake at various times and locations by
# several researchers
# The code does the following steps:
# 1) reads the .csv files
# 2) calculates the mean and standard deviations of the samples taken at a particular date (by all researchers)
# 3) fills the gaps between dates to have a consistent data set from the beginning to the end of all samplings
# 4) plots a time series of the variables versus water level
# This code was written by <NAME> on 6/1/2019
#----------------------------------------------------
# Import the Pandas and Matplotlib and other packages
import pandas as pd
import matplotlib.pyplot as plt
import scipy as sci
import numpy as np
from numpy import *
from scipy.stats import *
import seaborn as sns
# Determine the temporal range of your data
beginDate = '06-01-1977'
endDate = '25-08-2017'
idx = pd.date_range(beginDate, endDate)
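# idx is a daily DatetimeIndex covering the whole sampling period; it is used
# below to reindex each author's per-date means/stds onto one common,
# continuous date axis (dates with no samples show up as NaN rows).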
# Define the names of the authors whose data you are using, as a unique list
r=['<NAME>,1986', '<NAME>,1995', 'Alipur,2006',
'Hafezieh, 2016','Asem et al.,2007','Karbasi et al.,2010', 'sima & Tajrishy, 2015', 'EAWB']
# Define the names of the water quality parameters (header line of the .csv files)
Parameters_list=['water_level','TDS','Na','Mg','SO4','Cl','HCO3','K','Ca']
#read and merge dataframes of ionic composition and TDS
dicMean={}
dicStd={}
df={}
#create a blank dictionary for each parameter
for n, p in enumerate(Parameters_list):
globals()['Mean_%s'%p]={}
globals()['Std_%s'%p]={}
# Notice that the names of the csv files are the first 6 letters of the reference (authors)
for i , dr in enumerate(r):
dfname=(dr[0:6])
# set the path of your .csv files
csv_FilesPath = r'C:\Users\somay\PycharmProjects\PyCodes\ULplots_PNAS\TDS_Ions_Timeseries\WQCSV_InputFiles\\'
name = dfname.strip() + '.csv'
df = pd.read_csv(csv_FilesPath + name, header=0, sep=',',
index_col=0, parse_dates=True,
infer_datetime_format=True, low_memory=False)
grouped_df = df.groupby('date')
df_mean = df.groupby(df.index.date).mean()
df_std = df.groupby(df.index.date).std()
dicMean[dfname] = df_mean.reindex(idx)
dicStd[dfname] = df_std.reindex(idx)
# Select desired columns and form a dictionary for each parameter
for n, p in enumerate(Parameters_list):
globals()['Mean_%s'%p][dr]=dicMean[dfname][p]
globals()['Std_%s'% p][dr] = dicStd[dfname][p]
# convert the dictionary of each parameter to a data frame named for example df_MeanTDS
#print(Meanwater_level)
for n, p in enumerate(Parameters_list):
# Selecting desired parameters (columns)
globals()['df_Mean%s'%p]=pd.DataFrame.from_dict(globals()['Mean_%s'%p])
globals()['df_Std%s' % p] = pd.DataFrame.from_dict(globals()['Std_%s' % p])
##Arbitrary print of the TDS statistics
#print(df_MeanTDS.min(),df_MeanTDS.max(),df_MeanTDS.mean(),df_MeanTDS.std())
# Put together all TDS & Volume data as time series regardless of the authors
df_MeanTDS_allAuthours=df_MeanTDS.stack()
df_Meanlevel_allAuthours=df_Meanwater_level.stack()
# concat two series of Level and TDS and set the column names of the resulting dataframe
L_TDS_Timeseries=pd.concat([df_Meanlevel_allAuthours.rename('water level'),
df_MeanTDS_allAuthours.rename('TDS')], axis=1)
#Determine the x and y dataset
#x_data=water_level , y_data=TDS
x_data=array(df_Meanlevel_allAuthours)
y_data=array(df_MeanTDS_allAuthours)
#mask NaN data from x and y datasets
mask = ~np.isnan(x_data)&~np.isnan(y_data)
LTDS_df= | pd.DataFrame({"level":x_data[mask],"TDS":y_data[mask]}) | pandas.DataFrame |
"""
Created on Jun 11, 2013
@author: agross
"""
import pandas as pd
import numpy as np
def to_date(s):
"""
Pulls year, month, and day columns from clinical files and
formats into proper date-time field.
"""
try:
return pd.datetime(int(s['yearofformcompletion']),
int(s['monthofformcompletion']),
int(s['dayofformcompletion']))
except:
return np.nan
def fix_date(df):
"""
Translate date to date-time, get rid of old columns.
"""
try:
df['form_completion'] = df.apply(to_date, 1)
del df['yearofformcompletion']
del df['monthofformcompletion']
del df['dayofformcompletion']
except:
pass
return df
def try_float(s):
try:
return float(s)
except:
return np.nan
def format_drugs(br):
"""
Format drug rows in merged clinical file from Firehose.
The data consists of one or more drug entries for each patient.
Here we use a MultiIndex with Patient Barcode on level 0 and
the drug administration on level 1.
Input
br: clinical DataFrame with patient bar-codes on the columns
"""
drugs = br.select(lambda s: s.startswith('patient.drugs.drug'))
ft = pd.MultiIndex.from_tuples # long Pandas names
drug_annot = ft(map(lambda s: tuple(s.split('.')[2:4]), drugs.index))
drugs.index = drug_annot
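# e.g. a (hypothetical) row label 'patient.drugs.drug-2.drugname' splits into
# the index tuple ('drug-2', 'drugname'): drug instance on level 0, field on
# level 1.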
c = drugs.count(1)
#dx: here we get rid of duplicates by taking the instance with the
# most fields (next 2 lines)
drugs = drugs.ix[c.ix[c.argsort()].index]
drugs = drugs.groupby(level=[0, 1], axis=0).last()
drugs = drugs.stack().unstack(level=1)
drugs.index = drugs.index.swaplevel(0, 1)
drugs = drugs.sort_index()
drugs = fix_date(drugs)
return drugs
def format_followup(br):
"""
Format follow-up rows in merged clinical file from Firehose.
The data consists of one or more followup entries for each patient.
Here we use a MultiIndex with Patient Barcode on level 0 and
the follow-up number on level 1.
Input
br: clinical DataFrame with patient bar-codes on the columns
"""
row_filter = lambda s: s.startswith('patient.followups.followup')
followup = br.select(row_filter)
if len(followup) == 0:
return
ft = pd.MultiIndex.from_tuples # long Pandas names
followup.index = ft([(s.split('.')[-1], '.'.join(s.split('.')[:-1]))
for s in followup.index])
followup = followup.stack().unstack(level=0)
followup.index = followup.index.reorder_levels([1, 0])
followup = followup.sort_index()
followup = fix_date(followup)
return followup
def format_stage(br):
row_filter = lambda s: s.startswith('patient.stageevent')
stage = br.select(row_filter)
stage = stage.dropna(how='all')
stage = stage.rename(index=lambda s: s.split('.')[-1])
stage = stage.T
return stage
def format_radiation(br):
"""
Format radiation rows in merged clinical file from Firehose.
The data consists of one or more entries for each patient.
Here we use a MultiIndex with Patient Barcode on level 0 and
the treatment number on level 1.
Input
br: clinical DataFrame with patient bar-codes on the columns
"""
row_filter = lambda s: s.startswith('patient.radiations.radiation')
followup = br.select(row_filter)
if len(followup) == 0:
return
ft = pd.MultiIndex.from_tuples
idx = ft([tuple(s.split('.')[2:4]) for s in followup.index])
followup.index = idx
followup = followup.stack().unstack(level=1)
followup.index = followup.index.reorder_levels([1, 0])
followup = followup.sort_index()
followup = fix_date(followup)
return followup
def format_clinical_var(br):
"""
Format clinical variables that are not associated with drug, follow-up,
or radiation.
Input
br: clinical DataFrame with patient bar-codes on the columns
"""
cl = [s for s in br.index if (s.count('.') == 1)
and s.startswith('patient')]
clinical = br.ix[cl]
clinical.index = clinical.index.map(lambda s: s.split('.')[1])
cl = [s for s in br.index if (s.count('.') == 2)
and s.startswith('patient.primarypathology')]
clinical2 = br.ix[cl]
clinical2.index = clinical2.index.map(lambda s: s.split('.')[2])
clinical = clinical.append(clinical2)
clinical = clinical.T.dropna(axis=1, how='all')
clinical['age'] = clinical.ageatinitialpathologicdiagnosis.astype(float)
del clinical['ageatinitialpathologicdiagnosis']
return clinical
def format_survival(clin, followup):
"""
Format survival for downstream analysis.
For survival analysis we need to track the time to death/censoring
as well as the censoring status (censored or deceased) for each patient.
We use a MultiIndex with Patient Barcode on level 0 and ['days','event']
on level 1, where days in the time variable and 'event' is the death
indicator. Here we extract the standard survival as well as event
free survival from the clinical information as well as the patient
followup.
Input
br: clinical DataFrame with patient bar-codes on the columns
Returns:
survival: DataFrame consisting of event_free_survival, and survival
Series
timeline: DataFrame of clinical variables related to patient cancer
timelines
"""
clin2 = clin.copy()
clin2.index = pd.MultiIndex.from_tuples([(i, 'surgery', 0) for i in clin2.index])
if type(followup) == pd.DataFrame:
f = followup.append(clin2)
else:
f = clin2
time_vars = ['daystodeath', 'daystolastfollowup', 'daystolastknownalive',
'daystonewtumoreventafterinitialtreatment', 'daystotumorprogression',
'daystotumorrecurrence']
time_cols = list(f.columns.intersection(time_vars))
timeline = f[time_cols].dropna(how='all').astype(float)
timeline['days'] = timeline.max(1)
timeline = timeline.groupby(level=0).max()
deceased = timeline.daystodeath.isnull() == False
#days = timeline.days[((timeline.days > 7) | (deceased == False))]
#days = days[days > 0]
days = timeline.days[timeline.days >= 0]
survival = pd.concat([days, deceased], keys=['days', 'event'], axis=1)
survival = survival.dropna().stack().astype(float)
pfs_var = 'daystonewtumoreventafterinitialtreatment'
if (followup is not None) and (pfs_var in followup):
new_tumor = followup[pfs_var].dropna().groupby(level=0).min()
time_to_progression = pd.concat([new_tumor, timeline.days], 1).min(1)
time_to_progression = time_to_progression[time_to_progression > 7]
progression = (deceased | pd.Series(1, index=new_tumor.index))
pfs = pd.concat([time_to_progression, progression], keys=['days', 'event'],
axis=1)
pfs = pfs.dropna().stack().astype(float)
else:
pfs = survival
survival = pd.concat([survival, pfs], keys=['survival', 'event_free_survival'],
axis=1)
return survival, timeline
def get_clinical(cancer, data_path, patients=None, **params):
"""
Reads in and formats clinical data for a given tumor type.
Returns
clin: clinical variables
drugs: drugs administered, Dataframe with with
(patient, treatment_id) on index
followup: patient followups, Dataframe with with
(patient, follwup_id) on index
timeline: patient cancer timeline variables
survival: DataFrame consisting of event_free_survival, and
survival with (patient, ['days','event']) on index.
"""
f = '{}stddata/{}/Clinical/{}.clin.merged.txt'.format(data_path, cancer,
cancer)
tab = | pd.read_table(f, index_col=0, low_memory=False) | pandas.read_table |
import plotly.express as px
import pandas as pd
import datetime as dt
from utils.gurobi_model import GRBModel
# from configs import output_file_name, days, move_hours, switch_hours
from configs import (output_file_name,
start_year, start_month, start_day,
days,
st_from, st_to,
mt_from, mt_to)
class Writer:
def __init__(self): # get the output data
self.grbm = GRBModel()
self.eps, self.b, self.c, self.ksi = self.get_results()
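# (interpretation inferred from how these variables are used below:
# eps[(suborder, equip)] -- assignment of a suborder to a piece of equipment,
# b[suborder]/c[suborder] -- start/end times, ksi[suborder] -- whether a
# final suborder of an order is completed)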
self.processed_suborders = self.get_processed_suborders()
self.suborders_of_orders = self.get_suborders_of_orders()
self.done_orders_info = self.transform_results()
def get_results(self): # get the results
self.grbm.optimize_model()
return self.grbm.eps, self.grbm.b, self.grbm.c, self.grbm.ksi
def get_processed_suborders(self): # list of processed suborders
return [suborder for (suborder, equip) in self.eps.keys() if self.eps[(suborder, equip)].X > 0]
def get_suborders_of_orders(self): # dict: order --> processed suborders (semi-finished items)
suborders_of_orders = {}
for order, info in self.grbm.orders.items():
suborders_of_orders[order] = []
for final_suborder in info.keys():
if self.ksi[final_suborder].X > 0 and final_suborder in self.processed_suborders:
suborders_of_orders[order].append(final_suborder)
for order, suborders in suborders_of_orders.items():
if suborders == list(self.grbm.orders[order].keys()):
for subord in suborders:
for proc_subord in self.processed_suborders:
if proc_subord not in suborders and proc_subord not in self.grbm.final_subord_id and \
subord in self.grbm.order_graph[proc_subord]: suborders.append(proc_subord)
return suborders_of_orders
def transform_results(self): # transform the results
list_of_dicts = []
for (subord, equip) in self.eps.keys():
for order, suborders in self.suborders_of_orders.items():
if len(suborders) > 4 and self.eps[(subord, equip)].X > 0 and subord in suborders:
list_of_dicts.append({'Equipment_ID': str(equip), 'start': self.b[subord].X, 'end': self.c[subord].X,
'Order_ID': str(order), 'Suborder_ID': str(subord)})
return pd.DataFrame(list_of_dicts)
def create_gantt_chart(self): # build the Gantt chart (+ excel output file)
self.done_orders_info['Start'] = dt.datetime(start_year, start_month, start_day) + | pd.TimedeltaIndex(self.done_orders_info['start'], unit='m') | pandas.TimedeltaIndex |
import pandas as pd
import datetime
import sasoptpy as so
from swat import CAS
from collections import namedtuple
import os
supplier = 'R2R'
def prep_data(car_type='diesel'):
# Data in this repository is randomly populated, original data is provided by Rome2Rio.com
travel_data = | pd.read_csv('../data/all_methods_random.csv') | pandas.read_csv |
import filecmp
import pandas as pd
def merge_col(filepath1: str, filepath2: str) -> pd.DataFrame:
df1 = pd.read_table(filepath1, header=None)
df2 = | pd.read_table(filepath2, header=None) | pandas.read_table |
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QWidget, QTableWidget, QTableWidgetItem, QVBoxLayout
import pandas as pd
import ast
import random
true_false_df = pd.read_excel('true_false.xlsx')
true_false_df = true_false_df.reset_index(drop = True)
w = 0
class true_or_false_window(object):
def exporting(self):
true_false_df.to_excel(r'true_false_edited.xlsx', index = False)
def previous_question(self):
try:
global w
global true_false_df
w = w - 1
b = true_false_df[0][w]
c = true_false_df[1][w]
self.textBrowser.setText(b.capitalize())
self.texteditor.setText(c)
options = self.texteditor.toPlainText()
print(options)
options = self.texteditor.toPlainText()
length = len(true_false_df)
print(w, length)
if w < 0:
self.textBrowser.setText('No previous Questions')
self.exporting()
except:
self.textBrowser.setText('No previous Questions')
self.texteditor.setText('No previous Questions')
self.exporting()
def different_sentence(self):
try:
global w
global true_false_df
i = 0
for i in range(len(true_false_df)):
if true_false_df[0][w] == true_false_df[0][w + 1]:
true_false_df.drop(true_false_df.index[w], inplace=True)
true_false_df = true_false_df.reset_index(drop = True)
else:
true_false_df.drop(true_false_df.index[w], inplace=True)
true_false_df = true_false_df.reset_index(drop = True)
break
i = i + 1
b = true_false_df[0][w]
c = true_false_df[1][w]
self.textBrowser.setText(b.capitalize())
self.texteditor.setText(c)
options = self.texteditor.toPlainText()
print(options)
options = self.texteditor.toPlainText()
length = len(true_false_df)
print(w, length)
if w > length:
self.textBrowser.setText('End of List! No More Questions')
self.exporting()
except:
self.textBrowser.setText('End of List! No More Questions')
self.exporting()
def saving(self):
try:
global w
global true_false_df
w = w + 1
b = true_false_df[0][w]
c = true_false_df[1][w]
self.textBrowser.setText(b.capitalize())
self.texteditor.setText(c)
options = self.texteditor.toPlainText()
print(options)
options = self.texteditor.toPlainText()
length = len(true_false_df)
print(w, length)
if w > length:
self.textBrowser.setText('End of List! No More Questions')
self.exporting()
except:
self.textBrowser.setText('End of List! No More Questions')
self.exporting()
def deleteit1(self):
try:
global w
global true_false_df
length = len(true_false_df)
print(w, length)
true_false_df.drop(true_false_df.index[w], inplace=True)
true_false_df = true_false_df.reset_index(drop = True)
b = true_false_df[0][w]
c = true_false_df[1][w]
self.textBrowser.setText(b.capitalize())
self.texteditor.setText(c)
options = self.texteditor.toPlainText()
print(options)
if w > length:
self.textBrowser.setText('End of List! No More Questions')
self.exporting()
except:
self.textBrowser.setText('End of List! No More Questions')
self.exporting()
def setupUi(self, MainWindow):
global w
global true_false_df
MainWindow.setObjectName("MainWindow")
MainWindow.setGeometry(600,200,800,500)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.textlabel1 = QtWidgets.QLabel(self.centralwidget)
self.textlabel1.setGeometry(QtCore.QRect(50, 25 , 500, 25))
self.textlabel1.setObjectName("textlabel1")
self.textlabel1.setText("TRUE SENTENCE")
self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)
self.textBrowser.setGeometry(QtCore.QRect(50, 50, 700, 100))
self.textBrowser.setStyleSheet("background:rgb(239,239,239);")
self.textBrowser.setObjectName("textBrowser")
self.textBrowser.setText(true_false_df[0][w])
self.textlabel = QtWidgets.QLabel(self.centralwidget)
self.textlabel.setGeometry(QtCore.QRect(50, 175, 500, 25))
self.textlabel.setObjectName("textlabel")
self.textlabel.setText("FALSE SENTENCE(Editable)")
self.texteditor = QtWidgets.QTextEdit(self.centralwidget)
self.texteditor.setGeometry(QtCore.QRect(50, 200, 700, 100))
self.texteditor.setObjectName("texteditor")
self.texteditor.setText(true_false_df[1][w])
self.delete_2 = QtWidgets.QPushButton(self.centralwidget)
self.delete_2.setGeometry(QtCore.QRect(50, 325, 250, 35))
self.delete_2.setObjectName("delete_2")
self.delete_2.clicked.connect(self.deleteit1)
self.save = QtWidgets.QPushButton(self.centralwidget)
self.save.setGeometry(QtCore.QRect(325, 325, 250, 35))
self.save.setObjectName("save")
self.save.clicked.connect(self.saving)
self.next_sentence = QtWidgets.QPushButton(self.centralwidget)
self.next_sentence.setGeometry(QtCore.QRect(325, 400, 250, 35))
self.next_sentence.setObjectName("next_sentence")
self.next_sentence.clicked.connect(self.different_sentence)
self.back = QtWidgets.QPushButton(self.centralwidget)
self.back.setGeometry(QtCore.QRect(50, 400, 250, 35))
self.back.setObjectName("save")
self.back.clicked.connect(self.previous_question)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
# self.cont = False
# i = 0
# while(self.cont is not True):
# self.textBrowser.setText(a)
# print( "Waiting for user to push button 2")
# QtCore.QCoreApplication.processEvents()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "True or False Teacher"))
self.delete_2.setText(_translate("MainWindow", "DELETE AND NEXT SENTENCE"))
self.save.setText(_translate("MainWindow", "SAVE AND NEXT"))
self.back.setText(_translate("MainWindow", "PREVIOUS SENTENCE"))
self.next_sentence.setText(_translate("MainWindow", "DELETE AND NEXT QUESTION"))
mcq_question_options = pd.read_excel('mcq_.xlsx')
mcq_question_options = mcq_question_options.reset_index(drop = True)
a = 0
class mcq_window(object):
def exporting(self):
mcq_question_options.to_excel(r'mcq_edited.xlsx', index = False)
mcq_question_options.to_excel(r'mtc_edited.xlsx', index = False)
def previous_question(self):
try:
global a
global mcq_question_options
a = a - 1
b = mcq_question_options['Fill and Column'][a]
c = mcq_question_options['Keywords'][a]
x = mcq_question_options['Options'][a]
self.textBrowser.setText("\n\n"+b.capitalize() +"\n\nAnswer -" +c.capitalize())
if type(x) is list:
pass
else:
x = ast.literal_eval(x)
options1 = ""
for i in x:
options1 = options1 + i + " "
self.texteditor.setText(options1)
options = self.texteditor.toPlainText()
print(options)
options = self.texteditor.toPlainText()
options = self.texteditor.toPlainText()
length = len(mcq_question_options)
print(a, length)
if a < 0:
self.textBrowser.setText('No previous Questions')
self.exporting()
except:
self.textBrowser.setText('No previous Questions')
self.texteditor.setText('No previous Questions')
self.exporting()
def saving(self):
try:
global a
global mcq_question_options
a = a + 1
b = mcq_question_options['Fill and Column'][a]
c = mcq_question_options['Keywords'][a]
x = mcq_question_options['Options'][a]
self.textBrowser.setText("\n\n"+b.capitalize() +"\n\nAnswer -" +c.capitalize())
if type(x) is list:
pass
else:
x = ast.literal_eval(x)
options1 = ""
for i in x:
options1 = options1 + i + " "
self.texteditor.setText(options1)
options = self.texteditor.toPlainText()
print(options)
options = self.texteditor.toPlainText()
print(options)
options = options.split(" ")
options = options[:-1]
mcq_question_options['Options'][a] = options
length = len(mcq_question_options)
print(a, length)
if a > length:
self.textBrowser.setText('End of List! No More Questions')
self.exporting()
except Exception as ex:
print(mcq_question_options['Options'][a])
self.textBrowser.setText('End of List! No More Questions' + str(ex))
self.exporting()
def deleteit1(self):
try:
global a
global mcq_question_options
mcq_question_options.drop(mcq_question_options.index[a], inplace=True)
mcq_question_options = mcq_question_options.reset_index(drop = True)
b = mcq_question_options['Fill and Column'][a]
c = mcq_question_options['Keywords'][a]
x = mcq_question_options['Options'][a]
self.textBrowser.setText("\n\n"+b.capitalize() +"\n\nAnswer -" +c.capitalize())
x = ast.literal_eval(x)
options1 = ""
for i in x:
options1 = options1 + i + ' '
self.texteditor.setText(options1)
options = self.texteditor.toPlainText()
print(options)
length = len(mcq_question_options)
print(a, length)
if a > length:
self.textBrowser.setText('End of List! No More Questions')
self.exporting()
except:
self.textBrowser.setText('End of List! No More Questions')
self.exporting()
def setupUi(self, MainWindow):
global a
global mcq_question_options
b = mcq_question_options['Fill and Column'][a]
c = mcq_question_options['Keywords'][a]
x = mcq_question_options['Options'][a]
x = ast.literal_eval(x)
options1 = ""
for i in x:
options1 = options1 + i + ' '
MainWindow.setObjectName("MainWindow")
MainWindow.setGeometry(600,200,800,500)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.textlabel1 = QtWidgets.QLabel(self.centralwidget)
self.textlabel1.setGeometry(QtCore.QRect(50, 25 , 500, 25))
self.textlabel1.setObjectName("textlabel1")
self.textlabel1.setText("Questions")
self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)
self.textBrowser.setGeometry(QtCore.QRect(50, 50, 700, 150))
self.textBrowser.setStyleSheet("background:rgb(239,239,239);")
self.textBrowser.setObjectName("textBrowser")
self.textBrowser.setText("\n\n"+b.capitalize() +"\n\nAnswer -" +c.capitalize())
self.textlabel = QtWidgets.QLabel(self.centralwidget)
self.textlabel.setGeometry(QtCore.QRect(50, 225, 500, 25))
self.textlabel.setObjectName("textlabel")
self.textlabel.setText("EDIT or ADD options below.Seperate by space")
self.texteditor = QtWidgets.QTextEdit(self.centralwidget)
self.texteditor.setGeometry(QtCore.QRect(50, 250, 700, 30))
self.texteditor.setObjectName("texteditor")
self.texteditor.setText(options1)
self.delete_2 = QtWidgets.QPushButton(self.centralwidget)
self.delete_2.setGeometry(QtCore.QRect(50, 375, 250, 35))
self.delete_2.setObjectName("delete_2")
self.delete_2.clicked.connect(self.deleteit1)
self.back = QtWidgets.QPushButton(self.centralwidget)
self.back.setGeometry(QtCore.QRect(50, 325, 250, 35))
self.back.setObjectName("back")
self.back.clicked.connect(self.previous_question)
self.save = QtWidgets.QPushButton(self.centralwidget)
self.save.setGeometry(QtCore.QRect(325, 325, 250, 35))
self.save.setObjectName("save")
self.save.clicked.connect(self.saving)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
# self.cont = False
# i = 0
# while(self.cont is not True):
# self.textBrowser.setText(a)
# print( "Waiting for user to push button 2")
# QtCore.QCoreApplication.processEvents()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MCQ Teacher"))
self.delete_2.setText(_translate("MainWindow", "DELETE QUESTION"))
self.back.setText(_translate("MainWindow", "PREVIOUS QUESTION"))
self.save.setText(_translate("MainWindow", "SAVE and NEXT"))
class question_type(object):
def open_mcq_window(self):
self.window = QtWidgets.QMainWindow()
self.ui =mcq_window()
self.ui.setupUi(self.window)
self.window.show()
def true_or_false_window(self):
self.window = QtWidgets.QMainWindow()
self.ui = true_or_false_window()
self.ui.setupUi(self.window)
self.window.show()
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setGeometry(600,200,800,500)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.textlabel = QtWidgets.QLabel(self.centralwidget)
self.textlabel.setGeometry(QtCore.QRect(325, 100, 500, 50))
self.textlabel.setObjectName("textlabel")
self.textlabel.setText("Choose Question Type")
self.true_or_false = QtWidgets.QPushButton(self.centralwidget)
self.true_or_false.setGeometry(QtCore.QRect(225, 200, 150, 100))
self.true_or_false.setObjectName("true_or_false")
self.true_or_false.clicked.connect(self.true_or_false_window)
self.mcq = QtWidgets.QPushButton(self.centralwidget)
self.mcq.setGeometry(QtCore.QRect(425, 200, 150, 100))
self.mcq.setObjectName("mcq")
self.mcq.clicked.connect(self.open_mcq_window)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
# self.cont = False
# i = 0
# while(self.cont is not True):
# self.textBrowser.setText(a)
# print( "Waiting for user to push button 2")
# QtCore.QCoreApplication.processEvents()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Question Type"))
self.mcq.setText(_translate("MainWindow", "MCQ"))
self.true_or_false.setText(_translate("MainWindow", "TRUE FALSE"))
mcq_answer = pd.read_excel('mcq_edited.xlsx')
mcq_answer = mcq_answer.reset_index(drop = True)
m = 0
mtc_answer = pd.read_excel('mtc_edited.xlsx')
mtc_answer = mtc_answer.reset_index(drop = True)
y = 0
true_false_answers = pd.read_excel('true_false_edited.xlsx')
true_false_answers = true_false_answers.reset_index(drop = True)
z = 0
class student_question_type(object):
def open_mcq_window(self):
self.window = QtWidgets.QMainWindow()
        self.ui = student_mcq_window()
self.ui.setupUi(self.window)
global mcq_answer
for z in range(len(mcq_answer)):
options = mcq_answer['Options'][z]
if type(options) is list:
pass
else:
options = ast.literal_eval(options)
keyword = mcq_answer['Keywords'][z]
options.append(keyword)
random.shuffle(options)
mcq_answer['Options'][z] = options
print(mcq_answer)
mcq_answer = pd.concat([mcq_answer[:1],mcq_answer[1:].sample(frac=1)]).reset_index(drop=True)
print(mcq_answer)
self.window.show()
def open_mct_window(self):
self.window = QtWidgets.QMainWindow()
self.ui = student_match_the_column_window()
self.ui.setupUi(self.window)
global mtc_answer
print(mtc_answer)
mtc_answer = pd.concat([mtc_answer[:4],mtc_answer[4:].sample(frac=1)]).reset_index(drop=True)
print(mtc_answer)
self.window.show()
def true_or_false_window(self):
self.window = QtWidgets.QMainWindow()
self.ui = student_true_or_false_window()
self.ui.setupUi(self.window)
global true_false_answers
print(true_false_answers)
true_df = pd.DataFrame()
true_df['Questions'] = true_false_answers[0]
true_df['Answers'] = 'True'
true_df['True_Sentence'] = true_df['Questions']
false_df = | pd.DataFrame() | pandas.DataFrame |
import unittest
import pandas as pd
import numpy as np
from pandas.util.testing import assert_frame_equal
from pdblp import pdblp
import os
IP_PORT = 8194
class TestBCon(unittest.TestCase):
def setUp(self):
self.con = pdblp.BCon(port=IP_PORT, timeout=5000)
self.con.start()
cdir = os.path.dirname(__file__)
self.path = os.path.join(cdir, 'data/')
def tearDown(self):
pass
def pivot_and_assert(self, df, df_exp, with_date=False):
# as shown below, since the raw data returned from bbg is an array
        # with unknown ordering, there is no guarantee that the `position` will
# always be the same so pivoting prior to comparison is necessary
#
# fieldData = {
# INDX_MWEIGHT[] = {
# INDX_MWEIGHT = {
# Member Ticker and Exchange Code = "BON8"
# Percentage Weight = 2.410000
# }
# INDX_MWEIGHT = {
# Member Ticker and Exchange Code = "C N8"
# Percentage Weight = 6.560000
# }
# INDX_MWEIGHT = {
# Member Ticker and Exchange Code = "CLN8"
# Percentage Weight = 7.620000
# }
# }
# }
name_cols = list(df_exp.name.unique())
sort_cols = list(df_exp.name.unique())
index_cols = ["name", "position", "field", "ticker"]
if with_date:
sort_cols.append("date")
index_cols.append("date")
df = (df.set_index(index_cols).loc[:, "value"]
.unstack(level=0).reset_index().drop(columns="position")
.sort_values(by=sort_cols, axis=0))
df_exp = (df_exp.set_index(index_cols).loc[:, "value"]
.unstack(level=0).reset_index().drop(columns="position")
.sort_values(by=sort_cols, axis=0))
# deal with mixed types resulting in str from csv read
for name in name_cols:
try:
df_exp.loc[:, name] = df_exp.loc[:, name].astype(float)
except ValueError:
pass
for name in name_cols:
try:
df.loc[:, name] = df.loc[:, name].astype(float)
except ValueError:
pass
if with_date:
df.loc[:, "date"] = pd.to_datetime(df.loc[:, "date"],
format="%Y%m%d")
df_exp.loc[:, "date"] = pd.to_datetime(df_exp.loc[:, "date"],
format="%Y%m%d")
assert_frame_equal(df, df_exp)
def test_bdh_one_ticker_one_field_pivoted(self):
df = self.con.bdh('SPY US Equity', 'PX_LAST', '20150629', '20150630')
midx = pd.MultiIndex(levels=[["SPY US Equity"], ["PX_LAST"]],
labels=[[0], [0]], names=["ticker", "field"])
df_expect = pd.DataFrame(
index=pd.date_range("2015-06-29", "2015-06-30"),
columns=midx,
data=[205.42, 205.85]
)
df_expect.index.names = ["date"]
assert_frame_equal(df, df_expect)
def test_bdh_one_ticker_one_field_longdata(self):
df = self.con.bdh('SPY US Equity', 'PX_LAST', '20150629', '20150630',
longdata=True)
idx = pd.Index(["date", "ticker", "field", "value"])
data = [["2015-06-29", "2015-06-30"],
["SPY US Equity", "SPY US Equity"], ["PX_LAST", "PX_LAST"],
[205.42, 205.85]]
df_expect = pd.DataFrame(data=data, index=idx).transpose()
df_expect.loc[:, "date"] = pd.to_datetime(df_expect.loc[:, "date"])
df_expect.loc[:, "value"] = np.float64(df_expect.loc[:, "value"])
assert_frame_equal(df, df_expect)
def test_bdh_one_ticker_two_field_pivoted(self):
cols = ['PX_LAST', 'VOLUME']
df = self.con.bdh('SPY US Equity', cols, '20150629', '20150630')
midx = pd.MultiIndex(
levels=[["SPY US Equity"], cols],
labels=[[0, 0], [0, 1]], names=["ticker", "field"]
)
df_expect = pd.DataFrame(
index=pd.date_range("2015-06-29", "2015-06-30"),
columns=midx,
data=[[205.42, 202621332], [205.85, 182925106]]
)
df_expect = df_expect.astype(np.float64)
df_expect.index.names = ["date"]
assert_frame_equal(df, df_expect)
def test_bdh_one_ticker_two_field_longdata(self):
cols = ['PX_LAST', 'VOLUME']
df = self.con.bdh('SPY US Equity', cols, '20150629', '20150630',
longdata=True)
idx = pd.Index(["date", "ticker", "field", "value"])
data = [["2015-06-29", "2015-06-29", "2015-06-30", "2015-06-30"],
["SPY US Equity", "SPY US Equity", "SPY US Equity", "SPY US Equity"], # NOQA
["PX_LAST", "VOLUME", "PX_LAST", "VOLUME"],
[205.42, 202621332, 205.85, 182925106]]
df_expect = | pd.DataFrame(data=data, index=idx) | pandas.DataFrame |
# %%
import pandas as pd
import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
# %%
# Load the menu data.
data = pd.read_csv("All Menu (Various Versions)/국방부메뉴_v2.1.csv", index_col=0)
data
# %%
# Build the URL addresses to request (Naver search).
urls = []
for name in data['메뉴이름']:
url = 'https://search.naver.com/search.naver?sm=tab_hty.top&where=nexearch&query=' + str(name)
urls.append(url)
urls
# %%
# Request the URLs in parallel, 10 at a time.
from concurrent.futures import ThreadPoolExecutor
def get_url(url):
return requests.get(url)
with ThreadPoolExecutor(max_workers=10) as pool:
response_list = list(pool.map(get_url,urls))
# %%
# Check that every request completed successfully.
count = 0
for response in response_list:
if response.status_code != 200: count += 1
count
# %%
# For each request (menu item), count the number of sections on the main search page.
def get_number_of_sections(response_content: str) -> int:
soup = BeautifulSoup(response_content, 'lxml')
main = soup.find("div", {"class": "main_pack"})
sections = main.find_all("section")
return len(sections)
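# %%
# (Added sketch) soup.find() returns None when the "main_pack" container is missing
# (e.g. a blocked or empty results page), and the call above would then raise an
# AttributeError. A defensive variant, assuming the same page layout, could be:
def get_number_of_sections_safe(response_content: str) -> int:
    soup = BeautifulSoup(response_content, 'lxml')
    main = soup.find("div", {"class": "main_pack"})
    if main is None:
        return 0  # treat a missing results container as zero sections
    return len(main.find_all("section"))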
# %%
# Get and store the section count for every request.
section_num = []
for response in response_list:
num = get_number_of_sections(response.content)
section_num.append(num)
section_num
# %%
# The number of sections becomes the baseline taste score.
data['맛'] = section_num
data
# %%
# Save the data including the taste score.
data.to_csv("Second Model Data/국방부메뉴_v2.1_맛_점수_포함.csv")
# %%
# Load all the military units.
unitNumbers = [1691, 2171, 3296, 3389, 5021, 5322, 6176, 6282, 6335, 7369, 7652, 8623, 8902, 9030]
unitNumbers
# %%
def clean_data(unit: str) -> pd.DataFrame:
    # Load the data and keep only the columns we need
data = pd.read_csv("Processed CSV Data Files(Attached Allergy)/제"+ unit +"부대 메뉴정보.csv", index_col=0)
data = data[data['메뉴이름'].notna()]
data = data.iloc[:, 1:]
data = data.drop(columns=data.columns[2:22])
data = data.reset_index(drop=True)
    # Remove all characters other than Hangul and spaces with a regular expression
data['메뉴이름'] = data['메뉴이름'].str.replace("[^ㄱ-ㅎㅏ-ㅣ가-힣 ]","")
return data
# %%
def get_url(url):
return requests.get(url)
def get_responses(urls: list) -> list:
    # Request the URLs in parallel, 10 at a time.
with ThreadPoolExecutor(max_workers=10) as pool:
response_list = list(pool.map(get_url,urls))
    # Check that every request completed successfully.
for response in response_list:
if response.status_code != 200:
print("문제 있음!")
break
return response_list
# %%
def get_scores_of_meals(response_list: list) -> list:
    # Get and store the section count for every request.
section_num = []
for response in response_list:
num = get_number_of_sections(response.content)
section_num.append(num)
return section_num
# %%
def generate_combination_score(unit: str) -> pd.DataFrame:
data = clean_data(unit)
    # Sort by meal name and date.
sorted_data = data.sort_values(['식사명', '날짜'])
sorted_data = pd.DataFrame(sorted_data)
    # Count how many menu items each meal name and date has.
number_of_menus = sorted_data.groupby(['식사명', '날짜']).count()['메뉴이름']
number_of_menus = number_of_menus.to_list()
    # Group by meal name and date and join all menu items of one meal into a single value.
meals = data.groupby(['식사명', '날짜'])['메뉴이름'].apply(lambda x: "%s" % ' '.join(x))
meals = pd.DataFrame(meals)
print(unit + ": 군집화 완료")
    # Build the URL addresses to request (Naver search).
urls = []
for meal in meals['메뉴이름']:
url = 'https://search.naver.com/search.naver?sm=tab_hty.top&where=nexearch&query=' + str(meal)
urls.append(url)
response_list = get_responses(urls)
section_num = get_scores_of_meals(response_list)
print(unit + ": 페이지 요청 완료")
    # Add the per-meal aggregate score as a new column.
meals['종합점수'] = section_num
meals = meals.reset_index()
meals.rename(columns = {"메뉴이름": "한끼니"}, inplace = True)
    # Attach the aggregate score to each menu item.
menu_with_score = pd.merge(sorted_data, meals, on=['식사명', '날짜'])
    # For each menu item, compute its combination score with the other menu items.
combination_score = | pd.DataFrame(columns=['메뉴이름', '다른메뉴', '조합점수']) | pandas.DataFrame |
import sys
from transformers.modeling_openai import OpenAIGPTLMHeadAgenModel
import numpy as np
from transformers import *
import torch
from torch.utils.data import DataLoader
from generate_ivp import sample_sequence_ivp
import pandas as pd
from utils import *
from utils_g import *
from utils_ivp import agen_vector
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from examples.run_generation import *
ps = [0.4]
BETA = 5
cats = {
'pos': 0,
'equal': 1,
'neg': 2
}
def gen_p(model, test_dataset, descat, cat_head, cats):
outlist = []
outp = []
sens = []
for i in ps:
for j in range(len(test_dataset)):
sen = test_dataset[j]
cat = '<' + cats[j] + '>'
sen.extend(tokenizer_g.encode(cat))
sen.append(tokenizer_g.bos_token_id)
senlen = len(sen)
e = torch.FloatTensor(descat[j]).to(device_g)
# e [1,3]
out = sample_sequence(
model=model,
context=sen,
length=max_sen_len,
top_p=i,
e=e,
cat_head=cat_head,
beta=BETA,
device=device_g
)
out = out[0, senlen:].tolist()
text = tokenizer_g.decode(out, clean_up_tokenization_spaces=True, skip_special_tokens=False)
sen = tokenizer_g.decode(sen, clean_up_tokenization_spaces=True, skip_special_tokens=False)
end_ind = text.find('<')
if end_ind >= 0:
text = text[0: end_ind]
sens.append(sen)
outlist.append(text)
outp.append(i)
return outlist, outp, sens
def add_cat(orisen, dataset):
resds = []
descat = []
orids = []
es = np.zeros((1, 3))
es[0][0] = 1
descat = ['pos'] * len(dataset)
es = np.repeat(es, len(dataset), axis=0)
eq = np.zeros((1, 3))
eq[0][1] = 1
tem = ['equal'] * len(dataset)
descat.extend(tem)
eqs = np.repeat(eq, len(dataset), axis=0)
es = np.append(es, eqs, axis=0)
en = np.zeros((1, 3))
en[0][2] = 1
tem = ['neg'] * len(dataset)
descat.extend(tem)
ens = np.repeat(en, len(dataset), axis=0)
es = np.append(es, ens, axis=0)
for i in range(3):
orids.extend(orisen)
resds.extend(dataset)
return orids, resds, es, descat
def gen_roc(model, cat_head):
orisen, test_dataset = process_in_g(ROC_DEV_G, train=False)
orids, test_dataset, es, descat = add_cat(orisen, test_dataset)
outlist, outp, sens = gen_p(model, test_dataset, es, cat_head, descat)
df = pd.DataFrame()
test_dataset = | pd.Series(data=test_dataset) | pandas.Series |
import re
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers import (
CategoricalTransformer, LabelEncodingTransformer, OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test__get_intervals(self):
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
result = CategoricalTransformer._get_intervals(data)
# Asserts
expected_intervals = {
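            # (added note) each tuple reads (start, end, mean, std); in these
            # expected values the std equals the interval width divided by 6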
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert result[0] == expected_intervals
def test_fit(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = np.array(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer.fit(data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert transformer.intervals == expected_intervals
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.fuzzy = False
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.25
@patch('scipy.stats.norm.rvs')
def test__get_value_fuzzy(self, rvs_mock):
# setup
rvs_mock.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__normalize_clip(self):
"""Test normalize data with clip=True"""
# Setup
transformer = CategoricalTransformer(clip=True)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test_reverse_transform_array(self):
"""Test reverse_transform a numpy.array"""
# Setup
data = np.array(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
transformer = CategoricalTransformer()
# Run
transformer.fit(data)
result = transformer.reverse_transform(rt_data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert transformer.intervals == expected_intervals
expect = pd.Series(data)
pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows
Output:
- the output of `_transform_by_category`
Side effects:
- `_transform_by_category` will be called once
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer.transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_category.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_category.return_value
def test__transform_by_category(self):
"""Test the `_transform_by_category` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 5 rows
        Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
def test__transform_by_row_called(self):
"""Test that the `_transform_by_row` method is called.
When the number of rows is less than or equal to the number of categories,
expect that the `_transform_by_row` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 4 rows
Output:
- the output of `_transform_by_row`
Side effects:
- `_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer.transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_row.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_row.return_value
def test__transform_by_row(self):
"""Test the `_transform_by_row` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 4 rows
        Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_row(data)
# Asserts
expected = np.array([0.875, 0.625, 0.375, 0.125])
assert (transformed == expected).all()
@patch('psutil.virtual_memory')
    def test__reverse_transform_by_matrix_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_matrix` method is called.
When there is enough virtual memory, expect that the
`_reverse_transform_by_matrix` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_matrix`
Side effects:
- `_reverse_transform_by_matrix` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
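        # (added note) 4 rows * 4 categories * 8 bytes * 3 is presumably the memory the
        # matrix approach needs, so the +1 makes `available` just large enough for it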
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer.reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_matrix.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_matrix.return_value
@patch('psutil.virtual_memory')
    def test__reverse_transform_by_matrix(self, psutil_mock):
"""Test the _reverse_transform_by_matrix method with numerical data
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories and means. Also patch
the `psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- transformed data with 4 rows
        Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform_by_matrix(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_category` method is called.
When there is not enough virtual memory and the number of rows is greater than the
number of categories, expect that the `_reverse_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 5 rows
Output:
- the output of `_reverse_transform_by_category`
Side effects:
- `_reverse_transform_by_category` will be called once
"""
# Setup
transform_data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = transform_data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer.reverse_transform(
categorical_transformer_mock, transform_data)
# Asserts
categorical_transformer_mock._reverse_transform_by_category.assert_called_once_with(
transform_data)
assert reverse == categorical_transformer_mock._reverse_transform_by_category.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category(self, psutil_mock):
"""Test the _reverse_transform_by_category method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 5 rows
        Output:
- the original data
"""
data = pd.Series([1, 3, 3, 2, 1])
transformed = pd.Series([0.875, 0.375, 0.375, 0.625, 0.875])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
reverse = transformer._reverse_transform_by_category(transformed)
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_row` method is called.
When there is not enough virtual memory and the number of rows is less than or equal
to the number of categories, expect that the `_reverse_transform_by_row` method
is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_row`
Side effects:
- `_reverse_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock.starts = pd.DataFrame(
[0., 0.25, 0.5, 0.75], index=[4, 3, 2, 1], columns=['category'])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer.reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_row.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_row.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row(self, psutil_mock):
"""Test the _reverse_transform_by_row method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means, starts,
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 4 rows
        Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.starts = pd.DataFrame(
[4, 3, 2, 1], index=[0., 0.25, 0.5, 0.75], columns=['category'])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer.reverse_transform(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
class TestOneHotEncodingTransformer:
def test__prepare_data_empty_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
data = [[], [], []]
# Assert
with pytest.raises(ValueError):
ohet._prepare_data(data)
def test__prepare_data_nested_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
data = [[[]]]
# Assert
with pytest.raises(ValueError):
ohet._prepare_data(data)
def test__prepare_data_list_of_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = [['a'], ['b'], ['c']]
out = ohet._prepare_data(data)
# Assert
expected = np.array(['a', 'b', 'c'])
np.testing.assert_array_equal(out, expected)
def test__prepare_data_pandas_series(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', 'c'])
out = ohet._prepare_data(data)
# Assert
expected = pd.Series(['a', 'b', 'c'])
np.testing.assert_array_equal(out, expected)
def test_fit_no_nans(self):
"""Test the ``fit`` method without nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be activated
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', 'c'])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 'b', 'c'])
np.testing.assert_array_equal(ohet.decoder, ['a', 'b', 'c'])
assert ohet.dummy_encoded
assert not ohet.dummy_na
def test_fit_no_nans_numeric(self):
"""Test the ``fit`` method without nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be deactivated
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series([1, 2, 3])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, [1, 2, 3])
np.testing.assert_array_equal(ohet.decoder, [1, 2, 3])
assert not ohet.dummy_encoded
assert not ohet.dummy_na
def test_fit_nans(self):
"""Test the ``fit`` method with nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
and NA should be activated.
Input:
            - Series containing nan values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', None])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 'b'])
np.testing.assert_array_equal(ohet.decoder, ['a', 'b', np.nan])
assert ohet.dummy_encoded
assert ohet.dummy_na
def test_fit_nans_numeric(self):
"""Test the ``fit`` method with nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be deactivated and NA activated.
Input:
            - Series containing nan values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series([1, 2, np.nan])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, [1, 2])
np.testing.assert_array_equal(ohet.decoder, [1, 2, np.nan])
assert not ohet.dummy_encoded
assert ohet.dummy_na
def test_fit_single(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'a', 'a'])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a'])
def test__transform_no_nan(self):
"""Test the ``_transform`` method without nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation.
Input:
- Series with values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet.dummies = ['a', 'b', 'c']
ohet.num_dummies = 3
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_no_nan_categorical(self):
"""Test the ``_transform`` method without nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
using the categorical branch.
Input:
- Series with categorical values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet.dummies = ['a', 'b', 'c']
ohet.indexer = [0, 1, 2]
ohet.num_dummies = 3
ohet.dummy_encoded = True
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_nans(self):
"""Test the ``_transform`` method with nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation. Null
values should be represented by the same encoding.
Input:
- Series with values containing nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([np.nan, None, 'a', 'b'])
ohet.dummies = ['a', 'b']
ohet.dummy_na = True
ohet.num_dummies = 2
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_nans_categorical(self):
"""Test the ``_transform`` method with nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation using
the categorical branch. Null values should be
represented by the same encoding.
Input:
- Series with categorical values containing nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([np.nan, None, 'a', 'b'])
ohet.dummies = ['a', 'b']
ohet.indexer = [0, 1]
ohet.dummy_na = True
ohet.num_dummies = 2
ohet.dummy_encoded = True
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_single(self):
"""Test the ``_transform`` with one category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
where it should be a single column.
Input:
- Series with a single category
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet.dummies = ['a']
ohet.num_dummies = 1
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1],
[1],
[1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_single_categorical(self):
"""Test the ``_transform`` with one category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
using the categorical branch where it should
be a single column.
Input:
- Series with a single category
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = | pd.Series(['a', 'a', 'a']) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 24 20:03:24 2019
@author: RV
"""
# Python(R)
# Modules/packages/libraries
# OS - submodules/path/join
# e.g. os.path.join
# pandas
# scipy
# numpy
#%% Setup
import os
projFld = "C:/Users/RV/Documents/Teaching/2019_01_Spring/ADEC7430_Spring2019/Lecture02"
codeFld = os.path.join(projFld, "PyCode")
fnsFld = os.path.join(codeFld, "_Functions")
outputFld = os.path.join(projFld, "Output")
rawDataFld = os.path.join(projFld, "RawData")
savedDataFld = os.path.join(projFld, "SavedData")
#%% load some functions
fnList = ["fn_logMyInfo"] # this is a list
for fn in fnList:
exec(open(os.path.join(fnsFld, fn + ".py")).read())
# Explain: name, extension name, read+write
# create also a file where we will log some data
logf = os.path.join(rawDataFld, "logfile.csv")
# test writing to log
from _Functions.fn_logMyInfo import fn_logMyInfo
fn_logMyInfo("test writing to log", useConsole=True, useFile=logf)
# can we enhance what we write? How about we add a timestamp?
#%% introduction to datetime
import datetime as DT
# what time is it now?
DT.datetime.now() # do you like the format?
# micro-seconds
# can we format this more friendly?
nowtime = DT.datetime.now()
nowtimef = nowtime.strftime(format="%Y-%m-%d %H:%M:%S") # remember this
nowtimef
type(nowtimef) # so this is a string
# let's add microseconds as well
nowtimef = nowtime.strftime(format="%Y-%m-%d %H:%M:%S.%f") # remember this
nowtimef
#%%
# do you want to keep writing the long formula above? I'd rather write a function
def nowdt():
return(DT.datetime.now().strftime(format="%Y-%m-%d %H:%M:%S.%f"))
nowdt()
#%% now let's add timestamp to our log output
fn_logMyInfo(nowdt() + "," + " second test writing to log", useFile = logf)
# open the log file - do you notice anything unpleasant? e.g. the messages are appended
# can we try to save to a new line?
fn_logMyInfo("\n" + nowdt() + "," + " second test writing to log", useFile = logf)
# this is better... but lengthened our logging function quite a lot
#@@@@ add here a wrapper for this function, with defaults for: newline, timestamp, using given file
# The Excel file doesn't show the micro-second digits, but a text reader will.
#%% Remember how this function works...
#@@@@ how to print the function so we can see how it's put together?
#%%
#==============================================================================
# Exploratory analysis
#==============================================================================
# Data from Kaggle's Titanic challenge comes already split in Train & Test
# See slides - why do we need this split?
# point to the files
rawTrainFile = os.path.join(rawDataFld, "Lecture2_train.csv")
rawTestFile = os.path.join(rawDataFld, "Lecture2_test.csv")
#%% Pandas - transformation and data management, package
# read them into pandas DataFrame
import pandas as pd
rawTrain = pd.read_csv(rawTrainFile, sep=',')
# ParserWarning: Falling back to the "python" engine b/c the "c" engine doesn't support regex separators (separators > 1 char and different from '\s+' are interpreted as regex)
# So you cannot do sep = ',,' and we don't have to do anything with lines '\n'
# If you want to test the warnings you would get when longer separators exist...
# let's understand this DataFrame a bit
# size
rawTrain.shape # how many rows, how many columns?
# print top 7 records
rawTrain.head(7) # notice the dots?
# let's expand the view - need options for pandas printout
pd.set_option('display.width', 1000)
pd.set_option('max_colwidth', 500)
pd.set_option('display.max_columns', 12)
rawTrain.head(7) # does it look better? did the dots vanish or are they still there?
# What is rawTrain?
type(rawTrain)
# list all columns
rawTrain.columns
# list all columns AND their types
rawTrain.dtypes # to think... if CSV is a text file, how did pandas figure out the types?
# does it make sense to have Age "float64"? (fractional) B/C Age has missing values
# int64 doesn't allow for missing values
# Let's force pandas to read everything as character (strings)
rawTrain_c = pd.read_csv(rawTrainFile, sep=',', dtype=object)
rawTrain_c.dtypes
rawTrain_c.head(5)
# for numeric variables, try this:
rawTrain.describe() # anything interesting here? See Age.
# Only numeric columns are shown. Age has missing data - its count is lower than the others.
# are there missing values?
pd.isnull(rawTrain)
pd.isnull(rawTrain).astype(int)
pd.isnull(rawTrain).astype(int).aggregate(sum)
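# (added note) an equivalent, more idiomatic one-liner for the per-column missing counts:
rawTrain.isnull().sum()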
# can we see some records with missing Age values?
agecond = rawTrain.Age.isnull()
agecond
agecond.value_counts() # so here are our 177 missing records
rawTrain.loc[agecond].head(10).drop(columns=['Name', 'Ticket'])
# Data.loc[rows where Age is missing], take the first 10 rows, and drop the Name/Ticket columns for display
# how is missing age represented? NaN
#%% maybe drop some columns/vars - e.g. Name
rTrain = rawTrain # so very dangerous with data.table, which is otherwise a very powerful tool
rTrain = rTrain.drop(columns=['Name'])
rTrain.shape
rawTrain.shape
# so changing the copy did not change the original - take a moment to enjoy this
# (with R's data.table, by contrast, modifying such a "copy" would also modify the original)
#%% save a copy of the data for faster later access
rTrainFile = os.path.join(savedDataFld, "rTrain.pkl")
rTrain.to_pickle(rTrainFile)
rTrain.to_csv(rTrainFile+".csv")
# go open the last saved CSV file; what's with the first column?
rTrain.to_csv(rTrainFile + ".csv", index=None)
# go try again - did that column vanish?
#%% distribution of ages?
# simple boxplot
rTrain.boxplot(column=['Age'])
# more complex - Age boxplot by "Survived"
rTrain.groupby('Survived').boxplot(column=['Age'])
# more complex - Age boxplot by passenger class (Pclass)
rTrain.groupby('Pclass').boxplot(column=['Age'])
# HW: In 1-2 sentences describe what this boxplot tells you
rTrain.groupby('Survived').boxplot(column=['Pclass'])
# HW: In 1-2 sentences describe what this boxplot tells you
#%% Quantiles
rTrain.quantile(.1)
rTrain.quantile(.9)
rTrain.quantile([0.05*i for i in range(20)])
# Notice the choice of layout between one value and multiple-values for quantile request!
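# (added note) range(20) stops at the 95th percentile; the maximum corresponds to quantile 1.0, e.g.:
rTrain.quantile([i/20 for i in range(21)])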
#%% Character functions - trim
# do we need to trim (?) some names or other character values?
# in Python this is called stripping the leading/trailing characters
' left and right '.strip()
#%% working with counts
# table(rTrain[,Age,by=list(Sex)]) - R version
# simple counts by value - in descending order of count/frequency
rTrain.Age.value_counts() # ughh, how many passengers of age 62 are there? Hard to see...
# same counts but now sort by actual values (index for the DataFrame)
rTrain.Age.value_counts().sort_index()
# counts/frequency by groups:
rTrain.groupby(['Sex']).Age.value_counts()
rTrain.groupby(['Sex']).Age.value_counts().sort_index() # same counts, now sorted by Sex and Age (the index) rather than by frequency
# better sorting, but a bit cumbersome to read already... see the left-most index (gender)
# in such cases, resetting the (multi)index is the best
rTrain.groupby(['Sex']).Age.value_counts().sort_index().reset_index(name='freq')
# HW: from left to right, explain what the "chain" of the commands above does
#%% Create new variables - grouped age values
# table(rTrain[,5*ceiling(Age/5),by=list(Sex)]) # R version, included for reference
import numpy as np # nice package/library/module, we'll use it frequently!
# We'll use an anonymous or inline or "lambda" function (not quite the same thing, but we'll think about them as such for now)
rTrain.groupby(['Sex']).apply(lambda x: 5*np.ceil(x.Age/5)).value_counts().sort_index()
# Can you figure out what is happening above?
# not quite what we would have liked... where are the groups by Sex in the output?
# best to create the variable separately...
rTrain['Age5'] = 5*np.ceil(rTrain['Age']/5)
rTrain.groupby(['Sex']).Age5.value_counts().sort_index()
# more like it...
# now clean up the index and sorting as we did above
# similar view for Survived by Sex/Gender:
# table(rTrain[,Sex, by=list(Survived)]) # R version
rTrain.groupby(['Survived']).Sex.value_counts()
#%% Plotting / Graphics
# Both matplotlib and seaborn are quite powerful
import matplotlib.pyplot as plt
import seaborn as sns
#boxplot(rTrain[,Fare])
sns.distplot(rTrain['Age'], bins=20)
# what has happened? is your plot blank as well???
# hunch: this is probably due to missing values; let's try to remove them before plotting
AgeCond = rTrain['Age'].isnull() # define missing condition
AgeCond.value_counts() # missings vs non-missings
sns.distplot(rTrain.loc[~AgeCond, 'Age'], bins=20)
# a better view
sns.violinplot(rTrain['Age'])
# correlation
# var(rTrain[,Age], rTrain[,Fare], na.rm = T) # R version
# !! careful with over-correlating... http://www.tylervigen.com/spurious-correlations !!
#corrplot(cor(rTrain), method='number')
sns.pairplot(rTrain)
# doesn't really do much, does it? Why?
# You guessed it - probably some missing values - by where the plotting stopped, missings are in "Age"
# We'll use the age-missing filter created above
# But first let's drop a variable, it should not be very relevant (?)
rTrain = rTrain.drop(columns=['Cabin'])
sns.pairplot(rTrain.loc[~AgeCond]) # aha, a bit better?
# note that it works for all columns - both numeric and character!
sns.pairplot(rTrain.loc[~AgeCond], kind="scatter", hue="Survived", markers=["o", "s"], palette="Set2")
sns.pairplot(rTrain.loc[~AgeCond], kind="scatter", hue="Survived", plot_kws=dict(s=80, edgecolor="white", linewidth=2.5))
#@@ play with these parameters, how to research what is available? think, search
# can we drop some (more) variables?
rTrain = rTrain.drop(columns=['PassengerId'])
sns.pairplot(rTrain.loc[~AgeCond], kind="scatter", hue="Survived", markers=["o", "s"], palette="Set2")
#%% Using seaborn for nicer boxplots
sns.boxplot(rTrain['Age'])
sns.boxenplot(rTrain['Age'])
#%% Pairwise & simple regression plotting
#plot(Survived ~ Fare + Pclass, data=rTrain) # R version
sns.catplot(x='Fare', y='Pclass', hue='Survived', data=rTrain)
sns.catplot(x='Fare', y='Pclass', hue='Survived', data=rTrain, kind='violin')
# "violin" is not very helpful here, due to the data - but keep it in mind for later...
#@@@@ fix catplot
# sns.lmplot()  # placeholder - lmplot needs x=, y= and data= arguments (see the working calls just below)
# linear regression plots - in one plot
sns.lmplot(x='Fare', y='Pclass', hue='Survived', data=rTrain)
# linear regression plots - in two side-by-side plots
sns.lmplot(x='Fare', y='Pclass', col='Survived', data=rTrain)
#sns.catplot(rTrain['Fare'], rTrain['Pclass'], hue=rTrain['Survived'])
#@@@@ complete this one too
x = sns.pairplot(rTrain, hue='Survived', x_vars='Fare', y_vars='Pclass')
x.fig
# perhaps a bit of jittering would help
# We'll try to add random "noise" to the two variables Survived and Pclass
howMuchJitter = 0.2
import random
#!! Please note the difference between how we modify Pclass and Survived
rTrain['Pclass_j'] = rTrain['Pclass'] + random.gauss(0,howMuchJitter)
# ... and how we modify Survived
rTrain['Survived_j'] = rTrain['Survived']
rTrain['Survived_j'] += [random.gauss(0,howMuchJitter) for i in range(rTrain.shape[0])]
sns.pairplot(rTrain[['Survived_j','Pclass_j']])
# a bit too tiny.. let's change some size
sns.pairplot(rTrain[['Survived_j', 'Pclass_j']], height=5)
# HW: Analyze and describe the distribution of Survived_j and Pclass_j
# Hint: use describe(), boxplot, histogram(distplot)
# include the code and your conclusions in the online Text submission for the Assignment in Module 3
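#%% (Added sketch) an equivalent, vectorized way to jitter both columns per row with
# numpy, reusing howMuchJitter; Pclass_j2/Survived_j2 are new illustrative column
# names, not used elsewhere in this script
rTrain['Pclass_j2'] = rTrain['Pclass'] + np.random.normal(0, howMuchJitter, rTrain.shape[0])
rTrain['Survived_j2'] = rTrain['Survived'] + np.random.normal(0, howMuchJitter, rTrain.shape[0])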
#%% imputation by mode
import scipy
agecond = rTrain.Age.isnull()
rTrain_Age_Mode = scipy.stats.mode(rTrain.loc[~agecond].Age)
rTrain_Age_Mode # we only need the mode, let's extract it
rTrain_Age_Mode.mode # almost there, just need the value from inside the array
rTrain_Age_Mode.mode[0] # finally...
rTrain_Age_Mode = rTrain_Age_Mode.mode[0] # yep, we keep only the relevant value
rTrain_Age_Mode
# now impute... but let's keep track of where we imputed, eh?
rTrain['Age_imputed'] = 0
rTrain.loc[agecond, 'Age_imputed'] = 1
# let's do a cross-tabulation to make sure we flagged exactly the missing Age records
pd.crosstab(rTrain['Age_imputed'], agecond)
# now we're ready for imputation
rTrain.loc[agecond, 'Age'] = rTrain_Age_Mode
# check with a histogram
sns.distplot(rTrain.Age)
# mode(rTrain[,Age]) # R version - not quite what we were expecting, right? see p.123 in DL for a "trick" which is not that good...
#%% mode of a list of numbers
testList = [1,2,3,4,1,2,3,1,2,1]
from collections import Counter
tcnts = Counter(testList)
tcnts.most_common()
tcnts.most_common(1)
tcnts.most_common(2)
# does this work on strings?
testList2 = [str(i) for i in testList]
tcnts2 = Counter(testList2)
tcnts2.most_common() # seem so
# another way:
from statistics import mode
mode(testList)
mode(testList2)
from scipy.stats import mode
mode(testList)
mode(testList2)
# try to tease the object above...
x = mode(testList)
type(x)
x.mode # and other ones...
# which one do you find more useful?
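#%% (Added sketch) pandas also provides a mode directly on a Series; it handles both
# the numeric and the string list and returns all tied values
pd.Series(testList).mode()
pd.Series(testList2).mode()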
#%% imputation by mode
import scipy
agecond = rTrain.Age.isnull()
rTrain_Age_Mode = scipy.stats.mode(rTrain.loc[~agecond].Age)
rTrain_Age_Mode # we only need the mode, let's extract it
rTrain_Age_Mode.mode # almost there, just need the value from inside the array
rTrain_Age_Mode.mode[0] # finally...
rTrain_Age_Mode = rTrain_Age_Mode.mode[0] # yep, we keep only the relevant value
rTrain_Age_Mode
# now impute... but let's keep track of where we imputed, eh?
rTrain['Age_imputed'] = 0
rTrain.loc[agecond, 'Age_imputed'] = 1
# let's do a cross-tabulation to make sure we flagged exactly the missing Age records
| pd.crosstab(rTrain['Age_imputed'], agecond) | pandas.crosstab |
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import period as libperiod
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
import pandas._testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
assert result == pd.Period("2011-01-01", freq="D")
result = idx[-1]
assert result == pd.Period("2011-01-31", freq="D")
result = idx[0:5]
expected = pd.period_range("2011-01-01", "2011-01-05", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[0:10:2]
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-07", "2011-01-09"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[-20:-5:3]
expected = pd.PeriodIndex(
["2011-01-12", "2011-01-15", "2011-01-18", "2011-01-21", "2011-01-24"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[4::-1]
expected = PeriodIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_getitem_index(self):
idx = period_range("2007-01", periods=10, freq="M", name="x")
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x")
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False, True, True, False, False, False]]
exp = pd.PeriodIndex(
["2007-01", "2007-02", "2007-06", "2007-07"], freq="M", name="x"
)
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range("2007-01", periods=50, freq="M")
ts = Series(np.random.randn(len(rng)), rng)
with pytest.raises(KeyError, match=r"^'2006'$"):
ts["2006"]
result = ts["2008"]
assert (result.index.year == 2008).all()
result = ts["2008":"2009"]
assert len(result) == 24
result = ts["2008-1":"2009-12"]
assert len(result) == 24
result = ts["2008Q1":"2009Q4"]
assert len(result) == 24
result = ts[:"2009"]
assert len(result) == 36
result = ts["2009":]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
msg = "left slice bound for non-unique label: '2008'"
with pytest.raises(KeyError, match=msg):
ts[slice("2008", "2009")]
def test_getitem_datetime(self):
rng = period_range(start="2012-01-01", periods=10, freq="W-MON")
ts = Series(range(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
assert idx[0] == pd.Period("2011-01", freq="M")
assert idx[1] is pd.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert s[pd.Period("2011-01", freq="M")] == pd.Period("2011-01", freq="M")
assert s[pd.NaT] is pd.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start="2012-01-01", periods=10, freq="D")
ts = Series(range(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[ | Period("2012-01-02", freq="D") | pandas.Period |
#!/usr/bin/python
import finaExp as fe
import os, pandas, urllib
from ofxparse import OfxParser as ofp
from ofxparse.ofxparse import OfxParserException as ofpe
def importOFX(fileName):
'''importOFX brings in the OFX transaction objects to be analyzed'''
if not(os.path.exists("data")):
os.makedirs("data")
currentData = fe.unPickleData()
transList = []
ofx = ofp.parse(fileName)
for t in ofx.account.statement.transactions:
transList.append({'id':t.id, 'amount':t.amount, 'checknum':t.checknum, 'date':t.date, 'mcc':t.mcc, 'memo':t.memo, 'payee':t.payee, 'sic':t.sic, 'type':t.type, 'cat':''})
df = | pandas.DataFrame.from_records(transList, columns=['id', 'date', 'payee','cat', 'amount', 'type', 'memo', 'checknum', 'sic']) | pandas.DataFrame.from_records |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: | pd.Timestamp("2012-08-05 00:00:00") | pandas.Timestamp |
import pandas as pd
import pytest
@pytest.mark.functions
def test_convert_matlab_date():
mlab = [
733_301.0,
729_159.0,
734_471.0,
737_299.563_296_356_5,
737_300.000_000_000_0,
]
df = | pd.DataFrame(mlab, columns=["dates"]) | pandas.DataFrame |
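# Hedged aside (not part of the dataset record above): MATLAB serial date
# numbers count days from year 0, and 1970-01-01 corresponds to datenum 719529,
# so one common conversion to pandas Timestamps is the sketch below. The janitor
# helper exercised by the test may differ in rounding or fraction handling.
import pandas as pd

MATLAB_UNIX_EPOCH = 719529  # MATLAB datenum of 1970-01-01

def matlab_datenum_to_timestamp(datenum):
    # Interpret the offset from the Unix epoch as (possibly fractional) days.
    return pd.to_datetime(datenum - MATLAB_UNIX_EPOCH, unit="D")

# e.g. matlab_datenum_to_timestamp(733_301.0) falls in 2007.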
#!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020-2021, Intel Corporation
#
#
# csv_compare.py -- compare CSV files (EXPERIMENTAL)
#
# In order to compare them, all CSVs are plotted on the same chart.
# XXX include hostname for easier reporting.
#
import argparse
import os
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
from textwrap import wrap
from matplotlib.ticker import ScalarFormatter
column_to_label = {
'threads': '# of threads',
'iodepth': 'iodepth',
'bs': 'block size [B]',
'lat': 'latency [usec]',
'bw': 'bandwidth [Gb/s]',
'cpuload': 'CPU load [%]'
}
column_to_description = {
'threads': 'threads={}',
'iodepth': 'iodepth={}',
'bs': 'block size={}B',
}
column_default = {
'threads': 1,
'iodepth': 1,
'bs': None,
}
dimensions = {'threads', 'iodepth', 'bs', 'cpuload'}
layouts = {
'lat_avg': {
'nrows': 1,
'ncols': 1,
'x': '<arg_axis>',
'columns': [
'lat_avg'
],
'xscale': '<arg_xscale>',
},
'lat_pctls': {
'nrows': 1,
'ncols': 2,
'x': '<arg_axis>',
'columns': [
            # XXX When the 99.999 percentile is stabilized, 99.99 can be
            # replaced with 99.999.
'lat_pctl_99.9', 'lat_pctl_99.99'
],
'xscale': '<arg_xscale>',
},
'lat_pctls_999': {
'nrows': 1,
'ncols': 2,
'x': 'bs',
'columns': [
'lat_pctl_99.0', 'lat_pctl_99.9'
],
'xscale': 'log',
},
'lat_pctls_99999': {
'nrows': 1,
'ncols': 2,
'x': 'bs',
'columns': [
'lat_pctl_99.99', 'lat_pctl_99.999'
],
'xscale': 'log',
},
'lat_pctl_99999': {
'nrows': 1,
'ncols': 1,
'x': 'bs',
'columns': [
'lat_pctl_99.999'
],
'xscale': 'log',
},
'lat_all': {
'nrows': 4,
'ncols': 2,
'x': '<arg_axis>',
'columns': [
'lat_avg', 'lat_stdev',
'lat_min', 'lat_max',
'lat_pctl_99.0', 'lat_pctl_99.9',
'lat_pctl_99.99', 'lat_pctl_99.999'
],
'xscale': '<arg_xscale>',
},
'bw': {
'nrows': 1,
'ncols': 1,
'x': '<arg_axis>',
'columns': [
'bw_avg'
],
'xscale': '<arg_xscale>',
},
}
empty = {'lat_avg': [0], 'lat_pctl_99.9': [0], 'lat_pctl_99.999': [0], \
'bs': [1], 'bw_avg': [0], 'threads': [1]}
def get_label(column):
"""Find a text label for an axis describing a provided CSV column.
:param column: name of the CSV column
:type column: str
:return: a label for an axis
:rtype: str
"""
for key, label in column_to_label.items():
if key in column:
return label
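# Note: get_label() relies on substring matching against column_to_label keys,
# so e.g. get_label('lat_pctl_99.9') yields 'latency [usec]' and
# get_label('bw_avg') yields 'bandwidth [Gb/s]'; a column matching no key
# falls through and returns None.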
def dfs_filter(dfs, df_names, column_list):
"""Filter out all pandas.DataFrame without required columns
:param dfs: list of pandas.DataFrame objects to draw on the subplot
:type dfs: list[pandas.DataFrame]
:param df_names: a list of human readable descriptions for dfs
:type df_names: list[str]
:param column_list: a list of required columns
:type column_list: list[str]
:return: a list of pandas.DataFrame and their names
:rtype: list[pandas.DataFrame], list[str]
"""
dfs_out = []
df_names_out = []
# loop over all (pandas.DataFrame, str) pairs
for df, df_name in zip(dfs, df_names):
has_all = True
for column in column_list:
if column not in df.columns:
has_all = False
break
# if DataFrame does not have all specified columns just skip
# the DataFrame
if not has_all:
continue
# append the DataFrame and its name to the outputs
dfs_out.append(df)
df_names_out.append(df_name)
return dfs_out, df_names_out
def dfs_all_values(dfs, column):
"""Find all possible values of a column in the pandas.DataFram list
:param dfs: list of pandas.DataFrame objects to draw on the subplot
:type dfs: list[pandas.DataFrame]
    :param column: a column
:type column: str
:return: a sorted list of possible values from the column
:rtype: list[values]
"""
values = []
# loop over all (pandas.DataFrame, str) pairs
for df in dfs:
values.extend(df[column].tolist())
# set() removes duplicates
# sorted() converts Set to List and sort the elements
return sorted(set(values))
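# Illustration: for two frames whose 'bs' columns hold [256, 512] and
# [512, 1024], dfs_all_values(dfs, 'bs') returns [256, 512, 1024] -- the
# duplicate-free, sorted union -- which draw_plot() uses directly as x-ticks.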
def draw_plot(ax, dfs, legend, x, y, xscale, yaxis_max):
"""Draw multiple lines y(x) using data from the dfs list on the ax subplot.
:param ax: an axes (subplot)
:type ax: matplotlib.axes
:param dfs: list of pandas.DataFrame objects to draw on the subplot
:type dfs: list[pandas.DataFrame]
:param legend: a list of human readable descriptions for dfs
:type legend: list[str]
:param x: a column to be drawn on the x-axis
:type x: str
:param y: a column to be drawn on the y-axis
:type y: str
    :param xscale: an x-axis scale
    :type xscale: str
    :param yaxis_max: an optional y-axis upper limit (anything float() accepts, or None)
"""
xticks = dfs_all_values(dfs, x)
# loop over all pandas.DataFrame objects
for df in dfs:
# setting the x-column as an index is required to draw the y-column
# as a function of x argument
df = df.set_index(x)
# plot line on the subplot
df[y].plot.line(ax=ax, rot=45, marker='.')
if xscale == "linear":
ax.set_xscale(xscale)
else:
ax.set_xscale(xscale, base=2)
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.set_xticks(xticks)
ax.set_xlabel(get_label(x))
ax.set_ylabel(get_label(y))
ax.set_ylim(bottom=0)
if yaxis_max is not None:
ax.set_ylim(top=float(yaxis_max))
ax.legend(legend, fontsize=6)
ax.grid(True)
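# Hedged usage sketch (illustration only): draw_plot() takes one DataFrame per
# data series, all sharing the x and y columns, plus one legend entry per frame.
# A toy call, with made-up values, could look like:
#
#     fig, ax = plt.subplots()
#     df_a = pd.DataFrame({'bs': [256, 512, 1024], 'lat_avg': [3.1, 3.4, 4.0]})
#     df_b = pd.DataFrame({'bs': [256, 512, 1024], 'lat_avg': [2.9, 3.2, 3.8]})
#     draw_plot(ax, [df_a, df_b], ['run A', 'run B'], 'bs', 'lat_avg', 'log', None)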
def draw_table(ax, dfs, legend, x, y):
"""Draw a table of all data used to chart y(x)
:param ax: an axes (subplot)
:type ax: matplotlib.axes
:param dfs: list of pandas.DataFrame objects to draw on the subplot
:type dfs: list[pandas.DataFrame]
:param legend: a list of human readable descriptions for dfs
:type legend: list[str]
:param x: a column to be drawn on the x-axis
:type x: str
:param y: a column to be drawn on the y-axis
:type y: str
"""
col_labels = dfs_all_values(dfs, x)
column_legend = []
cell_text = []
# loop over all pandas.DataFrame objects
for df in dfs:
# to allow query y(x) easily
df = df.set_index(x)
df_row = df[y]
# build a row with filled blanks '-'
row = ["{:.2f}".format(df_row[column]) if column in df_row.index else '-' \
for column in col_labels]
cell_text.append(row)
ax.axis('tight')
ax.axis('off')
ax.table(cellText=cell_text, rowLabels=legend, colLabels=col_labels, \
loc='top')
def get_content_height(im, ncols):
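    """Return the y coordinate just below the lowest non-white pixel found on
    a vertical probe line at x = width / (ncols + 1), i.e. the height of the
    actual content within the image."""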
width, height = im.size
# pick a vertical line where the content is expected
x = width / (ncols + 1)
for y in range(height - 1, 0, -1):
pixel = im.getpixel((x, y))
if pixel[0] != 255 or pixel[1] != 255 or pixel[2] != 255:
return y + 1
return 0
def crop_to_content(file, ncols):
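    """Crop the image file in place to its detected content height plus a
    100 px bottom margin (see get_content_height())."""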
# open the file
im = Image.open(file)
# calculate the crop parameters
width, height = im.size
left = 0
top = 0
right = width
bottom = get_content_height(im, ncols)
bottom += 100
# crop and save the output file
im = im.crop((left, top, right, bottom))
im.save(file)
def split_in_half(input, output, top):
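    """Save to `output` a horizontal band of `input` spanning half the image
    height, starting at the fraction `top` (0.0-1.0) of the height."""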
# open the file
im = Image.open(input)
# calculate the crop parameters
width, height = im.size
left = 0
top = int(top * height)
right = width
bottom = int(top + 0.5 * height)
# crop and save the output file
im = im.crop((left, top, right, bottom))
im.save(output)
def get_const_param(dfs, column):
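    """Return the value of `column` from the first row of the first DataFrame,
    falling back to column_default[column] when the column is absent."""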
df = dfs[0]
value = column_default[column]
if column in df.columns:
value = df.at[0, column]
return value
def main():
parser = argparse.ArgumentParser(
description='Compare CSV files (EXPERIMENTAL)')
parser.add_argument('csv_files', metavar='CSV_FILE', nargs='+',
help='a CSV log file to process')
parser.add_argument('--output_file', metavar='OUTPUT_FILE',
default='compare.png', help='an output file')
parser.add_argument('--output_layout', metavar='OUTPUT_LAYOUT',
choices=layouts.keys(), required=True, help='an output file layout')
parser.add_argument('--arg_axis', metavar='ARG_AXIS',
choices=dimensions, required=False,
help='an axis for layouts which requires to pick one')
parser.add_argument('--yaxis_max', metavar='YMAX',
default=None, help='a y-axis max value')
parser.add_argument('--arg_xscale', metavar='XSCALE',
choices=['linear', 'log'], required=False, help='an x-axis scale')
parser.add_argument('--output_with_tables', action='store_true',
        help='include tables with the plotted values in the output')
parser.add_argument('--output_title', metavar='OUTPUT_TITLE',
default='title', help='an output title')
parser.add_argument('--legend', metavar='SERIES', nargs='+',
help='a legend for the data series read from the CSV files')
parser.add_argument('--legend_from_file_name_comment', action='store_true',
help='generate a legend from the file name comments __COMMENT__')
args = parser.parse_args()
# generate or validate the legend
if args.legend_from_file_name_comment:
args.legend = []
for fname in args.csv_files:
comment = fname.split('__')
if len(comment) != 3 or len(comment[1]) == 0:
                args.legend.append(fname)
                continue
args.legend.append(comment[1])
elif args.legend is None:
args.legend = args.csv_files
elif len(args.legend) != len(args.csv_files):
raise Exception(
'The number of legend entries does not match the number of CSV files')
# read all CSV files
dfs = []
for csv_file in args.csv_files:
try:
df = | pd.read_csv(csv_file) | pandas.read_csv |
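# Hedged aside (not part of the dataset record above): main() is truncated at
# the pd.read_csv() call; presumably each CSV is appended to dfs, with the
# `empty` placeholder as a hypothetical fallback for unreadable files, e.g.:
#
#     for csv_file in args.csv_files:
#         try:
#             df = pd.read_csv(csv_file)
#         except pd.errors.EmptyDataError:
#             df = pd.DataFrame(empty)
#         dfs.append(df)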
"""Collect specific gene ontologies, and additional background/complex information """
import os
import re
import functools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sa
import statsmodels.formula.api as sfa
from GEN_Utils import FileHandling
from loguru import logger
from scipy import stats
import scikit_posthocs as sp
from utilities.database_map_and_filter import (go_lineage_tracer,go_term_details,
ontology_wordfinder, uniprot_go_genes, create_uniprot_xref, ortholog_map)
logger.info('Import OK')
clustered_path = f'results/lysate_denaturation/clustering/clustered.xlsx'
background_path = f'results/lysate_denaturation/normalised/normalised_summary.xlsx'
resource_folder = f'resources/bioinformatics_databases/'
output_folder = 'results/lysate_denaturation/gene_ontology_datasets/'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
def add_homolog_id(datasets):
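    """Annotate each dataset with a 'homologue_id' column mapped from the
    module-level swiss_to_homologue_id dict, write each annotated dataset to
    CSV in the output folder, and return the updated dict."""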
for name, data in datasets.items():
data['homologue_id'] = data['Proteins'].map(swiss_to_homologue_id)
data.to_csv(f'{output_folder}{name}.csv')
datasets.update({name: data})
return datasets
# MIG database for homology
homology_db = pd.read_table(f'{resource_folder}HOM_MouseHumanSequence.txt')
homology_db.dropna(subset=['SWISS_PROT IDs', 'HomoloGene ID'], inplace=True)
swiss_to_homologue_id = dict(zip(homology_db['SWISS_PROT IDs'], homology_db['HomoloGene ID']))
# Dataset 1: Any proteins associated with generic "protein complex" GO term GO:0032991
# collect proteins from ontology
complex_genes = uniprot_go_genes(tax_id='10090', go_term='GO:0032991', child_terms=True, direct=False, output='list')
complex_genes = pd.DataFrame(complex_genes).rename(columns={0: 'Proteins'})
# Dataset 2: Against proteins associated with specific complexes: proteasome
# find terms associated with proteasome
potential_proteasome_terms = ontology_wordfinder(['proteasome']) # decided on "GO:0000502: proteasome complex"
proteasome_genes = uniprot_go_genes(tax_id='10090', go_term='GO:0000502', child_terms=False, direct=True, output='list')
proteasome_genes = pd.DataFrame(proteasome_genes).rename(columns={0: 'Proteins'})
# Dataset 3: Against proteins associated with specific complexes: ribosome
# find terms associated with proteasome
potential_terms = ontology_wordfinder(['ribosome']) # decided on "GO:0003735 structural constituent of ribosome"
ribosome_genes = uniprot_go_genes(tax_id='10090', go_term='GO:0003735', child_terms=False, direct=True, output='list')
ribosome_genes = pd.DataFrame(ribosome_genes).rename(columns={0: 'Proteins'})
# Dataset 4: Against proteins associated with specific complexes: DNA repair complex
# find terms associated with proteasome
potential_terms = ontology_wordfinder(['DNA repair complex']) # decided on "GO:1990391 DNA repair complex""
dna_genes = uniprot_go_genes(tax_id='10090', go_term='GO:1990391', child_terms=False, direct=True, output='list')
dna_genes = | pd.DataFrame(dna_genes) | pandas.DataFrame |
import csv
import httplib2
from apiclient.discovery import build
import urllib
import json
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
from plotly.tools import FigureFactory as FF
plotly.tools.set_credentials_file(username = 'ediering', api_key='k23nwbsle7')
# This API key is provided by google as described in the tutorial
API_KEY = '<KEY>'
# This is the table id for the fusion table
TABLE_ID = '15CnIT8u1snCOSRYjV3lPrEnUR_5qoGZ1ZhwGytAt'
try:
fp = open("data.json")
response = json.load(fp)
except IOError:
service = build('fusiontables', 'v1', developerKey=API_KEY)
query = "SELECT * FROM " + TABLE_ID #+ " WHERE 'Total Energy Cost ($)' > 0 AND 'Total Energy Cost ($)' < 1000000 "
response = service.query().sql(sql=query).execute()
fp = open("data.json", "w+")
json.dump(response, fp)
# print len(response['rows'])
data_df = pd.DataFrame(response[u'rows'], columns = response[u'columns'])
working = data_df[['Site', 'Site ID', 'Year', 'Total Energy (kBtu)', 'Total Energy Cost ($)']]
pivot_cost = working.pivot(index='Site ID', columns='Year', values='Total Energy Cost ($)')
pivot_energy = working.pivot(index='Site ID', columns='Year', values='Total Energy (kBtu)')
def totalcostplot_energy():
pivot_cost = working.pivot(index='Site ID', columns='Year', values='Total Energy Cost ($)')
pivot_energy = working.pivot(index='Site ID', columns='Year', values='Total Energy (kBtu)')
rows = pivot_cost.index
plot = []
for i in xrange(len(rows)):
index = rows[i]
trace = go.Scatter(
x = pivot_cost.columns.values,
y = pivot_cost.loc[index],
#mode = 'markers'
)
plot.append(trace)
layout = go.Layout(
xaxis=dict(
autotick=False),
showlegend=False)
fig= go.Figure(data=plot, layout=layout)
return fig
def boxplot():
ten = pd.to_numeric(pivot_cost['2010']).dropna()
eleven = pd.to_numeric(pivot_cost['2011']).dropna()
twelve = pd.to_numeric(pivot_cost['2012']).dropna()
thirteen = pd.to_numeric(pivot_cost['2013']).dropna()
fourteen = | pd.to_numeric(pivot_cost['2014']) | pandas.to_numeric |
import pandas as pd
from pandas.util.testing import assert_frame_equal
import numpy as np
import os
from dataactbroker.helpers import validation_helper
from dataactvalidator.app import ValidationManager, ValidationError
from dataactvalidator.filestreaming.csvReader import CsvReader
from dataactcore.models.validationModels import FileColumn
from dataactcore.models.lookups import FIELD_TYPE_DICT
FILES_DIR = os.path.join('tests', 'integration', 'data')
READ_ERROR = os.path.join(FILES_DIR, 'appropReadError.csv')
BLANK_C = os.path.join(FILES_DIR, 'awardFinancialBlank.csv')
def test_is_valid_type():
assert validation_helper.is_valid_type(None, 'STRING') is True
assert validation_helper.is_valid_type(None, 'STRING') is True
assert validation_helper.is_valid_type(None, 'INT') is True
assert validation_helper.is_valid_type(None, 'DECIMAL') is True
assert validation_helper.is_valid_type(None, 'BOOLEAN') is True
assert validation_helper.is_valid_type(None, 'LONG') is True
assert validation_helper.is_valid_type('1234Test', 'STRING') is True
assert validation_helper.is_valid_type('1234Test', 'INT') is False
assert validation_helper.is_valid_type('1234Test', 'DECIMAL') is False
assert validation_helper.is_valid_type('1234Test', 'BOOLEAN') is False
assert validation_helper.is_valid_type('1234Test', 'LONG') is False
assert validation_helper.is_valid_type('', 'STRING') is True
assert validation_helper.is_valid_type('', 'INT') is True
assert validation_helper.is_valid_type('', 'DECIMAL') is True
assert validation_helper.is_valid_type('', 'BOOLEAN') is True
assert validation_helper.is_valid_type('', 'LONG') is True
assert validation_helper.is_valid_type('01234', 'STRING') is True
assert validation_helper.is_valid_type('01234', 'INT') is True
assert validation_helper.is_valid_type('01234', 'DECIMAL') is True
assert validation_helper.is_valid_type('01234', 'LONG') is True
assert validation_helper.is_valid_type('01234', 'BOOLEAN') is False
assert validation_helper.is_valid_type('1234.0', 'STRING') is True
assert validation_helper.is_valid_type('1234.0', 'INT') is False
assert validation_helper.is_valid_type('1234.00', 'DECIMAL') is True
assert validation_helper.is_valid_type('1234.0', 'LONG') is False
assert validation_helper.is_valid_type('1234.0', 'BOOLEAN') is False
def test_clean_col():
# None cases
assert validation_helper.clean_col('') is None
assert validation_helper.clean_col(' ') is None
assert validation_helper.clean_col('\n') is None
assert validation_helper.clean_col('\"\"') is None
assert validation_helper.clean_col(np.nan) is None
assert validation_helper.clean_col(None) is None
# clean cases
assert validation_helper.clean_col('\nclean me! ') == "clean me!"
assert validation_helper.clean_col(0) == '0'
assert validation_helper.clean_col(1) == '1'
assert validation_helper.clean_col(' \" double quotes\"') == 'double quotes'
assert validation_helper.clean_col([]) == '[]'
assert validation_helper.clean_col({}) == '{}'
def test_clean_frame_vectorized():
df_under_test = pd.DataFrame([
['""', "", " lspace", '"lquote'],
["''", " ", "rspace ", 'rquote"'],
["'hello'", " ", " surround space ", '"surround quote"'],
['"hello"', "\n\t", None, '" surround quote and space "'],
['"hello you"', "5", np.NaN, ' " surround quote and space "\t'],
], columns=list("ABCD"))
df_under_test = validation_helper.clean_frame_vectorized(df_under_test)
expected_df = pd.DataFrame([
[None, None, "lspace", '"lquote'],
["''", None, "rspace", 'rquote"'],
["'hello'", None, "surround space", "surround quote"],
["hello", None, None, "surround quote and space"],
["hello you", "5", None, "surround quote and space"],
], columns=list("ABCD"))
assert_frame_equal(df_under_test, expected_df)
def test_clean_frame_vectorized_mixed_types():
df_under_test = pd.DataFrame([
['""', "", np.NaN, '"25'],
["''", " ", "NaN", '-10"'],
["'10'", " ", np.int64(12), '"0"'],
[77, "\n\t", None, 0.0],
['"11 8"', "5", np.float64(8.2), '99\t'],
], columns=list("ABCD"))
df_under_test = validation_helper.clean_frame_vectorized(df_under_test, convert_to_str=True)
expected_df = pd.DataFrame([
[None, None, None, '"25'],
["''", None, "NaN", '-10"'],
["'10'", None, "12", "0"],
["77", None, None, "0.0"],
["11 8", "5", "8.2", "99"],
], columns=list("ABCD"))
assert_frame_equal(df_under_test, expected_df)
def test_clean_numbers():
# Normal cases
assert validation_helper.clean_numbers('10') == '10'
assert validation_helper.clean_numbers('1,00') == '100'
assert validation_helper.clean_numbers('-10,000') == '-10000'
assert validation_helper.clean_numbers('0') == '0'
    # This is originally designed for just strings but we should still account for these
assert validation_helper.clean_numbers(10) == '10'
assert validation_helper.clean_numbers(-10) == '-10'
assert validation_helper.clean_numbers(0) == '0'
assert validation_helper.clean_numbers(0) == '0'
assert validation_helper.clean_numbers(None) is None
assert validation_helper.clean_numbers(['A']) == ['A']
def test_clean_numbers_vectorized_all_strings():
df_under_test = pd.DataFrame([
["10,003,234", "bad,and", "2242424242", "-10"],
["0", "8", "9.424.2", "-10,000"],
["9.24242", ",2,094", ",01", ",-0,0"],
["1,45", "0055", None, np.NaN]
], columns=list("ABCD"))
for col in df_under_test.columns:
validation_helper.clean_numbers_vectorized(df_under_test[col])
expected_df = pd.DataFrame([
["10003234", "bad,and", "2242424242", "-10"],
["0", "8", "9.424.2", "-10000"],
["9.24242", "2094", "01", "-00"],
["145", "0055", None, np.NaN]
], columns=list("ABCD"))
| assert_frame_equal(df_under_test, expected_df) | pandas.util.testing.assert_frame_equal |
from typing import Dict, List
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm import tqdm
import wandb
api = wandb.Api()
entity = "proteins"
import matplotlib.ticker as ticker
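# Note: the LogFormatter subclass below only changes how tick labels are rendered:
# values above 20000 or below 1 fall back to scientific notation ("%1.0e"), while
# mid-range tick values keep the default pretty-printed form.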
class StupidLogFormatter(ticker.LogFormatter):
def __init__(
self,
base: float = 10.0,
labelOnlyBase=False,
minor_thresholds=None,
linthresh=None,
):
super().__init__(
base=base,
labelOnlyBase=labelOnlyBase,
minor_thresholds=minor_thresholds,
linthresh=linthresh,
)
def _num_to_string(self, x, vmin, vmax):
if x > 20000:
s = "%1.0e" % x
elif x < 1:
s = "%1.0e" % x
else:
s = self._pprint_val(x, vmax - vmin)
return s
def multimsa_pair_plot(df, k1, k2, m="auc"):
filtered_df_x = df[df.sweep_name == k1]
filtered_df_y = df[df.sweep_name == k2]
lsuffix = "_x"
rsuffix = "_y"
merged_df = pd.merge(
filtered_df_x, filtered_df_y, on="pdb_idx", suffixes=(lsuffix, rsuffix)
)
print("comparison families: ", len(merged_df))
lseqs = "num_seqs" + lsuffix
rseqs = "num_seqs" + rsuffix
if lseqs in merged_df.keys():
seqs_column = lseqs
elif rseqs in merged_df.keys():
seqs_column = rseqs
else:
print("no seqs found for ")
        print(filtered_df_x["model"].iloc[0])
        print(filtered_df_y["model"].iloc[0])
return
plt.plot([0, 1], [0, 1], c="k")
num_seqs = merged_df[seqs_column]
plt.scatter(
merged_df[m + lsuffix],
merged_df[m + rsuffix],
c=num_seqs,
s=9,
norm=colors.LogNorm(vmin=num_seqs.min(), vmax=num_seqs.max()),
cmap="viridis",
)
formatter = StupidLogFormatter(base=2.0)
cbar = plt.colorbar(format=formatter)
# cbar.locator = matplotlib.ticker.LogLocator(base=2)
# cbar.update_ticks()
plt.xlabel(k1)
plt.ylabel(k2)
cbar.set_label("\# of MSA sequences")
print("spoagef")
cbar.set_ticks(
ticker.LogLocator(base=2.0),
update_ticks=True,
)
cbar.minorticks_off()
def add_apc_default(df: pd.DataFrame, sweep_name: str) -> pd.DataFrame:
# Adds modified sweep whose default metrics are apc'd
    d = df[df.sweep_name == sweep_name].copy()
d.loc[:, "sweep_name"] = sweep_name + "-apc"
d.loc[:, "pr_at_L"] = d.loc[:, "pr_at_L_apc"]
d.loc[:, "pr_at_L_5"] = d.loc[:, "pr_at_L_5_apc"]
d.loc[:, "auc"] = d.loc[:, "auc_apc"]
d["apc"] = True
return df.append(d)
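# Note: DataFrame.append (used by add_apc_default above) is deprecated since pandas 1.4
# and removed in 2.0; on newer pandas the equivalent call would be pd.concat([df, d]).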
def parse_old_model(df):
d = df[
[
"sweep_name",
"pdb",
"pdb_idx",
"len_ref",
"num_seqs",
"run_state",
"Train_Precision_@_l/1",
"Train_Precision_apc_@_l/1",
"Train_Precision_@_l/5",
"Train_Precision_apc_@_l/5",
"Train_Auc",
"Train_Auc_apc",
]
]
d = d.rename(
columns={
"Train_Precision_@_l/1": "pr_at_L",
"Train_Precision_apc_@_l/1": "pr_at_L_apc",
"Train_Precision_@_l/5": "pr_at_L_5",
"Train_Precision_apc_@_l/5": "pr_at_L_5_apc",
"Train_Auc": "auc",
"Train_Auc_apc": "auc_apc",
"len_ref": "msa_length",
}
)
d["log_num_seqs"] = np.log2(d.num_seqs)
d["model"] = d["sweep_name"].map(lambda x: x.split("-")[0])
if "use_bias" in df.columns:
d["use_bias"] = df.use_bias
else:
d["use_bias"] = False
return d
def load_attention_msa_df(sweep_id, sweep_name, model_name, pdb_map):
# Loads sweep df for runs from old repo
project = "gremlin-contacts"
runs = api.runs(f"{entity}/{project}", {"sweep": f"{sweep_id}"}, per_page=1000)
print(f"{sweep_id} has {len(runs)} runs")
id_list = []
summary_list = []
config_list = []
name_list = []
model_list = []
state_list = []
tags_list = []
num_contacts_list = []
for run in tqdm(runs):
tags_list.append(run.tags)
state_list.append(run.state)
id_list.append(run.id)
# run.summary are the output key/values like accuracy. We call ._json_dict to omit large files
summary_list.append(run.summary._json_dict)
# run.config is the input metrics. We remove special values that start with _.
config_list.append(
{str(k): v for k, v in run.config.items() if not k.startswith("_")}
)
# run.name is the name of the run.
name_list.append(run.name)
model_list.append(model_name)
# currently unused, very slow to download true contact files
# num_contacts_list.append(get_num_true_contacts(run))
# num_contacts_df = pd.DataFrame({'num_true_contacts': num_contacts_list})
summary_df = pd.DataFrame.from_records(summary_list)
config_df = pd.DataFrame.from_dict(config_list)
pdb_id_df = pd.DataFrame({"pdb_idx": list(config_df.pdb.map(pdb_map))})
state_df = pd.DataFrame({"run_state": state_list})
name_df = pd.DataFrame({"name": name_list})
run_id_df = | pd.DataFrame({"run_id": id_list}) | pandas.DataFrame |
import datetime
from unittest import TestCase
import numpy as np
import pandas as pd
from mlnext import pipeline
class TestColumnSelector(TestCase):
def setUp(self):
data = np.arange(8).reshape(-1, 2)
cols = ['a', 'b']
self.df = pd.DataFrame(data, columns=cols)
def test_select_columns(self):
t = pipeline.ColumnSelector(keys=['a'])
expected = self.df.loc[:, ['a']]
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
class TestColumnDropper(TestCase):
def setUp(self):
data = np.arange(8).reshape(-1, 2)
cols = ['a', 'b']
self.df = pd.DataFrame(data, columns=cols)
def test_drop_columns(self):
t = pipeline.ColumnDropper(columns=['b'])
expected = self.df.loc[:, ['a']]
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_drop_columns_verbose(self):
t = pipeline.ColumnDropper(columns=['b'], verbose=True)
expected = self.df.loc[:, ['a']]
result = t.transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_drop__missing_columns(self):
t = pipeline.ColumnDropper(columns=['c'])
with self.assertWarns(Warning):
t.transform(self.df)
class TestColumnRename(TestCase):
def test_rename_columns(self):
t = pipeline.ColumnRename(lambda x: x.split('.')[-1])
df = pd.DataFrame(columns=['a.b.c', 'd.e.f'])
expected = pd.DataFrame(columns=['c', 'f'])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestNaDropper(TestCase):
def test_drop_na(self):
t = pipeline.NaDropper()
df = pd.DataFrame([1, 0, pd.NA])
expected = pd.DataFrame([1, 0], dtype=object)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestClip(TestCase):
def test_clip(self):
t = pipeline.Clip(lower=0.5, upper=1.5)
df = pd.DataFrame([[0.1, 0.4, 0.6, 0.8, 1.2, 1.5]])
expected = pd.DataFrame([[0.5, 0.5, 0.6, 0.8, 1.2, 1.5]])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestDatetimeTransformer(TestCase):
# FIXME: fails in gitlab pipeline but succeeds locally
def test_datetime(self):
t = pipeline.DatetimeTransformer(columns=['time'])
df = pd.DataFrame([['2021-01-04 14:12:31']], columns=['time'])
expected = pd.DataFrame([[datetime.datetime(2021, 1, 4, 14, 12, 31)]],
columns=['time'])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_missing_cols(self):
t = pipeline.DatetimeTransformer(columns=['t'])
df = pd.DataFrame([['2021-01-04 14:12:31']], columns=['time'])
with self.assertRaises(ValueError):
t.fit_transform(df)
class TestNumericTransformer(TestCase):
# FIXME: fails in gitlab pipeline but succeeds locally
def test_numeric(self):
t = pipeline.NumericTransformer(columns=['1'])
df = pd.DataFrame([0, 1], columns=['1'], dtype=object)
expected = pd.DataFrame([0, 1], columns=['1'], dtype=np.int64)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_numeric_missing_column(self):
t = pipeline.NumericTransformer(columns=['2'])
df = pd.DataFrame([0, 1], columns=['1'], dtype=object)
with self.assertRaises(ValueError):
t.fit_transform(df)
def test_numeric_additional_column(self):
t = pipeline.NumericTransformer(columns=['2'])
df = | pd.DataFrame([[0, 1]], columns=['1', '2'], dtype=object) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = pi + offs
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
unanchored = np.array([pd.offsets.Hour(n=1),
pd.offsets.Minute(n=-2)])
with pytest.raises(period.IncompatibleFrequency):
pi + unanchored
with pytest.raises(TypeError):
unanchored + pi
@pytest.mark.xfail(reason='GH#18824 radd doesnt implement this case')
def test_pi_radd_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = offs + pi
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
def test_pi_add_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + one
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize('five', [5, np.array(5, dtype=np.int64)])
def test_sub(self, five):
rng = period_range('2007-01', periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
class TestPeriodIndexSeriesMethods(object):
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
if isinstance(expected, pd.Index):
tm.assert_index_equal(result, expected)
else:
# comp op results in bool
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'2011-05', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
exp = pd.Index([0, 1, 2, 3], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
exp = pd.Index([0, -1, -2, -3], name='idx')
tm.assert_index_equal(result, exp)
def test_pi_ops_errors(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
s = pd.Series(idx)
msg = r"unsupported operand type\(s\)"
for obj in [idx, s]:
for ng in ["str", 1.5]:
with tm.assert_raises_regex(TypeError, msg):
obj + ng
with pytest.raises(TypeError):
# error message differs between PY2 and 3
ng + obj
with tm.assert_raises_regex(TypeError, msg):
obj - ng
with pytest.raises(TypeError):
np.add(obj, ng)
if _np_version_under1p10:
assert np.add(ng, obj) is NotImplemented
else:
with pytest.raises(TypeError):
np.add(ng, obj)
with pytest.raises(TypeError):
np.subtract(obj, ng)
if _np_version_under1p10:
assert np.subtract(ng, obj) is NotImplemented
else:
with pytest.raises(TypeError):
np.subtract(ng, obj)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'NaT', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx, lambda x: np.add(x, 2), expected)
self._check(idx + 2, lambda x: x - 2, idx)
self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
# freq with mult
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='2M', name='idx')
expected = PeriodIndex(['2011-07', '2011-08',
'NaT', '2011-10'], freq='2M', name='idx')
self._check(idx, lambda x: x + 3, expected)
self._check(idx, lambda x: 3 + x, expected)
self._check(idx, lambda x: np.add(x, 3), expected)
self._check(idx + 3, lambda x: x - 3, idx)
self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
def test_pi_ops_array_int(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
f = lambda x: x + np.array([1, 2, 3, 4])
exp = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
exp = PeriodIndex(['2011-05', '2011-01', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: x - np.array([1, 2, 3, 4])
exp = PeriodIndex(['2010-12', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
exp = PeriodIndex(['2010-10', '2010-12', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
def test_pi_ops_offset(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
f = lambda x: x + pd.offsets.Day()
exp = PeriodIndex(['2011-01-02', '2011-02-02', '2011-03-02',
'2011-04-02'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x + pd.offsets.Day(2)
exp = PeriodIndex(['2011-01-03', '2011-02-03', '2011-03-03',
'2011-04-03'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x - pd.offsets.Day(2)
exp = PeriodIndex(['2010-12-30', '2011-01-30', '2011-02-27',
'2011-03-30'], freq='D', name='idx')
self._check(idx, f, exp)
def test_pi_offset_errors(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
s = pd.Series(idx)
# Series op is applied per Period instance, thus error is raised
# from Period
msg_idx = r"Input has different freq from PeriodIndex\(freq=D\)"
msg_s = r"Input cannot be converted to Period\(freq=D\)"
for obj, msg in [(idx, msg_idx), (s, msg_s)]:
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
obj + pd.offsets.Hour(2)
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
pd.offsets.Hour(2) + obj
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
obj - pd.offsets.Hour(2)
def test_pi_sub_period(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, -11, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(idx, pd.Period('2012-01', freq='M'))
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
exp = pd.Index([12, 11, 10, 9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(pd.Period('2012-01', freq='M'), idx)
if _np_version_under1p10:
assert result is NotImplemented
else:
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
tm.assert_index_equal(idx - pd.Period('NaT', freq='M'), exp)
tm.assert_index_equal(pd.Period('NaT', freq='M') - idx, exp)
def test_pi_sub_pdnat(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
exp = pd.TimedeltaIndex([pd.NaT] * 4, name='idx')
tm.assert_index_equal(pd.NaT - idx, exp)
tm.assert_index_equal(idx - pd.NaT, exp)
def test_pi_sub_period_nat(self):
# GH 13071
idx = PeriodIndex(['2011-01', 'NaT', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, np.nan, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
exp = pd.Index([12, np.nan, 10, 9], name='idx')
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
tm.assert_index_equal(idx - | pd.Period('NaT', freq='M') | pandas.Period |
import mysql.connector
import datetime
import pandas as pd
def is_empty(df):
if df.empty:
print('Empty table')
db_user = input('Enter database user : ')
db_password = input('Enter database password : ')
connection = mysql.connector.connect(host='localhost', user = db_user, password = db_password, db='twitter')
cursor = connection.cursor()
print("")
print("Welcome to my Twitter application")
print("In each page there will be guidlines")
print("Please choose the desired option and enter data")
print(',' * 50)
while True:
print("1-Log in")
print("2-Sign up")
option = int(input())
if option == 1:
arguments = []
print('Enter userName')
userName = input()
arguments.append(userName)
print('Enter password')
password = input()
arguments.append(password)
cursor.callproc('login', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchone()[0]
if 'invalid'not in result:
print(result)
connection.commit()
break
else:
print(result)
input()
connection.rollback()
elif option == 2:
arguments = []
print('Enter userName(20 character long):')
userName = input()
arguments.append(userName)
print('Enter first name(20 character long):')
firstname = input()
arguments.append(firstname)
print('Enter last name(20 character long):')
lastname = input()
arguments.append(lastname)
flag = False
print('Enter date of birth(YYYY-MM-DD):')
while not flag:
try:
birthdate = input()
birthdate = datetime.datetime.strptime(birthdate, '%Y-%m-%d')
flag = True
except:
print('Please enter date of birth in valid format:')
arguments.append(birthdate)
print('Enter bio(if you dont want to have bio, enter \"none\"):')
bio = input()
if bio == 'none':
bio = None
arguments.append(bio)
print('Enter password(<PASSWORD> character long ):')
password = input()
arguments.append(password)
cursor.callproc('create_account', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchone()[0]
if result != 'Sorry, this username is already taken.':
print(result)
connection.commit()
break
else:
print(result)
connection.rollback()
input()
else:
print('INVALID INPUT!')
print('')
print('Welcome to your twitter page')
while True:
print('0-Quit')
print('1-Send a new tweet')
print('2-Get personal tweets')
print('3-Get personal tweets and replies')
print('4-Follow')
print('5-Unfollow')
print('6-Block')
print('7-Unblock')
print('8-Get following activities')
print('9-Get a specific user activities')
print('10-Add a new comment')
print('11-Get comments of specific tweet')
print('12-Gets tweets consist of specific hashtag')
print('13-Like')
print('14-Get like numbers of specific tweet')
print('15-List of liking of specific tweet')
print('16-Popular tweets')
print('17-Send a text message in direct')
print('18-Send a tweet in direct')
print('19-Receive a list of messages received from the specific user')
print('20-Get a list of message senders')
print('21-get login records')
option = int(input())
if option == 0:
break
elif option == 1:
arguments = []
print('Enter your tweet content(Maximum 256 character long):')
tweet_content = input()
arguments.append(tweet_content)
cursor.callproc('send_tweet', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchone()[0]
print(result)
connection.commit()
input()
elif option == 2:
cursor.callproc('get_own_tweets')
result = ''
for i in cursor.stored_results():
result = i.fetchall()
df = pd.DataFrame(result)
is_empty(df)
print(df.to_markdown())
input()
elif option == 3:
cursor.callproc('get_own_tweets_and_replies')
result = ''
for i in cursor.stored_results():
result = i.fetchall()
df = pd.DataFrame(result)
is_empty(df)
print(df.to_markdown())
input()
elif option == 4:
arguments = []
print('Enter the username of the person you want to follow:')
username = input()
arguments.append(username)
cursor.callproc('follow', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchone()[0]
print(result)
connection.commit()
input()
elif option == 5:
arguments = []
print('Enter the username of the person you want to unfollow:')
username = input()
arguments.append(username)
cursor.callproc('stop_follow', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchone()[0]
print(result)
connection.commit()
input()
elif option == 6:
arguments = []
print('Enter the username of the person you want to block:')
username = input()
arguments.append(username)
cursor.callproc('block', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchone()[0]
print(result)
connection.commit()
input()
elif option == 7:
arguments = []
print('Enter the username of the person you want to unblock:')
username = input()
arguments.append(username)
cursor.callproc('stop_block', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchone()[0]
print(result)
connection.commit()
input()
elif option == 8:
cursor.callproc('get_following_activity')
result = ''
for i in cursor.stored_results():
result = i.fetchall()
df = pd.DataFrame(result)
is_empty(df)
print(df.to_markdown())
input()
elif option == 9:
arguments = []
print('Enter the username of the person whose activities you want to see:')
username = input()
arguments.append(username)
cursor.callproc('get_user_activity', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchall()
df = pd.DataFrame(result)
is_empty(df)
print(df.to_markdown())
input()
elif option == 10:
arguments = []
print('Enter the tweet ID you want to comment on:')
tweet_id = int(input())
arguments.append(tweet_id)
print('Enter your comment content:')
comment_content = input()
arguments.append(comment_content)
cursor.callproc('comment', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchone()[0]
print(result)
connection.commit()
input()
elif option == 11:
arguments = []
        print('Enter the tweet ID you want to see its comments:')
tweet_id = int(input())
arguments.append(tweet_id)
cursor.callproc('get_comments_of_tweet', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchall()
df = pd.DataFrame(result)
is_empty(df)
print(df.to_markdown())
input()
elif option == 12:
arguments = []
print('Enter the hashtag you want to see its tweets')
hashtag = input()
arguments.append(hashtag)
cursor.callproc('hashtag_tweets', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchall()
df = pd.DataFrame(result)
is_empty(df)
print(df.to_markdown())
input()
elif option == 13:
arguments = []
print('Enter the tweet ID you want to like it:')
tweet_id = int(input())
arguments.append(tweet_id)
cursor.callproc('liking', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchone()[0]
print(result)
connection.commit()
input()
elif option == 14:
arguments = []
        print('Enter the tweet ID you want to see its number of likes:')
tweet_id = int(input())
arguments.append(tweet_id)
cursor.callproc('number_of_likes', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchall()
df = pd.DataFrame(result)
is_empty(df)
print(df.to_markdown())
input()
elif option == 15:
arguments = []
        print('Enter the tweet ID you want to see its list of likings:')
tweet_id = int(input())
arguments.append(tweet_id)
cursor.callproc('list_of_liking', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchall()
df = pd.DataFrame(result)
is_empty(df)
print(df.to_markdown())
input()
elif option == 16:
cursor.callproc('get_popular_tweets')
result = ''
for i in cursor.stored_results():
result = i.fetchall()
df = pd.DataFrame(result)
is_empty(df)
print(df.to_markdown())
input()
elif option == 17:
arguments = []
print('Enter the username to which you want to send a text message:')
username = input()
arguments.append(username)
print('Enter your text message:')
message = input()
arguments.append(message)
cursor.callproc('direct_text_message', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchone()[0]
print(result)
connection.commit()
input()
elif option == 18:
arguments = []
print('Enter the username to which you want to send a tweet:')
username = input()
arguments.append(username)
print('Enter the tweet ID you want to send it:')
tweet_id = int(input())
arguments.append(tweet_id)
cursor.callproc('direct_tweet_message', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchone()[0]
print(result)
connection.commit()
input()
elif option == 19:
arguments = []
print('Enter the username whose messages you want to view:')
username = input()
arguments.append(username)
cursor.callproc('get_a_user_messages', arguments)
result = ''
for i in cursor.stored_results():
result = i.fetchall()
df = pd.DataFrame(result)
is_empty(df)
print(df.to_markdown())
input()
elif option == 20:
cursor.callproc('list_of_message_sender')
result = ''
for i in cursor.stored_results():
result = i.fetchall()
df = | pd.DataFrame(result) | pandas.DataFrame |
import os, csv
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.model_selection import train_test_split
from scipy import signal
class ProcessSignalData(object):
def __init__(self):
# path to video data from signal_output.py
self.dir = './processed_new/videos'
self.full_path = ''
self.dataframe = pd.DataFrame()
self.real_data = pd.DataFrame()
self.fake_data = pd.DataFrame()
self.dataset = pd.DataFrame()
self.real_data_mean = {}
self.fake_data_mean = {}
self.real_data_var = {}
self.fake_data_var = {}
self.real_data_std = {}
self.fake_data_std = {}
self.real_data_psd = {}
self.fake_data_psd = {}
self.real_data_csd = {}
self.fake_data_csd = {}
self.real_data_f1 = {}
self.fake_data_f1 = {}
self.real_data_test = {}
self.fake_data_test = {}
self.real_data_RCCE = {}
self.real_data_LCCE = {}
self.real_data_LCRC = {}
self.fake_data_RCCE = {}
self.fake_data_LCCE = {}
self.fake_data_LCRC = {}
self.real_count = 0
self.fake_count = 0
self.vid_count = 0
self.data_path_lcce = './lcce250.csv'
self.data_path_lcrc = './lcrc250.csv'
self.data_path_rcce = './rcce250.csv'
self.data_path_m = './mean_data16.csv'
self.data_path_v = './new_chrom/var_data16.csv'
self.data_path_s = './new_chrom/std_data16.csv'
self.data_path_p = './new_chrom/psd_data16.csv'
self.data_path_c = './new_chrom/csd_data_128.csv'
        self.data_path_f1 = './f1_data_128.csv'
self.log_path = './process_log.csv'
self.test_data_lcce_path = './new_chrom/test_lcce.csv'
self.test_data_lcrc_path = './new_chrom/test_lcrc.csv'
self.test_data_rcce_path = './new_chrom/test_rcce.csv'
self.train_data_lcce_path = './new_chrom/train_lcce.csv'
self.train_data_lcrc_path = './new_chrom/train_lcrc.csv'
self.train_data_rcce_path = './new_chrom/train_rcce.csv'
self.test_data_v_path = './new_chrom/train_data_v32c.csv'
self.train_data_v_path = './new_chrom/test_data_v32c.csv'
self.test_data_m_path = './new_chrom/train_data_m32c.csv'
self.train_data_m_path = './new_chrom/test_data_m32c.csv'
self.test_data_s_path = './new_chrom/train_data_s32c.csv'
self.train_data_s_path = './new_chrom/test_data_s32c.csv'
self.test_data_p_path = './new_chrom/train_data_p128c.csv'
self.train_data_p_path = './new_chrom/test_data_p128c.csv'
self.test_data_c_path = './train_data_c128c.csv'
self.train_data_c_path = './test_data_c128c.csv'
self.test_data_f1_path = './train_data_f1-128c.csv'
self.train_data_f1_path = './test_data_f1-128c.csv'
self.test_data_test_path = './train_data_test.csv'
self.train_data_test_path = './test_data_test.csv'
self.main()
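    # The helper below computes a chrominance-based rPPG signal; the projection
    # (Xcomp = 3R - 2G, Ycomp = 1.5R + G - 1.5B, bvp = Xcomp - alpha * Ycomp, with
    # alpha = std(X) / std(Y)) appears to follow the CHROM method of de Haan & Jeanne.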
def new_chrom(self, red, green, blue):
# calculation of new X and Y
Xcomp = 3 * red - 2 * green
Ycomp = (1.5 * red) + green - (1.5 * blue)
# standard deviations
sX = np.std(Xcomp)
sY = np.std(Ycomp)
alpha = sX / sY
# -- rPPG signal
bvp = Xcomp - alpha * Ycomp
return bvp
def main(self):
# length of video in frames to process
sample_length = 250
# interval for mean, var, std
group_size = 32
#window for psd
psd_size = 128
for paths, subdir, files in os.walk(self.dir):
for file in files:
if file.endswith('.csv'):
self.full_path = os.path.join(paths, file)
if 'rejected' in self.full_path.lower() or '.txt' in self.full_path.lower() or 'imposter' in self.full_path.lower():
pass
else:
print(self.full_path)
self.dataset = pd.read_csv(self.full_path)
right_R = self.dataset['RC-R'].iloc[:sample_length]
left_R = self.dataset['LC-R'].iloc[:sample_length]
chin_R = self.dataset['C-R'].iloc[:sample_length]
forehead_R = self.dataset['F-R'].iloc[:sample_length]
outerR_R = self.dataset['OR-R'].iloc[:sample_length]
outerL_R = self.dataset['OL-R'].iloc[:sample_length]
center_R = self.dataset['CE-R'].iloc[:sample_length]
right_G = self.dataset['RC-G'].iloc[:sample_length]
left_G = self.dataset['LC-G'].iloc[:sample_length]
chin_G = self.dataset['C-G'].iloc[:sample_length]
forehead_G = self.dataset['F-G'].iloc[:sample_length]
outerR_G = self.dataset['OR-G'].iloc[:sample_length]
outerL_G = self.dataset['OL-G'].iloc[:sample_length]
center_G = self.dataset['CE-G'].iloc[:sample_length]
right_B = self.dataset['RC-B'].iloc[:sample_length]
left_B = self.dataset['LC-B'].iloc[:sample_length]
chin_B = self.dataset['C-B'].iloc[:sample_length]
forehead_B = self.dataset['F-B'].iloc[:sample_length]
outerR_B = self.dataset['OR-B'].iloc[:sample_length]
outerL_B = self.dataset['OL-B'].iloc[:sample_length]
center_B = self.dataset['CE-B'].iloc[:sample_length]
right_C = self.dataset['RC-chrom'].iloc[:sample_length]
left_C = self.dataset['LC-Chrom'].iloc[:sample_length]
chin_C = self.dataset['C-chrom'].iloc[:sample_length]
forehead_C = self.dataset['F-chrom'].iloc[:sample_length]
outerR_C = self.dataset['OR-chrom'].iloc[:sample_length]
outerL_C = self.dataset['OL-chrom'].iloc[:sample_length]
center_C = self.dataset['CE-chrom'].iloc[:sample_length]
chrom_R = right_C
chrom_L = left_C
chrom_CE = center_C
chrom_OL = outerL_C
chrom_OR = outerR_C
#chrom_R = self.new_chrom(right_R, right_G, right_B)
#chrom_L = self.new_chrom(left_R, left_G, left_B)
chrom_C = self.new_chrom(chin_R, chin_G, chin_B)
chrom_F = self.new_chrom(forehead_R, forehead_G, forehead_B)
#chrom_OR = self.new_chrom(outerR_R, outerR_G, outerR_B)
#chrom_OL = self.new_chrom(outerL_R, outerL_G, outerL_B)
#chrom_CE = self.new_chrom(center_R, center_G, center_B)
difg_LCRC = (self.dataset['RC-G'].iloc[:sample_length] - self.dataset['LC-G'].iloc[:sample_length]).abs()
difc_LCRC = (self.dataset['RC-chrom'].iloc[:sample_length] - self.dataset['LC-Chrom'].iloc[:sample_length]).abs()
difg_o1 = (self.dataset['C-G'].iloc[:sample_length] - self.dataset['F-G'].iloc[:sample_length]).abs()
difc_o1 = (self.dataset['C-chrom'].iloc[:sample_length] - self.dataset['F-chrom'].iloc[:sample_length]).abs()
difg_o2 = (self.dataset['OR-G'].iloc[:sample_length] - self.dataset['OL-G'].iloc[:sample_length]).abs()
difc_o2 = (self.dataset['OR-chrom'].iloc[:sample_length] - self.dataset['OL-chrom'].iloc[:sample_length]).abs()
difc_LCCe = (self.dataset['LC-Chrom'].iloc[:sample_length] - self.dataset['CE-chrom'].iloc[
:sample_length]).abs()
difc_RCCe = (self.dataset['RC-chrom'].iloc[:sample_length] - self.dataset['CE-chrom'].iloc[
:sample_length]).abs()
difc_LCRC = (chrom_R.iloc[:sample_length] - chrom_L.iloc[:sample_length]).abs()
difc_LCCe = (chrom_L.iloc[:sample_length] - chrom_CE.iloc[:sample_length]).abs()
difc_RCCe = (chrom_R.iloc[:sample_length] - chrom_CE.iloc[:sample_length]).abs()
difc_LCOL = (chrom_L.iloc[:sample_length] - chrom_OL.iloc[:sample_length]).abs()
difc_RCOR = (chrom_R.iloc[:sample_length] - chrom_OR.iloc[:sample_length]).abs()
difg_LCOL = (self.dataset['LC-G'].iloc[:sample_length] - self.dataset['OL-G'].iloc[:sample_length]).abs()
difg_RCOR = (self.dataset['RC-G'].iloc[:sample_length] - self.dataset['OR-G'].iloc[:sample_length]).abs()
# green channel features
# right cheek - left cheek
difg_LCRC_lst = [difg_LCRC.iloc[i:i + group_size] for i in
range(0, len(difg_LCRC) - group_size + 1, group_size)]
# forehead - chin
difg_o1_lst = [difg_o1.iloc[i:i + group_size] for i in
range(0, len(difg_o1) - group_size + 1, group_size)]
# outer right - outer left
difg_o2_lst = [difg_o2.iloc[i:i + group_size] for i in
range(0, len(difg_o2) - group_size + 1, group_size)]
# chrominance features
# right cheek - left cheek
difc_LCRC_lst = [difc_LCRC.iloc[i:i + group_size] for i in
range(0, len(difc_LCRC) - group_size + 1, group_size)]
# forehead - chin
difc_o1_lst = [difc_o1.iloc[i:i + group_size] for i in
range(0, len(difc_o1) - group_size + 1, group_size)]
# outer right - outer left
difc_o2_lst = [difc_o2.iloc[i:i + group_size] for i in
range(0, len(difc_o2) - group_size + 1, group_size)]
# mean
difg_LCRC_mean = np.array([difg_LCRC_lst[i].mean() for i in range(len(difg_LCRC_lst))])
difc_LCRC_mean = np.array([difc_LCRC_lst[i].mean() for i in range(len(difc_LCRC_lst))])
print("MEAN")
print(difc_LCRC_mean)
difg_o1_mean = np.array([difg_o1_lst[i].mean() for i in range(len(difg_o1_lst))])
difc_o1_mean = np.array([difc_o1_lst[i].mean() for i in range(len(difc_o1_lst))])
difg_o2_mean = np.array([difg_o2_lst[i].mean() for i in range(len(difg_o2_lst))])
difc_o2_mean = np.array([difc_o2_lst[i].mean() for i in range(len(difc_o2_lst))])
# variance
difg_LCRC_var = np.array([difg_LCRC_lst[i].var() for i in range(len(difg_LCRC_lst))])
difc_LCRC_var = np.array([difc_LCRC_lst[i].var() for i in range(len(difc_LCRC_lst))])
print("VAR")
print(difc_LCRC_var)
difg_o1_var = np.array([difg_o1_lst[i].var() for i in range(len(difg_o1_lst))])
difc_o1_var = np.array([difc_o1_lst[i].var() for i in range(len(difc_o1_lst))])
difg_o2_var = np.array([difg_o2_lst[i].var() for i in range(len(difg_o2_lst))])
difc_o2_var = np.array([difc_o2_lst[i].var() for i in range(len(difc_o2_lst))])
# standard deviation
difg_LCRC_std = np.array([difg_LCRC_lst[i].std() for i in range(len(difg_LCRC_lst))])
difc_LCRC_std = np.array([difc_LCRC_lst[i].std() for i in range(len(difc_LCRC_lst))])
print("STD")
print(difc_LCRC_std)
difg_o1_std = np.array([difg_o1_lst[i].std() for i in range(len(difg_o1_lst))])
difc_o1_std = np.array([difc_o1_lst[i].std() for i in range(len(difc_o1_lst))])
difg_o2_std = np.array([difg_o2_lst[i].std() for i in range(len(difg_o2_lst))])
difc_o2_std = np.array([difc_o2_lst[i].std() for i in range(len(difc_o2_lst))])
# power spectral density
f, difg_LCRC_psd = signal.welch(difg_LCRC, nperseg=psd_size)
f, difc_LCCe_psd = signal.welch(difc_LCCe, nperseg=psd_size)
f, difc_RCCe_psd = signal.welch(difc_RCCe, nperseg=psd_size)
f, difc_LCRC_psd = signal.welch(difc_LCRC, nperseg=psd_size)
print("PSD")
print(difc_LCRC_psd)
f, difg_o1_psd = signal.welch(difg_o1, nperseg=psd_size)
f, difc_o1_psd = signal.welch(difc_o1, nperseg=psd_size)
f, difg_o2_psd = signal.welch(difg_o2, nperseg=psd_size)
f, difc_o2_psd = signal.welch(difc_o2, nperseg=psd_size)
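                        # (signal.welch estimates each power spectral density with Welch's method,
                        # averaging periodograms over segments of length nperseg; signal.csd below
                        # returns the complex-valued cross spectral density between two signals.)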
# cross power spectral density
left_C.fillna(0, inplace=True)
center_C.fillna(0, inplace=True)
right_C.fillna(0, inplace=True)
outerL_C.fillna(0, inplace=True)
outerR_C.fillna(0, inplace=True)
f, difc_LCCe_v_csd = signal.csd(left_C, center_C, nperseg=128)
f, difc_LCRC_v_csd = signal.csd(left_C, right_C, nperseg=128)
f, difc_RCCe_v_csd = signal.csd(right_C, center_C, nperseg=128)
f, difc_LCOL_v_csd = signal.csd(left_C, outerL_C, nperseg=128)
f, difc_RCOR_v_csd =signal.csd(right_C, outerR_C, nperseg=128)
difc_LCCe_csd_0 = []
difc_LCRC_csd_0 = []
difc_RCCe_csd_0 = []
difc_LCOL_csd_0 = []
difc_RCOR_csd_0 = []
difc_LCCe_csd_1 = []
difc_LCRC_csd_1 = []
difc_RCCe_csd_1 = []
difc_LCOL_csd_1 = []
difc_RCOR_csd_1 = []
for i in range(len(difc_LCCe_v_csd)):
difc_LCCe_csd_0.append(difc_LCCe_v_csd[i].real)
difc_LCCe_csd_1.append(difc_LCCe_v_csd[i].imag)
for i in range(len(difc_LCRC_v_csd)):
difc_LCRC_csd_0.append(difc_LCRC_v_csd[i].real)
difc_LCRC_csd_1.append(difc_LCRC_v_csd[i].imag)
for i in range(len(difc_RCCe_v_csd)):
difc_RCCe_csd_0.append(difc_RCCe_v_csd[i].real)
difc_RCCe_csd_1.append(difc_RCCe_v_csd[i].imag)
for i in range(len(difc_LCOL_v_csd)):
difc_LCOL_csd_0.append(difc_LCOL_v_csd[i].real)
difc_LCOL_csd_1.append(difc_LCOL_v_csd[i].imag)
for i in range(len(difc_RCOR_v_csd)):
difc_RCOR_csd_0.append(difc_RCOR_v_csd[i].real)
difc_RCOR_csd_1.append(difc_RCOR_v_csd[i].imag)
csd2_LCCe = []
csd2_LCRC = []
csd2_RCCe = []
for i in range(len(difc_RCCe_csd_0)):
csd2_LCCe.append((difc_LCCe_csd_0[i], difc_LCCe_csd_1[i]))
csd2_LCRC.append((difc_LCRC_csd_0[i], difc_LCRC_csd_1[i]))
csd2_RCCe.append((difc_RCCe_csd_0[i], difc_RCCe_csd_1[i]))
# f1 feature
t = np.abs(difc_LCCe_v_csd)
j = np.argmax(t)
max_cLCCe = (difc_LCCe_csd_0[j], difc_LCCe_csd_1[j])
mean_cLCCe = [np.mean(np.asarray(difc_LCCe_csd_0)), np.mean(np.asarray(difc_LCCe_csd_1))]
f1LCCe = np.array([max_cLCCe[0], max_cLCCe[1], mean_cLCCe[0], mean_cLCCe[1]])
t = np.abs(difc_LCRC_v_csd)
j = np.argmax(t)
max_cLCRC = (difc_LCRC_csd_0[j], difc_LCRC_csd_1[j])
mean_cLCRC = [np.mean(np.asarray(difc_LCRC_csd_0)), np.mean(np.asarray(difc_LCRC_csd_1))]
f1LCRC = np.array([max_cLCRC[0], max_cLCRC[1], mean_cLCRC[0], mean_cLCRC[1]])
t = np.abs(difc_RCCe_v_csd)
j = np.argmax(t)
max_cRCCe = (difc_RCCe_csd_0[j], difc_RCCe_csd_1[j])
mean_cRCCe = [np.mean(np.asarray(difc_RCCe_csd_0)), np.mean(np.asarray(difc_RCCe_csd_1))]
f1RCCe = np.array([max_cRCCe[0], max_cRCCe[1], mean_cRCCe[0], mean_cRCCe[1]])
t = np.abs(difc_LCOL_v_csd)
j = np.argmax(t)
max_cLCOL = (difc_LCOL_csd_0[j], difc_LCOL_csd_1[j])
mean_cLCOL = [np.mean(np.asarray(difc_LCOL_csd_0)), np.mean(np.asarray(difc_LCOL_csd_1))]
f1LCOL = np.array([max_cLCOL[0], max_cLCOL[1], mean_cLCOL[0], mean_cLCOL[1]])
t = np.abs(difc_RCOR_v_csd)
j = np.argmax(t)
max_cRCOR = (difc_RCOR_csd_0[j], difc_RCOR_csd_1[j])
mean_cRCOR = [np.mean(np.asarray(difc_RCOR_csd_0)), np.mean(np.asarray(difc_RCOR_csd_1))]
f1RCOR = np.array([max_cRCOR[0], max_cRCOR[1], mean_cRCOR[0], mean_cRCOR[1]])
derived_data_mean = np.concatenate([difg_LCRC_mean, difc_LCRC_mean, difg_o1_mean, difc_o1_mean,
difg_o2_mean, difc_o2_mean])
derived_data_var = np.concatenate([difg_LCRC_var, difc_LCRC_var, difg_o1_var, difc_o1_var,
difg_o2_var, difc_o2_var])
derived_data_std = np.concatenate([difg_LCRC_std, difc_LCRC_std, difg_o1_std, difc_o1_std,
difg_o2_std, difc_o2_std])
derived_data_psd = np.concatenate([difc_LCCe_psd, difc_LCRC_psd, difc_RCCe_psd])
derived_data_csd = np.concatenate([difc_LCCe_csd_0, difc_LCCe_csd_1, difc_LCRC_csd_0, difc_LCRC_csd_1, difc_RCCe_csd_0, difc_RCCe_csd_1])
derived_data_rcsd = np.concatenate([difc_LCCe_csd_0, difc_LCRC_csd_0, difc_RCCe_csd_0])
derived_data_f1 = np.concatenate([f1LCCe, f1LCRC, f1RCCe])
derived_data_test = np.concatenate([f1LCCe, f1LCRC, f1RCCe, f1LCOL, f1RCOR, difc_LCRC_std, difc_LCRC_var, difc_LCRC_psd, difc_LCRC_mean])
chrom_data = self.dataset['RC-chrom'].iloc[50] - self.dataset['C-chrom'].iloc[50]
if 'fake' in self.full_path.lower():
self.fake_data_LCCE[self.fake_count] = difc_LCCe
self.fake_data_LCRC[self.fake_count] = difc_LCRC
self.fake_data_RCCE[self.fake_count] = difc_RCCe
self.fake_data_mean[self.fake_count] = derived_data_mean
self.fake_data_var[self.fake_count] = derived_data_var
self.fake_data_std[self.fake_count] = derived_data_std
self.fake_data_psd[self.fake_count] = derived_data_psd
self.fake_data_csd[self.fake_count] = derived_data_csd
self.fake_data_f1[self.fake_count] = derived_data_f1
self.fake_data_test[self.fake_count] = derived_data_test
self.fake_count += 1
else:
self.real_data_LCCE[self.real_count] = difc_LCCe
self.real_data_LCRC[self.real_count] = difc_LCRC
self.real_data_RCCE[self.real_count] = difc_RCCe
self.real_data_mean[self.real_count] = derived_data_mean
self.real_data_var[self.real_count] = derived_data_var
self.real_data_std[self.real_count] = derived_data_std
self.real_data_psd[self.real_count] = derived_data_psd
self.real_data_csd[self.real_count] = derived_data_csd
self.real_data_f1[self.real_count] = derived_data_f1
self.real_data_test[self.real_count] = derived_data_test
self.real_count += 1
self.vid_count += 1
self.real_df_LCCE = pd.DataFrame(self.real_data_LCCE)
self.real_df_LCRC = pd.DataFrame(self.real_data_LCRC)
self.real_df_RCCE = pd.DataFrame(self.real_data_RCCE)
self.fake_df_LCCE = pd.DataFrame(self.fake_data_LCCE)
self.fake_df_LCRC = pd.DataFrame(self.fake_data_LCRC)
self.fake_df_RCCE = pd.DataFrame(self.fake_data_RCCE)
self.real_df_m = pd.DataFrame(self.real_data_mean)
self.fake_df_m = pd.DataFrame(self.fake_data_mean)
self.real_df_v = pd.DataFrame(self.real_data_var)
self.fake_df_v = pd.DataFrame(self.fake_data_var)
self.real_df_s = pd.DataFrame(self.real_data_std)
self.fake_df_s = | pd.DataFrame(self.fake_data_std) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Module for model evaluation
"""
# Built-in
from copy import deepcopy
from typing import Any, Iterable, List, Tuple
# Other
from joblib import delayed, Parallel
import numpy as np
import pandas as pd
def rmse(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculates root mean squared error RMSE
Args:
y_true (np.ndarray): Actual values Y
y_pred (np.ndarray): Predicted values Y
Returns:
[float]: rmse
"""
error = y_true - y_pred
return np.sqrt((error ** 2).mean())
def smape(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculates symmetric mean absolute percentage error SMAPE
Args:
y_true (np.ndarray): Actual values Y
y_pred (np.ndarray): Predicted values Y
Returns:
[float]: smape
"""
error = np.abs(y_true - y_pred) / (np.abs(y_true) + np.abs(y_pred))
return 100 * np.mean(error)
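# Minimal usage sketch (illustrative only; the arrays below are made-up examples and
# not part of the original module):
if __name__ == "__main__":
    _y_true = np.array([100.0, 200.0, 300.0])
    _y_pred = np.array([110.0, 190.0, 330.0])
    print(rmse(_y_true, _y_pred))   # ~19.15
    print(smape(_y_true, _y_pred))  # ~4.03 (percent)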
def time_series_cross_val_scores(
model: Any,
X: pd.DataFrame,
Y: pd.DataFrame,
responses: List[str],
k: int = 10,
h_list: List[int] = [1, 3, 5, 7, 14, 21],
min_train_size: int = 100,
n_jobs: int = 1,
) -> dict:
"""
    Calculate cross validation scores for time series by sampling k split points without replacement to split into train/test sets. For each response specified,
it will calculate the 1 step cross val forecast error, 5 step cross val forecast error, etc... based on h_list values.
Args:
model (Any): Model class with fit and forecast methods. Forecasts should include columns with name response_pred.
X (pd.DataFrame): Dataframe of predictors
Y (pd.DataFrame): Dataframe of responses
responses (List[str]): List of responses; each must be a column of Y and appear in the model.forecast() output as <response>_pred
k (int, optional): Number of cross validation splits to perform. Defaults to 10.
h_list (List[int], optional): List of h step forecasts to calculate cross validation error. Defaults to [1, 3, 5, 7, 14, 21].
min_train_size (int, optional): Minimum size of a training set. Defaults to 100.
n_jobs (int, optional): Number of jobs to run in parallel, -1 for all processors. Defaults to 1.
Returns:
dict: Dictionary with keys 'all' for all cross validation results and 'summarised' for the summarised rmse and smape results per h and response.
"""
n = X.shape[0]
max_h = max(h_list)
end = n - max_h
# Sample split points without replacement from the bounds (min_train_size, total points - maximum h forecast)
split_values = np.arange(start=min_train_size, stop=end + 1)
split_points = np.random.choice(split_values, size=k, replace=False)
# Run model for each split point and get scores
parallel = Parallel(n_jobs=n_jobs)
results = parallel(
delayed(fit_and_score)(
model,
X,
Y,
split_point=split_point,
responses=responses,
h_list=h_list,
)
for split_point in split_points
)
cv_scores_all = pd.concat(results)
# Aggregate to get rmse and smape grouped by h and response
cv_scores_groupby = cv_scores_all.groupby(by=["h", "response"])
rmse_scores = (
cv_scores_groupby.apply(lambda x: rmse(x["actual"], x["forecast"]))
.to_frame("rmse")
.reset_index()
)
smape_scores = (
cv_scores_groupby.apply(lambda x: smape(x["actual"], x["forecast"]))
.to_frame("smape")
.reset_index()
)
cv_scores_summarised = pd.merge(rmse_scores, smape_scores)
cv_scores = {"all": cv_scores_all, "summarised": cv_scores_summarised}
return cv_scores
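# Hedged usage sketch (not from the original code): how the cross-validation
# driver above is typically called. The _NaiveLastValue wrapper, the column
# names and the response name "sales" are illustrative assumptions; any object
# with fit(X, Y) and forecast(h=...) returning "<response>_pred" columns fits
# the contract documented above.
class _NaiveLastValue:
    """Toy model: forecasts every horizon as the last observed value."""

    def fit(self, X, Y):
        self.last_ = Y.iloc[-1]

    def forecast(self, h):
        rows = [{col + "_pred": self.last_[col] for col in self.last_.index} for _ in range(h)]
        return pd.DataFrame(rows)


def _demo_cross_val_scores():
    rng = np.random.default_rng(0)
    X = pd.DataFrame({"t": np.arange(300)})
    Y = pd.DataFrame({"sales": rng.normal(100, 5, size=300)})
    scores = time_series_cross_val_scores(
        _NaiveLastValue(), X, Y, responses=["sales"], k=5, h_list=[1, 7], min_train_size=100
    )
    return scores["summarised"]  # rmse and smape per (h, response)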
def fit_and_score(
model: Any,
X: pd.DataFrame,
Y: pd.DataFrame,
split_point: int,
responses: List[str],
h_list: List[int] = [1, 3, 5, 7, 14, 21],
) -> pd.DataFrame:
"""
Splits the data into train/test sets at split_point, fits the model on the training set and calculates scores on the test set.
Args:
model (Any): Model class with fit and forecast methods. Forecasts should include columns with name response_pred.
X (pd.DataFrame): Dataframe of predictors
Y (pd.DataFrame): Dataframe of responses
split_point (int): Split point to separate train and test sets
responses (List[str]): List of responses; each must be a column of Y and appear in the model.forecast() output as <response>_pred
h_list (List[int], optional): List of h step forecasts to calculate cross validation error. Defaults to [1, 3, 5, 7, 14, 21].
Returns:
[pd.DataFrame]: Dataframe containing the result scores for the model
"""
max_h = max(h_list)
model = deepcopy(model)
# Split into train/test sets
X_train, Y_train = X.iloc[:split_point], Y.iloc[:split_point]
Y_test = Y.iloc[split_point : (split_point + max_h)]
model.fit(X_train, Y_train)
# Get forecasts for last max_h values
forecasts = model.forecast(h=max_h)
forecasts = forecasts.iloc[-max_h:]
# Get result for each h forecast and response
cv_scores = []
for h in h_list:
for response in responses:
forecast = forecasts.iloc[h - 1][response + "_pred"]
actual = Y_test.iloc[h - 1][response]
error = forecast - actual
cv_scores.append(
{
"split_point": split_point,
"h": h,
"response": response,
"forecast": forecast,
"actual": actual,
"error": error,
}
)
cv_scores = pd.DataFrame(cv_scores)
return cv_scores
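# Hedged illustration (not in the original file): fit_and_score() can also be
# used on its own to inspect a single split; `model`, `X`, `Y` and `responses`
# are assumed to follow the same contract as in time_series_cross_val_scores().
def _demo_single_split(model, X, Y, responses):
    """Illustrative helper: per-horizon errors for one train/test split."""
    single = fit_and_score(model, X, Y, split_point=len(X) // 2,
                           responses=responses, h_list=[1, 7])
    return single.groupby("h")["error"].describe()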
############################### LightGBM Voting #######################################
import numpy as np
import pandas as pd
import logging
#from sklearn.externals import joblib
import joblib
np.warnings.filterwarnings('ignore')
from sklearn.model_selection import KFold
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve,auc
from sklearn.preprocessing import OneHotEncoder
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
DATE_FORMAT = "%m/%d/%Y %H:%M:%S %p"
logging.basicConfig(filename='train.log', level=logging.INFO, format=LOG_FORMAT, datefmt=DATE_FORMAT)
# load data set
train_df = pd.read_csv('./train_set.csv', sep='\t')
test_df = pd.read_csv('./test_a.csv', sep='\t', nrows=None)
# -*- coding: utf-8 -*-
import chainer
# Tested with chainerrl v0.7.0
import chainerrl
from chainerrl import replay_buffer
from chainerrl import experiments
from chainerrl import links
from chainerrl import explorers
from chainerrl.q_functions import DistributionalDuelingDQN
import gym
import random
import cv2
import datetime as dt
import plotly
import plotly.graph_objs as go
import plotly.io as pio
import pandas as pd
import numpy as np
import sqlite3 as lite
import pandas.io.sql as psql
from sklearn.model_selection import train_test_split
import logging
import sys
import os
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='')
# from chainerrl_visualizer import launch_visualizer
plotly.io.orca.ensure_server()
# Image size of raw gym Atari frames
RAW_IMAGE_HEIGHT = 210
RAW_IMAGE_WIDTH = 160
# Standard ChainerRL observation size
OBSERVATION_IMAGE_HEIGHT = 84
OBSERVATION_IMAGE_WIDTH = 84
MAX_TIME = 45
IMAGE_TIME_STEPS = 45
TEST_STEP = 500
STEPS = 30000
PER_FRAME = 4 # use 4 frames per environment step, following the gym convention
GPU_DEVICE = -1 # set to -1 to disable the GPU
WRITE_OBSERVATION_IMAGE = True
WRITE_RAW_IMAGE = True
cols = ['date', 'open', 'high', 'low', 'close', 'volume']
trade_cols = ['buy_sell', 'open_price',
'close_price', 'reward', 'holding_count']
episode_cols = ['time', 'train_profit',
'train_win_rate', 'test_profit', 'test_win_rate']
class Position:
def __init__(self, buy_sell, open_price):
# Buy or sell
self.buy_sell = buy_sell
# Entry (open) price
self.open_price = open_price
# Target price
self.target_price = self._target_price()
# Stop-loss (loss-cut) price
self.loss_cut_price = self._loss_cut_price()
# Close (settlement) price
self.close_price = None
self.reward = None
self.holding_times = 0
def cal_reward(self, close_price):
self.close_price = close_price
if (self.buy_sell == 'Buy'):
diff = self.close_price - self.target_price
if (self.buy_sell == 'Sell'):
diff = self.target_price - self.close_price
if (diff > 0):
self.reward = 1
else:
self.reward = -1
def count_up(self):
self.holding_times += 1
def to_pd(self):
return pd.DataFrame(
[[
self.buy_sell,
self.open_price,
self.close_price,
self.reward,
self.holding_times
]],
columns=trade_cols)
def _target_price(self):
# The BitMEX spread is fixed at 0.5 USD
# Fees: taker 0.075%, maker 0.05%
# Target profit is 0.1%
if (self.buy_sell == 'Buy'):
return self.open_price * 1.001
if (self.buy_sell == 'Sell'):
return self.open_price * 0.999
def _loss_cut_price(self):
# Cut losses when the price moves 0.2% against the position
if (self.buy_sell == 'Buy'):
return self.open_price * 0.998
if (self.buy_sell == 'Sell'):
return self.open_price * 1.002
def is_over_loss(self, price):
if (self.buy_sell == 'Buy'):
return self.loss_cut_price > price
if (self.buy_sell == 'Sell'):
return self.loss_cut_price < price
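# Illustrative usage (not part of the original environment): how a Position
# moves from open to close. Prices are made up; the 0.1% target and 0.2%
# loss-cut thresholds come from _target_price() and _loss_cut_price() above.
def _demo_position():
    pos = Position('Buy', open_price=10000.0)
    # target_price == 10010.0 and loss_cut_price == 9980.0 for this entry
    assert not pos.is_over_loss(9990.0)   # still above the stop level
    pos.count_up()                        # one step of holding
    pos.cal_reward(close_price=10015.0)   # closed above target -> reward == 1
    return pos.to_pd()                    # one-row DataFrame in trade_cols format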
class Trade(gym.core.Env):
def __init__(self, df, test=False):
self.test = test
self.df = df.reset_index(drop=True)
self.df_row = len(self.df)
self.position = None
self.start_time = self._start_time()
self.time = 0
self.before_action = None
self.consecutive_times = 0
# 0: buy, 1: sell, 2: close, 3: wait
self.action_space = gym.spaces.Discrete(4)
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(OBSERVATION_IMAGE_HEIGHT, OBSERVATION_IMAGE_WIDTH),
dtype=np.uint8)
if (self.test):
# During testing, record trades and export them to CSV
self.trades_df = pd.DataFrame([], columns=trade_cols)
def step(self, action):
reward = 0
done = False
current_price = self.current_df().iloc[-1]['close']
if (action == self.before_action):
self.consecutive_times += 1
else:
self.consecutive_times = 0
if (self.position is None):
if (action == 0):
self.position = Position('Buy', current_price)
if (action == 1):
self.position = Position('Sell', current_price)
if (self.position is not None):
self.position.count_up()
if (action == 2):
self.position.cal_reward(current_price)
reward += self.position.reward
if (self.test):
self.trades_df = self.trades_df.append(
self.position.to_pd(), ignore_index=True
)
else:
done = True
self.position = None
if (reward > 0):
print('win:', self.time)
else:
print('lose:', self.time)
else:
if (self.position.is_over_loss(current_price)):
reward += -2
if (not self.test):
done = True
self.position = None
print('loss cut:', self.time)
observation = self._observation()
info = {}
self.time += 1
if (self.test):
if (self.time == TEST_STEP):
done = True
self.trades_df.to_csv(
'csv/trades' + dt.datetime.now().strftime("%Y%m%d%H%M%S") + '.csv')
self.trades_df = pd.DataFrame([], columns=trade_cols)
from functools import reduce
import re
import numpy as np
import pandas as pd
from avaml import _NONE
from avaml.aggregatedata.__init__ import DatasetMissingLabel
from avaml.score.overlap import calc_overlap
__author__ = 'arwi'
VECTOR_WETNESS_LOOSE = {
_NONE: (0, 0),
"new-loose": (0, 1),
"wet-loose": (1, 1),
"new-slab": (0, 0.4),
"drift-slab": (0, 0.2),
"pwl-slab": (0, 0),
"wet-slab": (1, 0),
"glide": (0.8, 0),
}
VECTOR_FREQ = {
"dsize": {
_NONE: 0,
'0': 0,
'1': 0.2,
'2': 0.4,
'3': 0.6,
'4': 0.8,
'5': 1,
},
"dist": {
_NONE: 0,
'0': 0,
'1': 0.25,
'2': 0.5,
'3': 0.75,
'4': 1,
},
"trig": {
_NONE: 0,
'0': 0,
'10': 1 / 3,
'21': 2 / 3,
'22': 1,
},
"prob": {
_NONE: 0,
'0': 0,
'2': 1 / 3,
'3': 2 / 3,
'5': 1,
},
}
class Score:
def __init__(self, labeled_data):
def to_vec(df):
level_2 = ["wet", "loose", "freq", "lev_max", "lev_min", "lev_fill", "aspect"]
columns = pd.MultiIndex.from_product([["global"], ["danger_level", "emergency_warning"]])
# -*- coding:utf-8 -*-
import pandas as pd
import math
import csv
import random
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
base_elo = 1600
team_elos = {}
team_stats = {}
X = []
y = []
folder = 'data'
# calculate elo values for each team
def calc_elo(win_team, lose_team):
winner_rank = get_elo(win_team)
loser_rank = get_elo(lose_team)
rank_diff = winner_rank - loser_rank
exp = (rank_diff * -1) / 400
odds = 1 / (1 + math.pow(10, exp))
# modify K value according to rank level
if winner_rank < 2100:
k = 32
elif winner_rank >= 2100 and winner_rank < 2400:
k = 24
else:
k = 16
new_winner_rank = round(winner_rank + (k * (1 - odds)))
new_rank_diff = new_winner_rank - winner_rank
new_loser_rank = loser_rank - new_rank_diff
return new_winner_rank, new_loser_rank
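# Hedged worked example (not part of the original script): with two unseen
# teams, get_elo() gives both the 1600 base rating, the expected win
# probability is 0.5 and K is 32, so the winner gains 16 points and the
# loser drops 16.
def _demo_calc_elo():
    new_w, new_l = calc_elo('Demo Winner', 'Demo Loser')
    # rank_diff = 0 -> odds = 1 / (1 + 10 ** 0) = 0.5
    # winner: round(1600 + 32 * 0.5) = 1616, loser: 1600 - 16 = 1584
    return new_w, new_l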
# Initialize team stats from each team's Miscellaneous, Opponent and Team stat tables
def initialize_data(Mstat, Ostat, Tstat):
new_Mstat = Mstat.drop(['Rk', 'Arena'], axis=1)
new_Ostat = Ostat.drop(['Rk', 'G', 'MP'], axis=1)
new_Tstat = Tstat.drop(['Rk', 'G', 'MP'], axis=1)
team_stats1 = pd.merge(new_Mstat, new_Ostat, how='left', on='Team')
team_stats1 = pd.merge(team_stats1, new_Tstat, how='left', on='Team')
print (team_stats1.info())
return team_stats1.set_index('Team', inplace=False, drop=True)
def get_elo(team):
try:
return team_elos[team]
except:
# give team a base_elo if that team does not have an elo
team_elos[team] = base_elo
return team_elos[team]
def build_dataSet(all_data):
print("Building data set..")
for index, row in all_data.iterrows():
Wteam = row['WTeam']
Lteam = row['LTeam']
# get each team's initial elo value
team1_elo = get_elo(Wteam)
team2_elo = get_elo(Lteam)
# add 100 elo to home team
if row['WLoc'] == 'H':
team1_elo += 100
else:
team2_elo += 100
# use elo as a feature value to evaluate a team
team1_features = [team1_elo]
team2_features = [team2_elo]
# add stats we got from website for each team
for key, value in team_stats.loc[Wteam].iteritems():
team1_features.append(value)
for key, value in team_stats.loc[Lteam].iteritems():
team2_features.append(value)
# distribute two team's feature value to two sides randomly
# accordingly give 0/1 to y
if random.random() > 0.5:
X.append(team1_features + team2_features)
y.append(0)
else:
X.append(team2_features + team1_features)
y.append(1)
# update elo value according to match result
new_winner_rank, new_loser_rank = calc_elo(Wteam, Lteam)
team_elos[Wteam] = new_winner_rank
team_elos[Lteam] = new_loser_rank
return np.nan_to_num(X), np.array(y)
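# Hedged sketch (the training step itself is an assumption based on the
# imports at the top of this file): once build_dataSet() has produced X and y,
# a logistic regression can be fitted and checked with 10-fold cross-validation.
def _train_model(X, y):
    model = LogisticRegression(max_iter=1000)
    model.fit(X, y)
    accuracy = cross_val_score(model, X, y, cv=10, scoring='accuracy', n_jobs=-1).mean()
    print('10-fold cross-validation accuracy: %.3f' % accuracy)
    return model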
def predict_winner(team_1, team_2, model):
features = []
# team 1,away
features.append(get_elo(team_1))
for key, value in team_stats.loc[team_1].iteritems():
features.append(value)
# team 2,home
features.append(get_elo(team_2) + 100)
for key, value in team_stats.loc[team_2].iteritems():
features.append(value)
features = np.nan_to_num(features)
return model.predict_proba([features])
if __name__ == '__main__':
Mstat = pd.read_csv(folder + '/15-16Miscellaneous_Stat.csv')
## Basketball Reference Game Log Scraping ####################################################################################
# Georgia Tech: Daily Fantasy Sports Project
# authors: <NAME> & <NAME>
#### Process Outline #########################################################################################################
# Import historical results
# Import archives (so entire process doesn't have to re-run)
# Filter game-logs to day before contest
# Run prediction/optimization for top 10 line-ups (and save).
# Find player results within game-logs and calculate total line-up score
# if a player has < 10 (? 5?) points, add to "players to remove" listing, re-run optimization and resave top-10 line-ups
# Produce DF that stores each line-up, its result, entry cost, win/lose cash, percentile and rough estimate from percentile --> $ won
# Produce report on total $ won/lost, ROI
# (maybe run some cross-validation to see right # of line-ups to use nightly? see if we can start filtering the data for predictions from full season --> last x games and cross-validate?)
##############################################################################################################################
##### Questions ######
## TO DOs ##
# run on everyone
# test / confirm #
##### Notes ######
# complete run time: ~ 4 minutes per day of results
##############################################################################################################################
# Package Import #
import numpy as np
import pandas as pd
from time_analysis import analysis_timeSeries # to delete in init
from optimization import DFS_Optimization # to delete in init
from datetime import date, datetime
from dateutil import rrule
# Functions #
def import_hist_results(path):
dt = pd.read_csv(path)
dt.Date = pd.to_datetime(dt.Date)
return dt
def identify_new_dates(hist_dt, imported_final_dt):
result_dates = hist_dt.Date.dt.date.drop_duplicates().tolist()
analysis_dates = imported_final_dt.Date.dt.date.drop_duplicates().tolist()
filter_dates = list(set(result_dates) - set(analysis_dates))
filter_dates.sort()
filter_dates = [date.strftime('%Y-%m-%d') for date in filter_dates]
return filter_dates
def prep_historic_data_opt(fd_hist_plyr_results, filt_date):
fd_hist_plyr_results = fd_hist_plyr_results[(fd_hist_plyr_results.Date == filt_date)]
fd_hist_plyr_results = fd_hist_plyr_results[(fd_hist_plyr_results['FD Points'] >= 10)]
fd_hist_slrs = fd_hist_plyr_results[['Position','Player Name','Salary']].copy()
fd_hist_slrs = fd_hist_slrs.rename(columns={'Player Name': 'Nickname'}).reset_index(drop=True)
fd_hist_slrs = fd_hist_slrs.sort_values('Salary', ascending=False)
return fd_hist_plyr_results, fd_hist_slrs
def merge_optim_histPlayer(pred_df, fd_hist_results):
rslts_df = pd.merge(pred_df, fd_hist_results, left_on=['Date','Player'], right_on=['Date','Player Name'], how='inner')
rslts_df = rslts_df.groupby(['Optimization_No','Date','Predicted_Score'])['FD Points'].agg(FD_Points='sum',Player_Count='count').reset_index()
rslts_df = rslts_df[['Optimization_No','Date','Predicted_Score','FD_Points','Player_Count']]
rslts_df.Date = pd.to_datetime(rslts_df.Date)
return rslts_df
def merge_model_contest(rslts_df, hst_rslts):
rslts_df = pd.merge(rslts_df, hst_rslts, left_on=['Date'], right_on=['Date'], how='inner')
rslts_df['Cash'] = np.where(rslts_df['FD_Points'] > rslts_df['Min Cash Score'],'Y','N')
rslts_df['Percentile'] = (rslts_df['FD_Points'] - rslts_df['Min Cash Score']) / (rslts_df['1st Place Score'] - rslts_df['Min Cash Score'])
rslts_df = rslts_df[['Optimization_No','Date','Predicted_Score','Cost','Player_Count','FD_Points','Cash','Percentile']]
return rslts_df
def percentile_conversion(rslts_df, prcnt_conv_dict):
conversion_df = pd.DataFrame.from_dict(prcnt_conv_dict, orient='index').reset_index()
conversion_df.columns = ['Percentile','multiplier']
rslts_df = rslts_df.sort_values('Percentile')
conversion_df = conversion_df.sort_values('Percentile')
rslts_df = pd.merge_asof(rslts_df, conversion_df, on='Percentile', direction='nearest')
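# Hedged mini-example (not part of the original pipeline): what the
# pd.merge_asof(..., direction='nearest') call above does: every line-up
# percentile is matched to the closest percentile row of the conversion
# table, pulling in its multiplier. All values below are made up.
def _demo_merge_asof_lookup():
    lineups = pd.DataFrame({'Percentile': [0.12, 0.47]})
    curve = pd.DataFrame({'Percentile': [0.0, 0.1, 0.5, 1.0],
                          'multiplier': [0.0, 1.5, 3.0, 10.0]})
    # both frames must be sorted on the key, as in percentile_conversion()
    matched = pd.merge_asof(lineups.sort_values('Percentile'), curve,
                            on='Percentile', direction='nearest')
    return matched  # 0.12 -> multiplier 1.5, 0.47 -> multiplier 3.0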
"""
This file contains several helper functions to calculate spectral power from
1D and 2D EEG data.
"""
import mne
import logging
import numpy as np
import pandas as pd
from scipy import signal
from scipy.integrate import simps
from scipy.interpolate import RectBivariateSpline
logger = logging.getLogger('yasa')
__all__ = ['bandpower', 'bandpower_from_psd', 'bandpower_from_psd_ndarray',
'irasa', 'stft_power']
def bandpower(data, sf=None, ch_names=None, hypno=None, include=(2, 3),
win_sec=4, relative=True, bandpass=False,
bands=[(0.5, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 16, 'Sigma'), (16, 30, 'Beta'), (30, 40, 'Gamma')],
kwargs_welch=dict(average='median', window='hamming')):
"""
Calculate the Welch bandpower for each channel and, if specified,
for each sleep stage.
.. versionadded:: 0.1.6
Parameters
----------
data : np.array_like or :py:class:`mne.io.BaseRaw`
1D or 2D EEG data. Can also be a :py:class:`mne.io.BaseRaw`, in which
case ``data``, ``sf``, and ``ch_names`` will be automatically
extracted, and ``data`` will also be converted from Volts (MNE default)
to micro-Volts (YASA).
sf : float
The sampling frequency of data AND the hypnogram.
Can be omitted if ``data`` is a :py:class:`mne.io.BaseRaw`.
ch_names : list
List of channel names, e.g. ['Cz', 'F3', 'F4', ...]. If None,
channels will be labelled ['CHAN000', 'CHAN001', ...].
Can be omitted if ``data`` is a :py:class:`mne.io.BaseRaw`.
hypno : array_like
Sleep stage (hypnogram). If the hypnogram is loaded, the
bandpower will be extracted for each sleep stage defined in
``include``.
The hypnogram must have the exact same number of samples as ``data``.
To upsample your hypnogram, please refer to
:py:func:`yasa.hypno_upsample_to_data`.
.. note::
The default hypnogram format in YASA is a 1D integer
vector where:
- -2 = Unscored
- -1 = Artefact / Movement
- 0 = Wake
- 1 = N1 sleep
- 2 = N2 sleep
- 3 = N3 sleep
- 4 = REM sleep
include : tuple, list or int
Values in ``hypno`` that will be included in the mask. The default is
(2, 3), meaning that the bandpower are sequentially calculated
for N2 and N3 sleep. This has no effect when ``hypno`` is None.
win_sec : int or float
The length of the sliding window, in seconds, used for the Welch PSD
calculation. Ideally, this should be at least two times the inverse of
the lower frequency of interest (e.g. for a lower frequency of interest
of 0.5 Hz, the window length should be at least 2 * 1 / 0.5 =
4 seconds).
relative : boolean
If True, bandpower is divided by the total power between the min and
max frequencies defined in ``band``.
bandpass : boolean
If True, apply a standard FIR bandpass filter using the minimum and
maximum frequencies in ``bands``. Fore more details, refer to
:py:func:`mne.filter.filter_data`.
bands : list of tuples
List of frequency bands of interests. Each tuple must contain the
lower and upper frequencies, as well as the band name
(e.g. (0.5, 4, 'Delta')).
kwargs_welch : dict
Optional keywords arguments that are passed to the
:py:func:`scipy.signal.welch` function.
Returns
-------
bandpowers : :py:class:`pandas.DataFrame`
Bandpower dataframe, in which each row is a channel and each column
a spectral band.
Notes
-----
For an example of how to use this function, please refer to
https://github.com/raphaelvallat/yasa/blob/master/notebooks/08_bandpower.ipynb
"""
# Type checks
assert isinstance(bands, list), 'bands must be a list of tuple(s)'
assert isinstance(relative, bool), 'relative must be a boolean'
assert isinstance(bandpass, bool), 'bandpass must be a boolean'
# Check if input data is a MNE Raw object
if isinstance(data, mne.io.BaseRaw):
sf = data.info['sfreq'] # Extract sampling frequency
ch_names = data.ch_names # Extract channel names
data = data.get_data() * 1e6 # Convert from V to uV
_, npts = data.shape
else:
# Safety checks
assert isinstance(data, np.ndarray), 'Data must be a numpy array.'
data = np.atleast_2d(data)
assert data.ndim == 2, 'Data must be of shape (nchan, n_samples).'
nchan, npts = data.shape
# assert nchan < npts, 'Data must be of shape (nchan, n_samples).'
assert sf is not None, 'sf must be specified if passing a numpy array.'
assert isinstance(sf, (int, float))
if ch_names is None:
ch_names = ['CHAN' + str(i).zfill(3) for i in range(nchan)]
else:
ch_names = np.atleast_1d(np.asarray(ch_names, dtype=str))
assert ch_names.ndim == 1, 'ch_names must be 1D.'
assert len(ch_names) == nchan, 'ch_names must match data.shape[0].'
if bandpass:
# Apply FIR bandpass filter
all_freqs = np.hstack([[b[0], b[1]] for b in bands])
fmin, fmax = min(all_freqs), max(all_freqs)
data = mne.filter.filter_data(data.astype('float64'), sf, fmin, fmax,
verbose=0)
win = int(win_sec * sf) # nperseg
if hypno is None:
# Calculate the PSD over the whole data
freqs, psd = signal.welch(data, sf, nperseg=win, **kwargs_welch)
return bandpower_from_psd(psd, freqs, ch_names, bands=bands,
relative=relative).set_index('Chan')
else:
# Per each sleep stage defined in ``include``.
hypno = np.asarray(hypno)
assert include is not None, 'include cannot be None if hypno is given'
include = np.atleast_1d(np.asarray(include))
assert hypno.ndim == 1, 'Hypno must be a 1D array.'
assert hypno.size == npts, 'Hypno must have same size as data.shape[1]'
assert include.size >= 1, '`include` must have at least one element.'
assert hypno.dtype.kind == include.dtype.kind, ('hypno and include '
'must have same dtype')
assert np.in1d(hypno, include).any(), ('None of the stages '
'specified in `include` '
'are present in hypno.')
# Initialize empty dataframe and loop over stages
df_bp = pd.DataFrame([])
for stage in include:
if stage not in hypno:
continue
data_stage = data[:, hypno == stage]
freqs, psd = signal.welch(data_stage, sf, nperseg=win,
**kwargs_welch)
bp_stage = bandpower_from_psd(psd, freqs, ch_names, bands=bands,
relative=relative)
bp_stage['Stage'] = stage
df_bp = df_bp.append(bp_stage)
return df_bp.set_index(['Stage', 'Chan'])
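# Illustrative usage (not from the original file): relative bandpower on
# 30 seconds of fake 4-channel EEG sampled at 100 Hz. Real recordings and a
# real hypnogram would normally come from an EDF / MNE pipeline instead.
def _demo_bandpower():
    sf = 100.
    rng = np.random.RandomState(42)
    data = rng.normal(size=(4, int(30 * sf)))   # (n_chan, n_samples), in uV
    bp = bandpower(data, sf=sf, ch_names=['F3', 'F4', 'C3', 'C4'],
                   win_sec=4, relative=True)
    return bp  # one row per channel, one column per band (Delta ... Gamma)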
def bandpower_from_psd(psd, freqs, ch_names=None, bands=[(0.5, 4, 'Delta'),
(4, 8, 'Theta'), (8, 12, 'Alpha'), (12, 16, 'Sigma'),
(16, 30, 'Beta'), (30, 40, 'Gamma')], relative=True):
"""Compute the average power of the EEG in specified frequency band(s)
given a pre-computed PSD.
.. versionadded:: 0.1.5
Parameters
----------
psd : array_like
Power spectral density of data, in uV^2/Hz.
Must be of shape (n_channels, n_freqs).
See :py:func:`scipy.signal.welch` for more details.
freqs : array_like
Array of frequencies.
ch_names : list
List of channel names, e.g. ['Cz', 'F3', 'F4', ...]. If None,
channels will be labelled ['CHAN000', 'CHAN001', ...].
bands : list of tuples
List of frequency bands of interests. Each tuple must contain the
lower and upper frequencies, as well as the band name
(e.g. (0.5, 4, 'Delta')).
relative : boolean
If True, bandpower is divided by the total power between the min and
max frequencies defined in ``band`` (default 0.5 to 40 Hz).
Returns
-------
bandpowers : :py:class:`pandas.DataFrame`
Bandpower dataframe, in which each row is a channel and each column
a spectral band.
"""
# Type checks
assert isinstance(bands, list), 'bands must be a list of tuple(s)'
assert isinstance(relative, bool), 'relative must be a boolean'
# Safety checks
freqs = np.asarray(freqs)
assert freqs.ndim == 1
psd = np.atleast_2d(psd)
assert psd.ndim == 2, 'PSD must be of shape (n_channels, n_freqs).'
all_freqs = np.hstack([[b[0], b[1]] for b in bands])
fmin, fmax = min(all_freqs), max(all_freqs)
idx_good_freq = np.logical_and(freqs >= fmin, freqs <= fmax)
freqs = freqs[idx_good_freq]
res = freqs[1] - freqs[0]
nchan = psd.shape[0]
assert nchan < psd.shape[1], 'PSD must be of shape (n_channels, n_freqs).'
if ch_names is not None:
ch_names = np.atleast_1d(np.asarray(ch_names, dtype=str))
assert ch_names.ndim == 1, 'ch_names must be 1D.'
assert len(ch_names) == nchan, 'ch_names must match psd.shape[0].'
else:
ch_names = ['CHAN' + str(i).zfill(3) for i in range(nchan)]
bp = np.zeros((nchan, len(bands)), dtype=np.float)
psd = psd[:, idx_good_freq]
total_power = simps(psd, dx=res)
total_power = total_power[..., np.newaxis]
# Check if there are negative values in PSD
if (psd < 0).any():
msg = (
"There are negative values in PSD. This will result in incorrect "
"bandpower values. We highly recommend working with an "
"all-positive PSD. For more details, please refer to: "
"https://github.com/raphaelvallat/yasa/issues/29")
logger.warning(msg)
# Enumerate over the frequency bands
labels = []
for i, band in enumerate(bands):
b0, b1, la = band
labels.append(la)
idx_band = np.logical_and(freqs >= b0, freqs <= b1)
bp[:, i] = simps(psd[:, idx_band], dx=res)
if relative:
bp /= total_power
# Convert to DataFrame
bp = pd.DataFrame(bp, columns=labels)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import re
import ipaddress
import codecs
import time
import pandas as pd
import urllib3
from urllib3 import util
from classifier4gyoithon.GyoiClassifier import DeepClassifier
from classifier4gyoithon.GyoiExploit import Metasploit
from classifier4gyoithon.GyoiReport import CreateReport
from util import Utilty
# Type of printing.
OK = 'ok' # [*]
NOTE = 'note' # [+]
FAIL = 'fail' # [-]
WARNING = 'warn' # [!]
NONE = 'none' # No label.
# Identify product name using signature.
def identify_product(category, target_url, response, utility):
product_list = []
reason_list = []
full_path = os.path.dirname(os.path.abspath(__file__))
file_name = 'signature_' + category + '.txt'
try:
with codecs.open(os.path.join(full_path + '/signatures/', file_name), 'r', 'utf-8') as fin:
matching_patterns = fin.readlines()
for pattern in matching_patterns:
items = pattern.replace('\r', '').replace('\n', '').split('@')
keyword_list = []
product = items[0]
signature = items[1]
list_match = re.findall(signature, response, flags=re.IGNORECASE)
if len(list_match) != 0:
# Output result (header)
keyword_list.append(list_match)
utility.print_message(OK, 'category : {}'.format(category))
utility.print_message(OK, 'product : {}'.format(product))
utility.print_message(OK, 'reason : {}'.format(keyword_list))
utility.print_message(OK, 'target url : {}'.format(target_url))
utility.print_message(NONE, '-' * 42)
product_list.append(product)
reason_list.append(keyword_list)
except Exception as err:
utility.print_exception(err, '{}'.format(err))
return product_list, reason_list
# Classifier product name using signatures.
def classifier_signature(ip_addr, port, target_url, response, log_file, utility):
utility.print_message(NOTE, 'Analyzing gathered HTTP response using Signature.')
ip_list = []
port_list = []
vhost_list = []
judge_list = []
version_list = []
reason_list = []
scan_type_list = []
ua_list = []
http_ver_list = []
ssl_list = []
sni_list = []
url_list = []
log_list = []
product_list = []
for category in ['os', 'web', 'framework', 'cms']:
products, keywords = identify_product(category, target_url, response, utility)
for product, keyword in zip(products, keywords):
ip_list.append(ip_addr)
port_list.append(port)
vhost_list.append(ip_addr)
judge_list.append(category + ':' + str(product))
version_list.append('-')
reason_list.append(keyword)
scan_type_list.append('[ip]')
ua_list.append('-')
http_ver_list.append('HTTP/1.1')
ssl_list.append('-')
sni_list.append('-')
url_list.append(target_url)
log_list.append(log_file)
product_list.append(product)
if len(product_list) == 0:
utility.print_message(WARNING, 'Product Not Found.')
return []
# logging.
series_ip = pd.Series(ip_list)
series_port = pd.Series(port_list)
series_vhost = pd.Series(vhost_list)
series_judge = pd.Series(judge_list)
series_version = pd.Series(version_list)
series_reason = pd.Series(reason_list)
series_scan_type = pd.Series(scan_type_list)
series_ua = pd.Series(ua_list)
series_http_ver = pd.Series(http_ver_list)
series_ssl = pd.Series(ssl_list)
series_sni = pd.Series(sni_list)
series_url = pd.Series(url_list)
series_log = pd.Series(log_list)
df = pd.DataFrame({'ip': series_ip,
'port': series_port,
'vhost': series_vhost,
'judge': series_judge,
'judge_version': series_version,
'reason': series_reason,
'scantype': series_scan_type,
'ua': series_ua,
'version': series_http_ver,
'ssl': series_ssl,
'sni': series_sni,
'url': series_url,
'log': series_log},
columns=['ip', 'port', 'vhost', 'judge', 'judge_version', 'reason',
'scantype', 'ua', 'version', 'ssl', 'sni', 'url', 'log'])
saved_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'gyoithon')
df.sort_values(by='port', ascending=False).to_csv(os.path.join(saved_path, 'webconf.csv'),
mode='a',
header=False,
index=False)
return product_list
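# Hedged illustration (the pattern below is made up, not a signature shipped
# with the tool): identify_product() expects every line of
# signatures/signature_<category>.txt to follow the "<product>@<regex>" format,
# e.g. "nginx@Server: nginx/?[\d.]*". This mimics one matching iteration.
def _demo_signature_match(response='HTTP/1.1 200 OK\r\nServer: nginx/1.18.0\r\n\r\n'):
    pattern = r'nginx@Server: nginx/?[\d.]*'
    product, signature = pattern.split('@', 1)
    hits = re.findall(signature, response, flags=re.IGNORECASE)
    return product if hits else None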
# Create webconf.csv
def create_webconf(ip_addr, port, log_file):
utility.print_message(NOTE, 'Create "webconf.csv".')
series_ip = pd.Series([ip_addr])
series_port = pd.Series([str(port)])
series_vhost = pd.Series([ip_addr])
series_judge = pd.Series(['-'])
series_version = pd.Series(['-'])
"""Store the data in a nice big dataframe"""
import sys
from datetime import datetime, timedelta
import pandas as pd
import geopandas as gpd
import numpy as np
class Combine:
"""Combine defined countries together"""
THE_EU = [ 'Austria', 'Italy', 'Belgium', 'Latvia',
'Bulgaria', 'Lithuania', 'Croatia',
'Luxembourg', 'Cyprus', 'Malta',
'Czechia', 'Netherlands', 'Denmark',
'Poland', 'Estonia', 'Portugal',
'Finland', 'Romania', 'France',
'Slovakia', 'Germany', 'Slovenia',
'Greece', 'Spain', 'Hungary',
'Sweden', 'Ireland' ]
def __init__(self, options):
"""Init"""
self.options = options
self.timeseries = []
self.countries = None
self.description = None
self.merged = None
self.cc = None
self.populations = []
self.national_populations = None
self.get_populations()
self.countries_long = {'nl': 'The Netherlands', 'sco': 'Scotland', 'eng': 'England',
'wal': 'Wales', 'ni': 'Northern Ireland'}
self.jhu = JHU(self)
def judat(self):
"""Dumb helper for another library"""
self.timeseries.append(NLTimeseries(False).national(False))
self.combine_national(False)
#self.merged['Week'] = self.merged.index.strftime('%U')
#self.merged = self.merged.groupby(['Week']) \
#.agg({'Aantal': 'sum'})
print(self.merged)
def process(self):
"""Do it"""
cumulative = False
if self.options.pivot:
cumulative = True
for nation in self.cc:
usejhu = True
if self.options.nation:
print(f'Processing National data {nation}')
if nation in ['wal', 'sco', 'eng']:
self.timeseries.append(UKTimeseries(False).national(nation,cumulative))
usejhu = False
#if nation == 'nl':
#self.timeseries.append(NLTimeseries(False).national(cumulative))
#usejhu = False
if usejhu:
self.timeseries.append(XXTimeseries(False,
{nation: self.countries_long[nation]}).national(cumulative))
else:
print(f'Processing combined data {nation}')
if nation in ['wal', 'sco', 'eng']:
self.timeseries.append(UKTimeseries(True).get_data())
usejhu = False
if nation == 'nl':
self.timeseries.append(NLTimeseries(True).get_data())
usejhu = False
if usejhu:
self.timeseries.append(XXTimeseries(True).get_data())
if len(self.timeseries) == 0:
print('No country Data to process')
sys.exit()
if self.options.pivot:
self.combine_pivot()
return
if self.options.nation:
self.combine_national()
return
self.get_combined_data()
def combine_pivot(self):
"""Pivot data for pandas_alive"""
print('Pivotting data')
self.merged = pd.concat(self.timeseries)
self.merged['Datum'] = pd.to_datetime(self.merged['Datum'])
# So we can add it as an option later
column = 'Overleden'
#column = 'Aantal'
# Convert to 100K instead of millions
for country in self.cc:
self.merged.loc[(self.merged.country == country), 'population'] \
= self.national_populations[country] * 10
# Per-Capita
self.merged[column] = self.merged[column] / self.merged['population']
self.merged = self.merged.pivot(index='Datum',
columns='country',
values=column).fillna(0)
self.trim_data()
print(self.merged)
def combine_national(self, trim=True):
"""Combine national totals"""
self.merged = pd.concat(self.timeseries)
self.merged['Datum'] = pd.to_datetime(self.merged['Datum'])
self.merged = self.merged.set_index('Datum')
self.merged.sort_index(inplace=True)
for country in self.cc:
self.merged.loc[(self.merged.country == country), 'population'] \
= self.national_populations[country] * 10
self.merged.loc[(self.merged.country == country), 'cname'] \
= self.countries_long[country]
for column in ['Aantal', 'Ziekenhuisopname', 'Overleden']:
if column not in self.merged.columns:
continue
pgpd = f"{column}-gpd"
radaily = f"{column}-radaily"
raweekly = f"{column}-raweekly"
ranonpc = f"{column}-ranonpc"
self.merged[pgpd] = self.merged[column] / self.merged['population']
self.merged[radaily] = self.merged.groupby('country',
sort=False)[pgpd] \
.transform(lambda x: x.rolling(7, 1).mean())
self.merged[raweekly] = self.merged.groupby('country',
sort=False)[pgpd] \
.transform(lambda x: x.rolling(7).sum())
self.merged[ranonpc] = self.merged.groupby('country',
sort=False)[column] \
.transform(lambda x: x.rolling(7).sum())
if(trim):
self.trim_data()
def trim_data(self):
if self.options.startdate is not None:
self.merged = self.merged.query(f'{self.options.startdate} <= Datum')
if self.options.enddate is not None:
self.merged = self.merged.query(f'Datum <= {self.options.enddate}')
def get(self):
"""Return the data set"""
return self.merged
def get_populations(self):
"""National populations for the calculations that need it"""
self.national_populations = pd.read_csv('data/populations.csv', delimiter=',',
index_col=0, header=None, squeeze=True).to_dict()
def get_max(self, column):
"""Max value in df"""
return self.merged[column].max()
def get_combined_data(self):
"""Get a single dataframe containing all countries we deal with
I did this so I could draw combined choropleths but that has proven
to be somewhat more challenging than I originally thought
"""
print('Calculating combined data')
dataframe = pd.concat(self.timeseries)
dataframe = dataframe.set_index('Datum')
dataframe = dataframe.sort_index()
dataframe['pop_pc'] = dataframe['population'] / 1e5
# Filter out countries we do not want
for country in self.countries:
dataframe = dataframe[~dataframe['country'].isin([country])]
# Finally create smoothed columns
dataframe['radaily'] = dataframe.groupby('Gemeentecode',
sort=False)['Aantal'] \
.transform(lambda x: x.rolling(7, 1).mean())
dataframe['weekly'] = dataframe.groupby('Gemeentecode',
sort=False)['Aantal'] \
.transform(lambda x: x.rolling(7).sum())
dataframe['radaily_pc'] = dataframe['radaily'] / dataframe['pop_pc']
dataframe['weekly_pc'] = dataframe['weekly'] / dataframe['pop_pc']
if self.options.startdate is not None:
dataframe = dataframe.query(f'{self.options.startdate} <= Datum')
if self.options.enddate is not None:
dataframe = dataframe.query(f'Datum <= {self.options.enddate}')
print('Finished calculating combined data')
self.merged = dataframe
def parse_countries(self, country_str):
"""Sort out country data"""
ret = []
if country_str is None:
country_list = self.countries_long.keys()
else:
country_list = country_str.split(',')
if 'eu' in country_list:
country_list.remove('eu')
country_list += self.THE_EU
print('Setting EU')
for country in country_list:
country = country.lower()
count = None
#if 'nether' in country:
#count = 'nl'
if 'scot' in country:
count = 'sco'
if 'eng' in country:
count = 'eng'
if 'wal' in country:
count = 'wal'
#if 'ni' in country:
# count = 'ni'
if count is not None:
ret.append(count)
else:
retcountry = self.jhu.get_country(country)
if retcountry:
ret.append(retcountry)
self.cc = ret
self.countries = list(set(self.countries_long.keys()) - set(ret))
self.description = '_'.join(ret)
def project_for_date(self, date):
"""Project infections per Gemeente and make league table"""
if date is None:
date = self.merged.index.max().strftime('%Y%m%d')
datemax = datetime.strptime(date, '%Y%m%d')
datemin = (datemax - timedelta(days=4)).strftime('%Y%m%d')
self.merged = self.merged.query(f'{datemin} <= Datum <= {date}')
self.merged = self.merged.groupby(['Gemeentecode']) \
.agg({'Aantal': 'sum', 'Gemeentenaam': 'first',
'pop_pc': 'first', 'population': 'first', 'country': 'first'})
self.merged['percapita'] = self.merged['Aantal'] / self.merged['pop_pc']
self.merged.sort_values(by=['percapita'], ascending=False, inplace=True)
class Timeseries:
"""Abstract class for timeseries"""
def __init__(self, process=True):
self.merged = None
self.cumulative = False
if process:
self.get_pop()
self.get_map()
self.get_source_data()
def get_data(self):
"""Pass back the data series"""
return self.merged
def get_source_data(self):
"""Placeholder"""
def get_pop(self):
"""Placeholder"""
def get_map(self):
"""Placeholder"""
def set_cumulative(self, value):
"""Daily or cumulative"""
self.cumulative = value
class JHU:
"""Get data from <NAME>"""
JHD = '../COVID-19/csse_covid_19_data'
def __init__(self, combined):
"""Init"""
self.dataframe = None
self.combined = combined
self.load()
def get_country(self, country):
"""Check Country is in JHU data"""
row = self.dataframe.loc[self.dataframe['Combined_Key'] == country]
if len(row) == 0:
return False
self.combined.countries_long[row['iso2'].values[0].lower()] = country
self.combined.national_populations[row['iso2'].values[0].lower()] \
= row['Population'].values[0]
return row['iso2'].values[0].lower()
def load(self):
"""Load JHU lookup table"""
dataframe = pd.read_csv(f'{self.JHD}/UID_ISO_FIPS_LookUp_Table.csv',
delimiter=',')
dataframe['Combined_Key'] = dataframe['Combined_Key'].str.lower()
dataframe['Population'] = dataframe['Population'] / 1e6
self.dataframe = dataframe
class XXTimeseries(Timeseries):
"""Generic JHU Data class"""
# TODO: Duplicated code
JHD = '../COVID-19/csse_covid_19_data'
def __init__(self, process=True, country=None):
"""Init"""
Timeseries.__init__(self, process)
print(country.keys())
self.countrycode = list(country.keys())[0]
self.country = country[self.countrycode]
self.cumulative = False
def national(self, cumulative):
"""Get columns"""
self.set_cumulative(cumulative)
timeseries = 'csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
overleden = self.calculate(timeseries, 'Overleden')
timeseries = 'csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
aantal = self.calculate(timeseries, 'Aantal')
aantal['Overleden'] = overleden['Overleden']
return aantal.assign(country=self.countrycode)
def calculate(self, timeseries, column):
"""Get national totals"""
file = f'{self.JHD}/{timeseries}'
dataframe = pd.read_csv(file, delimiter=',')
dataframe['Country/Region'] = dataframe['Country/Region'].str.lower()
row = dataframe.loc[dataframe['Country/Region'] == self.country]
row = row.loc[row['Province/State'].isnull()]
row = row.reset_index(drop=True)
row.drop(columns=['Province/State', 'Lat', 'Long'], inplace=True)
row.set_index('Country/Region', inplace=True)
dataframe = row.T
if not self.cumulative:
dataframe[column] = dataframe[self.country] - dataframe[self.country].shift(1)
else:
dataframe[column] = dataframe[self.country]
dataframe.drop(columns=[self.country], inplace=True)
dataframe.dropna(inplace=True)
dataframe = dataframe.reset_index()
dataframe.rename(columns={'index': 'Datum'}, inplace=True)
return dataframe
class BETimeseries(Timeseries):
"""Belgium data"""
def __init__(self, process=True):
"""Init"""
Timeseries.__init__(self, process)
def national(self, cumulative):
"""Get national totals"""
self.set_cumulative(cumulative)
dataframe = pd.read_csv('data/belgiumt.csv',
delimiter=',')
dataframe.dropna(inplace=True)
dataframe.rename(columns={'CASES': 'Aantal', 'DATE': 'Datum'}, inplace=True)
dataframe = dataframe.groupby(['Datum']).agg({'Aantal': 'sum'})
dataframe = dataframe.reset_index()
dataframe = dataframe.assign(country='be')
return dataframe
def get_source_data(self):
"""Get BE source data for infections"""
dataframe = pd.read_csv('data/belgium.csv', delimiter=',')
dataframe.dropna(inplace=True)
dataframe['CASES'] = dataframe['CASES'].replace(['<5'], '0')
dataframe.rename(columns={'CASES': 'Aantal',
'NIS5': 'Gemeentecode',
'DATE': 'Datum',
'TX_DESCR_NL': 'Gemeentenaam'}, inplace=True)
dataframe.drop(columns=['TX_DESCR_FR', 'TX_ADM_DSTR_DESCR_NL', 'TX_ADM_DSTR_DESCR_FR',
'PROVINCE', 'REGION'], inplace=True)
dataframe['Datum'] = pd.to_datetime(dataframe['Datum'])
dataframe = self.resample(dataframe)
dataframe = dataframe.set_index('Gemeentecode').dropna()
merged = dataframe.join(self.pop)
merged.index = merged.index.astype('int')
# merged.reset_index(inplace=True)
merged = merged.join(self.map)
merged.reset_index(inplace=True)
merged.rename(columns={'index': 'Gemeentecode'}, inplace=True)
merged = merged.assign(country='be')
self.merged = merged
def resample(self, dataframe):
"""Timeseries is incomplete, fill it in"""
# Normally you would just resample, but we have 1500-odd gemeentes and each needs a
# completed data series or the choropleths will look very odd
idx = pd.date_range(min(dataframe.Datum), max(dataframe.Datum))
gems = list(set(dataframe['Gemeentecode'].values))
newdata = []
for gem in gems:
gemdf = dataframe[dataframe['Gemeentecode'] == gem]
#gemdf.set_index(gemdf.Datum, inplace=True)
default = self.get_row(gemdf.loc[gemdf['Gemeentecode'] == gem])
gemdf['strdate'] = gemdf['Datum'].dt.strftime('%Y-%m-%d')
for date in idx:
fdate = date.strftime('%Y-%m-%d')
if fdate not in gemdf['strdate'].values:
newdata.append({'Datum': date,
'Gemeentecode': default['Gemeentecode'],
'Gemeentenaam': default['Gemeentenaam'],
'Aantal': default['Aantal']
})
else:
row = gemdf.loc[gemdf['Datum'] == date]
newdata.append({'Datum': date,
'Gemeentecode': row['Gemeentecode'].values[0],
'Gemeentenaam': row['Gemeentenaam'].values[0],
'Aantal': int(row['Aantal'].values[0])
})
return pd.DataFrame(newdata)
def get_row(self, series):
"""Return one row"""
return {'Datum': series['Datum'].values[0],
'Gemeentecode': series['Gemeentecode'].values[0],
'Gemeentenaam': series['Gemeentenaam'].values[0],
'Aantal': series['Aantal'].values[0]
}
def get_pop(self):
"""Fetch the Population figures for BE"""
pop = pd.read_csv('data/bepop.csv', delimiter=',')
pop = pop.set_index('Gemeentecode')
self.pop = pop
def get_map(self):
"""Get BE map data"""
map_df = gpd.read_file('maps/BELGIUM_-_Municipalities.shp')
map_df.rename(columns={'CODE_INS': 'Gemeentecode',
'ADMUNADU': 'Gemeentenaam'}, inplace=True)
map_df['Gemeentecode'] = map_df['Gemeentecode'].astype('int')
map_df.drop(columns=['Gemeentenaam'], inplace=True)
map_df = map_df.set_index('Gemeentecode')
self.map = map_df
class NLTimeseries(Timeseries):
"""Dutch Timeseries"""
def __init__(self, process=True):
"""Init"""
Timeseries.__init__(self, process)
def national(self, cumulative):
"""Get national totals"""
self.set_cumulative(cumulative)
df1 = self.get_subtotal('Totaal', rename=False, cumulative=self.cumulative)
df2 = self.get_subtotal('Overleden', rename=True, cumulative=self.cumulative)
df3 = self.get_subtotal('Ziekenhuisopname', rename=True, cumulative=self.cumulative)
dataframe = df1.merge(df2, on='Datum')
dataframe = dataframe.merge(df3, on='Datum')
dataframe = dataframe.assign(country='nl')
#dataframe = pd.concat([df1,df2,df3])
return dataframe
def get_subtotal(self, typ, rename=True, cumulative=True):
"""Get national totals"""
dataframe = pd.read_csv('../CoronaWatchNL/data/rivm_NL_covid19_national.csv',
delimiter=',')
dataframe = dataframe[dataframe['Type'] == typ]
dataframe.drop(columns=['Type'], inplace=True)
if not cumulative:
print('Converting cumulative totals to daily values')
dataframe['Aantal'] = dataframe['Aantal'] - dataframe['Aantal'].shift(1)
if rename:
dataframe.rename(columns={"Aantal": typ}, inplace=True)
dataframe = dataframe.fillna(0)
dataframe['Datum'] = pd.to_datetime(dataframe['Datum'])
return dataframe
def get_pop(self):
"""Fetch the Population figures for NL"""
dataframe = pd.read_csv(
'data/Regionale_kerncijfers_Nederland_31082020_181423.csv', delimiter=';')
dataframe = dataframe.set_index("Regio")
dataframe.rename(columns={"aantal": "population"}, inplace=True)
self.pop = dataframe[dataframe.columns[dataframe.columns.isin(['population'])]]
def get_source_data(self):
"""Get NL source data for infections"""
dataframe = pd.read_csv('../CoronaWatchNL/data-geo/data-municipal/RIVM_NL_municipal.csv',
delimiter=',')
dataframe = dataframe[dataframe['Type'] == 'Totaal']
dataframe['Datum'] = pd.to_datetime(dataframe['Datum'])
dataframe.drop(columns=['Type', 'Provincienaam', 'Provinciecode'], inplace=True)
dataframe.dropna(inplace=True)
dataframe = dataframe.set_index('Gemeentenaam').dropna()
nlmerged = dataframe.join(self.pop)
nlmerged.reset_index(inplace=True)
nlmerged.rename(columns={'index': 'Gemeentenaam'}, inplace=True)
nlmerged = nlmerged.set_index('Gemeentecode')
nlmerged = nlmerged.join(self.map)
nlmerged.reset_index(inplace=True)
nlmerged = nlmerged.assign(country='nl')
self.merged = nlmerged
def get_map(self):
"""Get NL map data"""
map_df = gpd.read_file('maps/gemeente-2019.geojson')
#map_df = map_df.reset_index(inplace=True)
map_df.rename(columns={'Gemeenten_': 'Gemeentecode'}, inplace=True)
map_df = map_df.set_index("Gemeentecode")
map_df.drop(columns=['Gemnr', 'Shape_Leng', 'Shape_Area'], inplace=True)
self.map = map_df
class UKTimeseries(Timeseries):
"""UK Timeseries"""
def __init__(self, process=True):
"""Init"""
Timeseries.__init__(self, process)
def national(self, country, cumulative):
"""Use national totals"""
self.set_cumulative(cumulative)
dataframe = pd.read_csv('data/ukt.csv')
if cumulative:
dataframe.rename(columns={"cumCasesBySpecimenDate": "Aantal", 'date': 'Datum',
'areaCode': 'Gemeentecode',
'cumDeaths28DaysByPublishDate': 'Overleden'}, inplace=True)
else:
dataframe.rename(columns={"newCasesBySpecimenDate": "Aantal", 'date': 'Datum',
'areaCode': 'Gemeentecode',
'newDeaths28DaysByPublishDate': 'Overleden'}, inplace=True)
dataframe.loc[(dataframe.Gemeentecode.astype(str).str.startswith('S')), 'country'] = 'sco'
dataframe.loc[(dataframe.Gemeentecode.astype(str).str.startswith('W')), 'country'] = 'wal'
dataframe.loc[(dataframe.Gemeentecode.astype(str).str.startswith('E')), 'country'] = 'eng'
dataframe.loc[(dataframe.Gemeentecode.astype(str).str.startswith('N')), 'country'] = 'ni'
dataframe = dataframe[dataframe['country'] == country]
return dataframe
def get_pop(self):
"""Fetch the population figures for the UK"""
dataframe = pd.read_csv('data/ukpop4.csv')
dataframe = dataframe.set_index("ladcode20")
dataframe = dataframe.groupby(['ladcode20']).agg(sum)
dataframe.rename(columns={"population_2019": "population"}, inplace=True)
self.pop = dataframe[dataframe.columns[dataframe.columns.isin(['population'])]]
def get_source_data(self):
"""Get UK source data for infections"""
dataframe = pd.read_csv('data/uk.csv', delimiter=',')
dataframe['date'] = pd.to_datetime(dataframe['date'])
columns = {'date': 'Datum', 'areaName': 'Gemeentenaam',
'areaCode': 'Gemeentecode', 'newCasesBySpecimenDate': 'Aantal',
'cumCasesBySpecimenDate': 'AantalCumulatief'
}
dataframe.rename(columns=columns, inplace=True)
dataframe = dataframe.set_index('Gemeentecode').dropna()
ukmerged = dataframe.join(self.pop)
ukmerged = ukmerged.join(self.map)
ukmerged.reset_index(inplace=True)
ukmerged.rename(columns={'index': 'Gemeentecode'}, inplace=True)
# Mark the countries for later filtering
ukmerged.loc[(ukmerged.Gemeentecode.astype(str).str.startswith('S')), 'country'] = 'sco'
ukmerged.loc[(ukmerged.Gemeentecode.astype(str).str.startswith('W')), 'country'] = 'wal'
ukmerged.loc[(ukmerged.Gemeentecode.astype(str).str.startswith('E')), 'country'] = 'eng'
ukmerged.loc[(ukmerged.Gemeentecode.astype(str).str.startswith('N')), 'country'] = 'ni'
self.merged = ukmerged
def get_map(self):
"""Get UK Map Data"""
map_df = gpd.read_file('maps/uk_counties_2020.geojson')
# Scotland
# map_df = map_df[~map_df['lad19cd'].astype(str).str.startswith('S')]
# Northern Ireland
# map_df = map_df[~map_df['lad19cd'].astype(str).str.startswith('N')]
map_df.rename(columns={'lad19cd': 'Gemeentecode'}, inplace=True)
map_df.drop(columns=['lad19nm', 'lad19nmw', 'st_areashape',
'st_lengthshape', 'bng_e', 'bng_n', 'long', 'lat'], inplace=True)
map_df = map_df.set_index("Gemeentecode")
self.map = map_df
class DETimeseries(Timeseries):
"""DE Timeseries"""
def __init__(self, process=True):
"""Init"""
Timeseries.__init__(self, process)
def get_pop(self):
"""Fetch the population figures for the DE"""
dataframe = pd.read_csv('data/depop.csv')
dataframe = dataframe.set_index("Gemeentenaam")
self.pop = dataframe
def get_source_data(self):
"""Get DE source data for infections"""
dataframe = pd.read_excel(
'data/germany.xlsx', sheet_name='BL_7-Tage-Fallzahlen', skiprows=[0, 1])
# Rename columns
dataframe.rename(columns={'Unnamed: 0': 'Gemeentenaam'}, inplace=True)
dataframe = dataframe.set_index('Gemeentenaam')
dataframe = dataframe.T
transform = []
for index, row in dataframe.iterrows():
for region in row.keys():
transform.append({'Datum': row.name, 'Aantal': row[region], 'Gemeentenaam': region})
dataframe = pd.DataFrame(transform)
import requests
import deeptrade
import pandas as pd
class StockPrice():
def __init__(self):
self.head = {'Authorization': "Token %s" %deeptrade.api_key}
def by_date(self,date,dataframe=False):
"""
:parameters:
- date: a day date in the format %YYYY-%MM-%DD
- dataframe: whether to return the result as json (False) or a pandas dataframe
:returns:
json or pandas dataframe with all the tickers of the day date and
their corresponding stock price (OHLC)
"""
endpoint = deeptrade.api_base+"stock_date/"+date
g = requests.get(endpoint, headers=self.head).json()
if dataframe:
df = pd.DataFrame(g)
return df
else:
return g
def by_ticker(self,ticker,dataframe=False):
"""
:parameters:
- ticker: a ticker such as 'AMZN'
- dataframe: whether to return the result as json (False) or a pandas dataframe
:returns:
json or pandas dataframe with all the hist. OHLC information of the ticker
"""
endpoint = deeptrade.api_base+"stocks/"+ticker
g = requests.get(endpoint, headers=self.head).json()
if dataframe:
df = pd.DataFrame(g)
return df
else:
return g
def stock_by_date_ticker(self, date, ticker, dataframe=False):
"""
:parameters:
- date: a day date in the format %YYYY-%MM-%DD
- ticker: a ticker such as 'AMZN'
- dataframe: whether to return the result as json (False) or a pandas dataframe
:returns:
json or pandas dataframe with all the hist. OHLC & stock information for the particular date
"""
endpoint = deeptrade.api_base+"stockdate/"+ticker+'/'+date
g = requests.get(endpoint, headers=self.head).json()
if dataframe:
df = pd.DataFrame(g)
return df
else:
return g
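# Hedged usage sketch (not part of the original client): the wrapper above
# assumes deeptrade.api_key is set before any request is made. The token,
# ticker and date below are placeholders, and the calls hit the live API.
def _demo_stockprice():
    deeptrade.api_key = 'YOUR_TOKEN_HERE'   # placeholder, not a real key
    sp = StockPrice()
    day = sp.by_date('2019-01-02', dataframe=True)        # OHLC for every ticker that day
    amzn = sp.by_ticker('AMZN', dataframe=True)           # full history for one ticker
    one = sp.stock_by_date_ticker('2019-01-02', 'AMZN')   # JSON for one ticker on one day
    return day, amzn, one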
import os
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def sqlite_db() -> str:
conn = os.environ["SQLITE_URL"]
return conn
def test_read_sql_without_partition(sqlite_db: str) -> None:
query = "SELECT test_int, test_nullint, test_str, test_float, test_bool, test_date, test_time, test_datetime FROM test_table"
df = read_sql(sqlite_db, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "こんにちは", "b", "Ha好ち😁ðy̆", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
"test_date": pd.Series(
[
np.datetime64("1996-03-13"),
np.datetime64("1996-01-30"),
np.datetime64("1996-02-28"),
np.datetime64("2020-01-12"),
np.datetime64("1996-04-20"),
None
], dtype="datetime64[ns]"
),
"test_time": pd.Series(
["08:12:40", "10:03:00", "23:00:10", "23:00:10", "18:30:00", "18:30:00"], dtype="object"
),
"test_datetime": pd.Series(
[
np.datetime64("2007-01-01T10:00:19"),
np.datetime64("2005-01-01T22:03:00"),
None,
np.datetime64("1987-01-01T11:00:00"),
None,
np.datetime64("2007-10-01T10:32:00")
], dtype="datetime64[ns]"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition(sqlite_db: str) -> None:
query = "SELECT test_int, test_nullint, test_str, test_float, test_bool, test_date, test_time, test_datetime FROM test_table"
df = read_sql(
sqlite_db,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "こんにちは", "b", "Ha好ち😁ðy̆", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
"test_date": pd.Series(
[
np.datetime64("1996-03-13"),
np.datetime64("1996-01-30"),
np.datetime64("1996-02-28"),
np.datetime64("2020-01-12"),
np.datetime64("1996-04-20"),
None
], dtype="datetime64[ns]"
),
"test_time": pd.Series(
["08:12:40", "10:03:00", "23:00:10", "23:00:10", "18:30:00", "18:30:00"], dtype="object"
),
"test_datetime": pd.Series(
[
np.datetime64("2007-01-01T10:00:19"),
np.datetime64("2005-01-01T22:03:00"),
None,
np.datetime64("1987-01-01T11:00:00"),
None,
np.datetime64("2007-10-01T10:32:00")
], dtype="datetime64[ns]"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(sqlite_db: str) -> None:
queries = [
"SELECT test_int, test_nullint, test_str, test_float, test_bool, test_date, test_time, test_datetime FROM test_table WHERE test_int < 2",
"SELECT test_int, test_nullint, test_str, test_float, test_bool, test_date, test_time, test_datetime FROM test_table WHERE test_int >= 2",
]
df = read_sql(sqlite_db, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 0, 2, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, 5, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "こんにちは", "str2", "b", "Ha好ち😁ðy̆", None], dtype="object"
),
"test_float": pd.Series([None, 3.1, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, None, False, False, None, True], dtype="boolean"
),
"test_date": pd.Series(
[
np.datetime64("1996-03-13"),
np.datetime64("1996-02-28"),
np.datetime64("1996-01-30"),
np.datetime64("2020-01-12"),
np.datetime64("1996-04-20"),
None
], dtype="datetime64[ns]"
),
"test_time": pd.Series(
["08:12:40", "23:00:10", "10:03:00", "23:00:10", "18:30:00", "18:30:00"], dtype="object"
),
"test_datetime": pd.Series(
[
np.datetime64("2007-01-01T10:00:19"),
None,
np.datetime64("2005-01-01T22:03:00"),
np.datetime64("1987-01-01T11:00:00"),
None,
np.datetime64("2007-10-01T10:32:00")
], dtype="datetime64[ns]"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_without_partition_and_spa(sqlite_db: str) -> None:
query = """
SELECT test_bool, AVG(test_float) AS avg, SUM(test_int) AS sum
FROM test_table
WHERE test_nullint IS NOT NULL
GROUP BY test_bool
ORDER BY sum
"""
df = read_sql(sqlite_db, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([False, None, True], dtype="boolean"),
"avg": pd.Series([3.00, 5.45, -10.00], dtype="float64"),
"sum": pd.Series([3, 4, 1315], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_and_spa(sqlite_db: str) -> None:
query = """
SELECT test_bool, AVG(test_float) AS avg, SUM(test_int) AS sum
FROM test_table
WHERE test_nullint IS NOT NULL
GROUP BY test_bool
ORDER BY sum
"""
df = read_sql(sqlite_db, query, partition_on="sum", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([False, None, True], dtype="boolean"),
"avg": pd.Series([3.00, 5.45, -10.00], dtype="float64"),
"sum": pd.Series([3, 4, 1315], dtype="Int64"),
},
)
df = df.sort_values("sum").reset_index(drop=True)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result(sqlite_db: str) -> None:
query = "SELECT * FROM test_table where test_int < -100"
df = read_sql(sqlite_db, query)
expected = pd.DataFrame(
data={
"test_int": pd.Series([], dtype="object"),
"test_nullint": pd.Series([], dtype="object"),
"test_str": pd.Series([], dtype="object"),
"test_float": pd.Series([], dtype="object"),
# the remaining columns of an empty result are assumed to also come back as empty object Series,
# following the pattern of the columns above
"test_bool": pd.Series([], dtype="object"),
"test_date": pd.Series([], dtype="object"),
"test_time": pd.Series([], dtype="object"),
"test_datetime": pd.Series([], dtype="object"),
},
)
assert_frame_equal(df, expected, check_names=True)
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 20 14:08:35 2019
@author: Team BTC - <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
"""
#Sorry, the code isn't very efficient. Because of time constraints and the number of people working on the project, we couldn't do all the automation we would have liked to do.
#Code in block comments should not be run, as it will make changes to the cloud database.
# %% Importing libraries
# You may need to install dnspython in order to work with cloud server
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import json
import pandas as pd
import numpy as np
from tqdm import tqdm
from datetime import datetime as dt
import os
import time
import re
import copy
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from datetime import timedelta
from pymongo import MongoClient
import statsmodels.formula.api as smf
import statsmodels.api as sm
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
from statsmodels.tsa.api import VAR
#os.chdir('H:/Documents/Alternance/Project/')
# %% Function to scrap data from Stocktwit and add to the cloud server
# The function has 2 inputs:
# - Symbol of the asset, as a string
# - Rate limit: number of requests per execution, as an integer
# It returns the most recently inserted document.
def get_stwits_data(symbol,rate_limit):
client = MongoClient('mongodb+srv://Group_fintech:[email protected]/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
exist=0
for q in db['{}'.format(symbol)].aggregate([
{ "$group": {
"_id": None,
"min": { "$min": "$ID" }
}}
]):
exist=1
min_prev_id=q['min']
http = urllib3.PoolManager()
mid=[]
duplicates=0
for j in tqdm(range(rate_limit)):
if exist==0:
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json".format(symbol)
elif exist!=0 and len(mid)==0:
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json?max={}".format(symbol,min_prev_id)
else:
min_ID=min(mid)
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json?max={}".format(symbol,min_ID)
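# The `max` query parameter asks the Stocktwits API for messages no newer than the given ID,
# so passing the smallest ID collected so far pages backwards through older posts.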
r = http.request('GET', url)
try:
data = json.loads(r.data)
except:
print('Decode error, retry again')
continue
if duplicates==1:
print('\nThere are duplicates in the result. Someone else may be running the scraper. \nPlease try again later.')
break
if data["response"]["status"] != 200:
print("\nYour request was denied, retry in 1 hour")
time.sleep(3600)
continue
# insert_element=[]
# break
for element in data["messages"]:
mid.append(element["id"])
symbol_list=[]
for s in element['symbols']:
symbol_list.append(s['symbol'])
try:
insert_element = {"ID": element["id"], "TimeStamp": element["created_at"], "User": element["user"]["username"], "Content": element["body"],"Sentiment": (element["entities"]["sentiment"]["basic"]=="Bullish")*2-1,'Symbols':symbol_list}
except:
insert_element = {"ID": element["id"], "TimeStamp": element["created_at"], "User": element["user"]["username"], "Content": element["body"],"Sentiment": 0,'Symbols':symbol_list}
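# Sentiment mapping: posts tagged "Bullish" get +1 and posts tagged "Bearish" get -1
# (the *2-1 trick above); posts without a sentiment tag raise an exception and are
# stored as 0 (neutral/unlabelled).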
try:
result = db['{}'.format(symbol)].insert_one(insert_element)
except:
duplicates=1
break
return insert_element
# %% Execution of the function
symbol='BTC.X'
rate_limit=2000
last_ele=get_stwits_data(symbol,rate_limit)
# %% #Creating custom lexicon
#%% Finding the time interval of the database
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
#Getting the minimum id
for q in db['BTC.X'].aggregate([
{ "$group": {
"_id": None,
"min": { "$min": "$ID" }
}}
]):
minID=q['min']
#Getting the timestamp from the min ID
for post in db['BTC.X'].find({'ID':minID}):
start_time=post['TimeStamp']
#Getting the max id
for q in db['BTC.X'].aggregate([
{ "$group": {
"_id": None,
"max": { "$max": "$ID" }
}}
]):
maxID=q['max']
#Getting the timestamp from the max ID
for post in db['BTC.X'].find({'ID':maxID}):
end_time=post['TimeStamp']
start_time=dt.strptime(start_time,'%Y-%m-%dT%H:%M:%SZ')
end_time=dt.strptime(end_time,'%Y-%m-%dT%H:%M:%SZ')
period=np.arange(dt(start_time.year,start_time.month,start_time.day),dt(end_time.year,end_time.month,end_time.day),timedelta(days=1))
#%% Creating dictionary
#Creating functions to count word frequencies in positive and negative posts
porter = nltk.PorterStemmer() # stemmer used inside the dictionary builders (and by the Pool workers) below
def create_positive_dictionary_by_day(day):
dictionary=pd.DataFrame(columns=['Word','Frequency'])
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
sentimental=1
for documents in db['BTC.X'].find({'Sentiment':sentimental,"TimeStamp":{"$regex": u"{}-{:02d}-{:02d}".format(day.astype(object).year,day.astype(object).month,day.astype(object).day)}}):
word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in dictionary['Word'].tolist():
frq=copy.copy(dictionary.iloc[dictionary.index[dictionary['Word']==word].tolist()[0]][1])+1
dictionary.at[dictionary.index[dictionary['Word']==word].tolist()[0],'Frequency']=frq
else:
dictionary=dictionary.append({'Word': word ,'Frequency':1}, ignore_index=True)
return dictionary
def create_negative_dictionary_by_day(day):
dictionary=pd.DataFrame(columns=['Word','Frequency'])
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
sentimental=-1
for documents in db['BTC.X'].find({'Sentiment':sentimental,"TimeStamp":{"$regex": u"{}-{:02d}-{:02d}".format(day.astype(object).year,day.astype(object).month,day.astype(object).day)}}):
word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in dictionary['Word'].tolist():
frq=copy.copy(dictionary.iloc[dictionary.index[dictionary['Word']==word].tolist()[0]][1])+1
dictionary.at[dictionary.index[dictionary['Word']==word].tolist()[0],'Frequency']=frq
else:
dictionary=dictionary.append({'Word': word ,'Frequency':1}, ignore_index=True)
return dictionary
from multiprocessing import Pool
pool = Pool()
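# Each day in `period` is dispatched to a worker process; imap yields the per-day frequency
# tables as they complete, and the tqdm wrapper tracks progress.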
#creating positive dictionary
df=list(tqdm(pool.imap(create_positive_dictionary_by_day, period), total=len(period)))
positive_dictionary=df[0].set_index('Word')
for i in tqdm(range(1,len(df))):
positive_dictionary=positive_dictionary.add(df[i].set_index('Word'), fill_value=0)
#creating negative dictionary
df=list(tqdm(pool.imap(create_negative_dictionary_by_day, period), total=len(period)))
negative_dictionary=df[0].set_index('Word')
for i in tqdm(range(1,len(df))):
negative_dictionary=negative_dictionary.add(df[i].set_index('Word'), fill_value=0)
negative_dictionary=negative_dictionary.sort_values('Frequency',ascending=False)
positive_dictionary=positive_dictionary.sort_values('Frequency',ascending=False)
positive_dictionary.columns=['Positive Freq']
negative_dictionary.columns=['Negative Freq']
positive_dictionary=positive_dictionary/db['BTC.X'].count_documents({'Sentiment':1})
negative_dictionary=negative_dictionary/db['BTC.X'].count_documents({'Sentiment':-1})
#Combining both dictionaries
final_dict=positive_dictionary.add(negative_dictionary, fill_value=0).sort_values('Positive Freq',ascending=False)
final_dict['Pos over Neg']=final_dict['Positive Freq']/final_dict['Negative Freq']
#Removing stopwords from the dictionary
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
final_dict=final_dict.reset_index()
for i in final_dict['Word']:
if i in stop_words:
final_dict=final_dict[final_dict['Word']!=i]
#Removing words below the threshold
final_dict=final_dict.fillna(value=0)
final_dict=final_dict[(final_dict['Negative Freq']>0.0005) | (final_dict['Positive Freq']>0.0005)]
final_dict.fillna(value=0).sort_values('Pos over Neg',ascending=False).to_csv('Simple_Dictionary2.csv')
#%% Creating positive and negative word list from the lexicon
os.chdir('H:/Documents/Alternance/Project/')
lexicon=pd.read_csv('Simple_Dictionary2.csv')
lexicon=lexicon[['Word','Classification']]
neg_list=list(lexicon[lexicon['Classification']==-1]['Word'])
pos_list=list(lexicon[lexicon['Classification']==1]['Word'])
# Update lexicon result to the database
import nltk
porter = nltk.PorterStemmer()
import re
import copy
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
for i in range(32):
for documents in tqdm(db['BTC.X'].find({'Custom_Lexicon_Sentiment':{ "$exists" : False }},limit=10000)):
if documents['Sentiment']==0:
score=0
word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in neg_list:
score+=-1
if word in pos_list:
score+=1
if score >0:
senti=1
elif score <0:
senti=-1
else:
senti=0
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Custom_Lexicon_Sentiment':senti}})
else:
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Custom_Lexicon_Sentiment':documents['Sentiment']}})
#%% Creating positive and negative word list from the teacher lexicon
os.chdir('H:/Documents/Alternance/Project/')
lexicon=pd.read_csv('l2_lexicon.csv',sep=';')
import utils
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from tqdm import tqdm
import numpy as np
import pandas as pd
from itertools import combinations, permutations
import heapq
# Here we created a class in order to store the index of a search engine
class index:
def __init__(self, name):
self.name = name
self.index = dict()
# The following class contains methods used to create the search engine itself
class search_engine:
def __init__(self, name = 'movie search engine'):
self.name = name
self.trained = False
self.vocabulary = dict()
self.search_engines = [index(name = 'Search Engine ' + str(i)) for i in range(1,3)]
self.vectorizer = CountVectorizer(tokenizer = utils.text_cleaner)
self.transformer = TfidfTransformer(smooth_idf=False)
# This method takes as input a dataframe containing the information about the movies obtained by parsing the html files.
# The returned output is the index for each search engine.
def create_engine(self, dataframe):
# storing the intro and plot in a list for search engines 1 & 2
words_list = []
for i in tqdm(range(len(dataframe))):
words_list.append(str(dataframe.loc[i]['Plot']) + ' ' + str(dataframe.loc[i]['Intro']))
print('word list is made')
print('word frequency is running...')
# computing the word frequency for each document
words_freq = self.vectorizer.fit_transform(words_list)
words_freq = words_freq.toarray()
corpus_words = self.vectorizer.get_feature_names()
print('index initiating...')
# creating the vocabulary and initiating the index
for i,word in enumerate(corpus_words):
self.vocabulary[word] = i
self.search_engines[0].index[i]=set()
self.search_engines[1].index[i]=set()
# updating the index for search engine 1
# keep an array of document IDs
docs_id = np.array(range(words_freq.shape[0]))
# Pick the column of the word frequency matrix related to each term. Positive values mean the term appears in the document
for j in tqdm(range(words_freq.shape[1])):
term_freq = words_freq[:,j] > 0
doc_id = docs_id[term_freq]
# Updating the index related to each term with the document IDs that contain the term
for i in range(doc_id.shape[0]):
self.search_engines[0].index[j].update({doc_id[i]})
# updating the index for search engine 2
# transforming the word frequency to tf-idf score
tfidf = self.transformer.fit_transform(words_freq)
tfidf = tfidf.toarray()
# Pick the column of the tf-idf matrix related to each term. Positive values mean the term appears in the document
for j in tqdm(range(words_freq.shape[1])):
term_freq = words_freq[:,j] > 0
term_tfidf = tfidf[term_freq,j]
doc_id = docs_id[term_freq]
# Updating the index with (doc_id, tfidf) tuples for each term, for the documents that contain the term
for i in range(term_tfidf.shape[0]):
self.search_engines[1].index[j].update({(doc_id[i],term_tfidf[i])})
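# Resulting index layout: engine 1 maps each term id to a set of document ids, while engine 2
# maps each term id to a set of (document id, tf-idf) tuples,
# e.g. index[42] -> {(3, 0.18), (17, 0.05)} (illustrative values only).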
# mark the search engine as trained in order to avoid retraining and to allow checking whether queries can be run
self.trained = True
# The method below takes as input the search engine ID, a query and the dataframe, and returns the results yielded by running the search engine.
# It also provides suggestions to the user to enhance the query.
def query(self, search_engine, q, dataframe):
# check whether the engine is trained or not
if self.trained:
# parse the query with text_cleaner so its words match the tokenized and stemmed terms used to build the index
query_words = utils.text_cleaner(q)
if search_engine == 1:
# check that all query terms appear in the document and output the 'Title', 'Intro' and 'Url' columns
return pd.DataFrame([dataframe.loc[j][['Title','Intro','Url']]
for j in set.intersection(*[self.search_engines[search_engine -1].index[i]
for i in [self.vocabulary[t]
for t in query_words if t in self.vocabulary.keys()]])])
if search_engine in [2, 3]:
# pick the documents in which at least one of the query words appears
doc_ids = [self.search_engines[1].index[t] for t in [self.vocabulary[w] for w in query_words if w in self.vocabulary.keys()]]
mat = []
for v in doc_ids:
d = dict()
for i in v:
d[i[0]]=i[1]
mat.append(d)
# build a pandas dataframe (matrix) whose elements are the tf-idf of each term (column) in each document (row)
Results = pd.DataFrame(mat).transpose()
# Fill zero for those terms with NaN tfidf (when the term doesn't appear in the document)
Results.fillna(0, inplace = True)
# Drop the rows that do not satisfy the conjunctive query
Results = Results.loc[[all(Results.loc[i] > 0) for i in Results.index]]
# computing the cosine similarity for search engine 2
Results['Score'] = Results.apply(utils.cosine_similarity_se2, axis = 1)
Results['Doc_ID'] = Results.index
# store the results as (score, doc_id) tuples so the output can be heapified
hp = [(Results.loc[row]['Score'], Results.loc[row]['Doc_ID']) for row in Results.index]
heapq.heapify(hp)
# Keep only the top-50 scores from the heap
hp_res = heapq.nlargest(50, hp)
# output 'Title', 'Intro' and 'URL' for top-50
Results = pd.DataFrame([dataframe.loc[j][['Title','Intro','Url']] for j in [a[1] for a in hp_res]])
## Analysis of Study
################################################################################
### Setup -- Data Loading and Cleaning
################################################################################
###############
### Imports
###############
# Warning Supression
import warnings
warnings.simplefilter("ignore")
# Standard
import pandas as pd
import numpy as np
import os, sys
# Statistical Modeling/Curve Fitting
import statsmodels.api as sm
import statsmodels.formula.api as smf
from scipy import stats
from scipy.optimize import curve_fit
# Plotting
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.ticker import FuncFormatter
from matplotlib import rcParams
# Helpers
from scripts.libraries.helpers import load_pickle
###############
### Plot Helpers
###############
## Plotting Variables
standard_fig = (6.5, 4.2)
plot_dir = "./plots/"
stats_plots = plot_dir + "analysis/"
# rcParams['font.family'] = 'sans-serif'
# rcParams['font.sans-serif'] = ['Helvetica']
rcParams["errorbar.capsize"] = 5
FIGURE_FMT = ".pdf"
## Plot subdirectories
for d in [plot_dir, stats_plots]:
if not os.path.exists(d):
os.mkdir(d)
###############
### General Helpers
###############
## Create Equal Percentiles
create_percentiles = lambda n_bins: np.linspace(0,100, n_bins+1)
## Using Percentiles, Estimate Equal Bin Ranges on a set of Values
create_equal_bins = lambda values, n_bins: np.percentile(values, create_percentiles(n_bins))
## Standard Error
def std_error(vals): return np.std(vals) / np.sqrt(len(vals) - 1)
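# Note: np.std (population std, ddof=0) divided by sqrt(n - 1) is algebraically equal to the
# usual standard error of the mean, i.e. the sample standard deviation (ddof=1) over sqrt(n).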
## Assign Value to a Bin
assign_bin = lambda val, bin_array: np.max([b for b in range(len(bin_array)) if val >= bin_array[b]])
create_bin_strings = lambda values, bins: ["[%d,%d)" % (np.ceil(x),np.ceil(y)) for x,y in list(zip(bins[:-1], bins[1:]))]+ ["[%d,%d]" % (bins[-1], max(values))]
## Flatten a List of Lists
flatten = lambda l: [item for sublist in l for item in sublist]
## Holm-Bonferroni Correction
def apply_bonferroni_correction(df,
pval_col,
alpha = 0.05):
"""
Apply Holm-Bonferroni correction to Wald Test statistics (https://en.wikipedia.org/wiki/Holm%E2%80%93Bonferroni_method)
"""
comparisons = [a for a in df.index.values if a not in ["Intercept", "groups RE"]]
df = df.replace("",np.nan).dropna()
pvals = df.loc[comparisons][[pval_col,"df_constraint"]].astype(float)
n_comparisons = pvals.df_constraint.sum()
pvals = pvals.sort_values(pval_col,ascending=True)
pvals["bonferroni_max"] = (alpha / (n_comparisons + 1 - pvals["df_constraint"].cumsum()))
pvals["k"] = np.arange(len(pvals))
k_argmin = pvals.loc[pvals[pval_col] > pvals["bonferroni_max"]].k.min()
pvals["significant"] = np.nan
pvals.loc[pvals["k"] < k_argmin,"significant"] = "*"
df = pd.merge(df,
pvals[["significant"]],
how = "left",
left_index=True,
right_index=True)
df["significant"] = df["significant"].fillna("")
return df
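# Worked example (illustrative, three single-df comparisons): with sorted p-values
# [0.001, 0.02, 0.04] and alpha = 0.05, the Holm thresholds are 0.05/3, 0.05/2 and 0.05/1;
# testing stops at the first p-value exceeding its threshold, and all comparisons before
# that point are flagged "*".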
## Mixed LM Model Summary
def summarize_lm_model(model,
alpha = 0.05):
## Model Params
model_fit = model.summary().tables[0]
model_fit.columns = [""] * 4
## Model Statistics
model_summary = model.summary().tables[1]
wald_test = apply_bonferroni_correction(model.wald_test_terms().table,
"pvalue",
alpha)
wald_test.rename(columns = {"statistic":"chi_squared"},inplace=True)
## Display
return model_summary, wald_test
## Wald Significant Values
def print_significant(model,
wald_stats):
"""
Print out significant effects
"""
sample_size = int(model.summary().tables[0].set_index(0).loc["No. Groups:", 1])
sig_effects = wald_stats.loc[wald_stats.significant == "*"].copy()
for factor in sig_effects.index:
chi2 = wald_stats.loc[factor]["chi_squared"]
p = wald_stats.loc[factor]["pvalue"].item()
df = wald_stats.loc[factor]["df_constraint"]
outstring = "- {} chi^2({}, N = {}) = {:.5f}, p = {:.5f}".format(factor, df, sample_size, chi2, p )
print(outstring)
## Bootstrapped Confidence Interval
def bootstrap_ci(values,
alpha = 0.05,
func = np.mean,
sample_percent = 20,
samples = 1000):
"""
Bootstraped Confidence Interval
"""
processed_vals = []
values = np.array(values)
for sample in range(samples):
sample_vals = np.random.choice(values, int(values.shape[0] * sample_percent/100.), replace = True)
processed_vals.append(func(sample_vals))
return np.percentile(processed_vals, [alpha*100/2, 50, 100. - (alpha*100/2)])
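# The returned array holds the [alpha/2, 50, 1 - alpha/2] percentiles of the bootstrap
# distribution of `func` (lower bound, median, upper bound), estimated from `samples`
# resamples of `sample_percent`% of the data.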
###############
### Global Variables
###############
# qvc Variables
qvc_sync_metrics = ["met_qvc_last5"]
qvc_cont_metrics = ["nomet_qvc_last5"]
qvc_metrics = qvc_sync_metrics + qvc_cont_metrics
# Error Variables
error_sync_metrics = ["met_sync_error_last5",
"met_sync_error_last5_rel"]
error_cont_metrics = ["nomet_sync_error_last5",
"nomet_sync_error_last5_rel"]
error_metrics = error_sync_metrics + error_cont_metrics
# Drift Variables
drift_metrics = ["nomet_drift",
"nomet_drift_rel",
"nomet_drift_regression"]
# Variable Types
factor_variables = ["trial",
"speed_occurrence",
"trial_speed",
"gender",
"musical_experience",
"sport_experience"]
continuous_variabless = ["preferred_period",
"age"]
all_variables = factor_variables + continuous_variabless
# Fixed Primary Variables
primary_variables = ["age",
"gender",
"trial",
"speed_occurrence",
"trial_speed",
"musical_experience",
"sport_experience"]
###############
### Load and Filter Data
###############
# Load Results (file generated by `extract_tap_metrics.py`)
results = pd.read_csv("./data/processed_results.csv")
# Drop Rows with Null Primary Metrics
results = results.loc[results[qvc_metrics + error_metrics + drift_metrics + primary_variables].isnull().sum(axis = 1) == 0]
# Drop Subjects without 6 Trials
results = results.loc[results.subject.isin(set(results.subject.value_counts().loc[results.subject.value_counts() == 6].index.values))].copy()
# Add Age Bins (Explict Choice to Align with "Pitchers" study)
age_bins = [5,10,13,20,30,50,69] # Boundaries
age_bin_points = [7.5, 11.0, 16, 26, 40, 59] # Space on x-axis for plotting the bin
age_bin_strings = ["5-9","10-12","13-19","20-29","30-49","50+"] # Named Age Bins
results["age_bin"] = results["age"].map(lambda val: assign_bin(val, age_bins)) # Apply
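# e.g. a 27-year-old maps to bin index 3 given boundaries [5,10,13,20,30,50,69], i.e. the "20-29" bin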
###############
### Add Disorder Flag
###############
## Specify Disorders to Flag
disorder_filters = ["BROKEN WRIST",
"CARPAL TUNNEL",
"ACUTE ARTHRITIS",
"BROKEN WRIST (2 YEARS AGO)",
"AUTISM","ADHD","TENDONITIS",
"BROKEN ARMS, BACK, ANKLE",
"ARTHRITIS",
"COMPLEX REGIONAL PAIN SYNDROME",
"TICK DISORDER",
"FIBROMYALGIA",
"CARPAL TUNNEL (BILATERAL)",
"SLIGHT GRIP PAIN (NONE DURING EXPERIMENT)",
"AGENESIS OF THE CORPUS COLLOSUM",
"CONNECTIVE TISSUE DISORDER",
"CERVICAL FUSION (NECK)",
"MALLET FINGER"]
## Remove Disordered Subjects
results["healthy"] = np.logical_not(results.specify_disorder.isin(disorder_filters))
###############
### Preferred Period Filtering
###############
"""
Notes on Threshold Selection:
- "The Time of Our Lives: Life Span Development of Timing and Event Tracking" (see xlsx in /literature/) found a
standard error in preferred period that averaged around 6.5% (for consistent subjects). The range across groups
was anywhere from 3% to 12%.
- "Preferred rates of repetitive tapping and categorical time production" found a mean semiinterquartile range in
preferred inter-tap-interval of 3.3%. Note: only 16 subjects.
We conclude that a 10% threshold is reasonable given the variance. The main effects highlighted in the paper should remain
consistent before/after filtering, and we can note special cases where this wasn't true. We can also include
the relative difference as a parameter in each model to control for differences.
"""
## Filter Params
apply_preferred_period_filter = True
filter_by_threshold = True
threshold = 10
## Load file created in scripts/evaluate_preferred_period_calculations.py
pp_filter_file = "./data/preferred_period_filtering_map.csv"
pp_filter = pd.read_csv(pp_filter_file)
## Merge Preferred Period Calculation Analysis Results
results = pd.merge(results,
pp_filter,
on = "subject",
how = "left")
## Drop Subjects with Miscalculated Preferred Periods
if apply_preferred_period_filter:
if filter_by_threshold:
pp_keepers = set(pp_filter.loc[pp_filter.absolute_rel_difference <= threshold].subject)
else:
pp_keepers = set(pp_filter.loc[pp_filter.online_in_range].subject)
results["proper_preferred_period_calculation"] = results.subject.isin(pp_keepers)
## Alert User of Additional Removal
subjects_filtered = results.drop_duplicates(["subject"])["proper_preferred_period_calculation"].value_counts()
print("Applying Preferred Period Online Calculation Filter.")
print("Filtered {} more subjects. Started at {}. Now at {}".format(subjects_filtered[False],
len(results.subject.unique()),
subjects_filtered[True]))
## Apply Filter
results = results.loc[results.proper_preferred_period_calculation].reset_index(drop=True).copy()
results.drop("proper_preferred_period_calculation", axis=1, inplace=True)
###############
### Absolute Values
###############
# Absolute Synchronization Error (For when we don't care about directional differences, just absolute)
for error in error_metrics + drift_metrics:
results["abs_%s" % error] = np.abs(results[error])
# Update Variable Groups
error_metrics = error_metrics + ["abs_{}".format(met) for met in error_metrics]
drift_metrics = drift_metrics + ["abs_{}".format(met) for met in drift_metrics]
###############
### Account for Trial/Block Effects
###############
## Average Metrics Across Both Trials for a Given Trial Speed
metrics_to_average = error_metrics + qvc_metrics + drift_metrics
mean_results = pd.pivot_table(index = ["subject","trial_speed"],
values = metrics_to_average,
aggfunc = np.mean,
data = results).reset_index()
merge_vars = ["subject",
"age",
"age_bin",
"gender",
"musical_experience",
"musical_experience_yrs",
"sport_experience",
"sport_experience_yrs",
"preferred_period",
"healthy",
"rel_difference",
"absolute_rel_difference",
"online_in_range"]
mean_results = pd.merge(mean_results,
results.drop_duplicates("subject")[merge_vars],
left_on = "subject",
right_on = "subject")
################################################################################
### Independent Variable Distributions
################################################################################
## De-duplicate the data set based on subject (only want unique characteristics)
subject_deduped = results.drop_duplicates("subject")
###############
### Preferred Period
###############
## Preferred Period Distribution
mean_pp, std_pp = subject_deduped.preferred_period.mean(), subject_deduped.preferred_period.std()
counts, bins = np.histogram(subject_deduped["preferred_period"], 20)
fig, ax = plt.subplots(1,1, figsize = standard_fig, sharey = False, sharex = False)
b = ax.hist(subject_deduped["preferred_period"],
bins = bins,
density = False,
color = "navy",
edgecolor = "navy",
alpha = .3,
label = "")
ax.set_xlabel("Preferred Period (ms)",
fontsize = 18,
fontweight = "bold")
ax.set_ylabel("Subjects",
fontsize = 18,
fontweight = "bold")
ax.axvline(mean_pp,
color = "navy",
linestyle = "--",
linewidth = 3,
label = "Mean = {:,.0f} ms".format(mean_pp))
ax.axvline(mean_pp - std_pp,
color = "navy",
linestyle = ":",
linewidth = 3,
label = "SD = {:,.0f} ms".format(std_pp))
ax.axvline(mean_pp + std_pp,
color = "navy",
linestyle = ":",
linewidth = 3,
label = "")
ax.legend(loc = "upper right",
ncol = 1,
fontsize = 16,
frameon=True,
handlelength=2,
borderpad=0.25)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(labelsize=16)
fig.tight_layout()
plt.savefig(stats_plots + "preferred_period" + FIGURE_FMT)
plt.savefig(stats_plots + "preferred_period" + ".png")
plt.close()
## No linear correlation between age and preferred period
fig, ax = plt.subplots(1,1, figsize = standard_fig, sharey = False, sharex = False)
ax.scatter(subject_deduped["age"],
subject_deduped["preferred_period"],
color = "darkblue",
edgecolor="darkblue",
alpha = .5,
s = 50)
ax.set_xlabel("Age (yrs.)",
fontsize = 18,
fontweight = "bold")
ax.set_ylabel("Preferred Period (ms)",
fontsize = 18,
fontweight = "bold")
ax.tick_params(labelsize=16)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
fig.tight_layout()
plt.savefig(stats_plots + "age_preferred_period" + FIGURE_FMT)
plt.savefig(stats_plots + "age_preferred_period" + ".png")
plt.close()
## Preferred Period Effects
pp_form = "preferred_period ~ age + C(gender) + C(musical_experience)"
pp_model = smf.ols(pp_form, subject_deduped).fit()
pp_stats, pp_sig = summarize_lm_model(pp_model)
## Preferred Period ~ Musical Experience
pp_ci = lambda vals: tuple(bootstrap_ci(vals, sample_percent = 30, samples = 1000))
me_pp_avg = subject_deduped.groupby(["musical_experience"]).agg({"preferred_period":[pp_ci,np.mean,np.std,std_error]})["preferred_period"]
for i in range(3): me_pp_avg[i] = me_pp_avg["<lambda_0>"].map(lambda j: j[i])
fig, ax = plt.subplots(figsize = standard_fig)
ax.bar([0, 1],
me_pp_avg["mean"].values,
yerr = me_pp_avg["std_error"].values,
color = "navy",
edgecolor = "navy",
alpha = .5)
ax.set_xticks([0,1])
ax.set_xticklabels(["No","Yes"])
ax.set_xlabel("Musical Experience",
fontsize = 18,
fontweight = "bold")
ax.set_ylabel("Preferred Period (ms)",
fontsize = 18,
fontweight = "bold")
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(labelsize = 16)
fig.tight_layout()
fig.savefig(stats_plots + "musical_experience_preferred_period" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "musical_experience_preferred_period" + ".png", dpi=300)
plt.close()
## Categorical Relationship between age and preferred Period
age_pp_aggs = {"preferred_period":[np.mean, std_error, pp_ci]}
age_bin_pp_avg = subject_deduped.groupby(["age_bin"]).agg(age_pp_aggs)["preferred_period"]
for i in range(3): age_bin_pp_avg[i] = age_bin_pp_avg["<lambda_0>"].map(lambda j: j[i])
fig, ax = plt.subplots(figsize = standard_fig)
ax.fill_between([age_bin_points[0]-1] + age_bin_points[1:-1] + [age_bin_points[-1]+1+1],
age_bin_pp_avg[0].values,
age_bin_pp_avg[2].values,
color = "navy",
alpha = .3)
ax.errorbar(age_bin_points,
age_bin_pp_avg["mean"].values,
yerr = age_bin_pp_avg["std_error"].values,
color="navy",
linewidth=2)
ax.set_xticks(age_bin_points)
ticks = ax.set_xticklabels(age_bin_strings,
rotation = 45,
ha = "right")
ax.set_xlabel("Age (yrs.)",
fontsize = 18,
fontweight = "bold")
ax.set_ylabel("Preferred Period",
fontsize = 18,
fontweight = "bold")
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(labelsize = 16)
fig.tight_layout()
fig.savefig(stats_plots + "age_bin_preferred_period" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "age_bin_preferred_period" + ".png", dpi=300)
plt.close()
###############
### Age & Gender
###############
## Replace Binary Gender Forms
results["gender"] = results["gender"].map(lambda g: "Female" if g == 1 else "Male")
subject_deduped["gender"] = subject_deduped["gender"].map(lambda g: "Female" if g == 1 else "Male")
## Plot Age + Gender Distribution
fig, ax = plt.subplots(figsize = standard_fig)
max_count = 0
for a, age in enumerate(sorted(subject_deduped.age_bin.unique())):
for g, gender in enumerate(["Female","Male"]):
demo_count = len(subject_deduped.loc[(subject_deduped.age_bin == age)&(subject_deduped.gender==gender)])
ax.bar(0.025 + g*0.45 + a,
demo_count,
color = {"Male":"teal","Female":"orangered"}[gender],
alpha = .5,
align = "edge",
width = .45,
label = gender if a == 0 else "",
edgecolor = {"Male":"darkslategray","Female":"crimson"}[gender])
if demo_count > 0:
ax.text(0.25 + g*0.45 + a,
demo_count + 1,
demo_count,
ha = "center",
fontsize = 18)
max_count = demo_count if demo_count > max_count else max_count
ax.legend(loc = "upper left",
frameon = True,
fontsize = 16,
handlelength=2,
borderpad = 0.25,
edgecolor="gray")
ax.set_xticks(np.arange(a+1)+.5)
ticks = ax.set_xticklabels(age_bin_strings,
rotation = 0)
ax.set_xlabel("Age (yrs.)",
fontsize = 18,
fontweight = "bold")
ax.set_ylabel("Subjects",
fontsize = 18,
fontweight = "bold")
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
lim = ax.set_ylim(0, max_count+5)
ax.tick_params(labelsize = 16)
fig.tight_layout()
fig.savefig(stats_plots + "demographics" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "demographics" + ".png", dpi=300)
plt.close()
## Age + Gender + Musical Experience
fig, ax = plt.subplots(figsize = standard_fig)
for a, age in enumerate(sorted(subject_deduped.age_bin.unique())):
for g, gender in enumerate(["Female","Male"]):
bottom = 0
for e, experience in enumerate([1,0]):
demo_count = len(subject_deduped.loc[(subject_deduped.age_bin == age)&
(subject_deduped.gender==gender)&
(subject_deduped.musical_experience==experience)])
ax.bar(0.025 + g*0.45 + a,
demo_count,
bottom = bottom,
color = {"Male":"teal","Female":"orangered"}[gender],
alpha = 0.25 if e == 1 else .6,
align = "edge",
width = .45,
label = "{} ({})".format(gender, {1:"w/ M.E.",0:"w/o M.E."}[experience]) if a == 0 else "",
edgecolor = {"Male":"darkslategray","Female":"crimson"}[gender])
bottom += demo_count
ax.legend(loc = "upper left",
frameon = True,
fontsize = 12,
handlelength = 2,
borderpad = 0.25,
edgecolor = "gray")
ax.set_xticks(np.arange(a+1)+.5)
ticks = ax.set_xticklabels(age_bin_strings,
rotation = 0)
ax.set_xlabel("Age (yrs.)",
fontsize = 18,
fontweight = "bold")
ax.set_ylabel("Subjects",
fontsize = 18,
fontweight = 'bold')
ax.tick_params(labelsize = 16)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
fig.tight_layout()
fig.savefig(stats_plots + "demographics_musicalexperience" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "demographics_musicalexperience" + ".png", dpi=300)
plt.close()
###############
### Musical Experience
###############
## Musical Experience Distribution
musical_experience_check = lambda row: "No" if row["musical_experience"] == 0 else \
"Yes\n(Specified)" if row["musical_experience_yrs"] >= 1 else \
"Yes\n(Unspecified)"
subject_deduped["musical_experience_specified"] = subject_deduped.apply(musical_experience_check, axis = 1)
musical_experience_dist = subject_deduped.musical_experience_specified.value_counts()
music_exp_subset = subject_deduped.loc[subject_deduped.musical_experience_yrs >= 1]
fig, ax = plt.subplots(1,2, figsize = standard_fig)
musical_experience_dist.plot.barh(ax = ax[0],
color = "navy",
edgecolor = "navy",
alpha = .5)
ax[0].set_yticks(np.arange(3))
ax[0].set_yticklabels(musical_experience_dist.index.values,
multialignment = "center")
ax[1].scatter(music_exp_subset["age"],
music_exp_subset["musical_experience_yrs"],
color = "navy",
edgecolor = "navy",
s = 50,
alpha = .5)
ax[1].plot([0, music_exp_subset.age.max()],
[0, music_exp_subset.age.max()],
linestyle = "--",
color = "navy",
alpha = .5)
ax[0].set_xlabel("Subjects",
fontsize=18,
fontweight = "bold")
ax[1].set_xlabel("Age (yrs.)",
fontsize=18,
fontweight = "bold")
ax[0].set_ylabel("Experience (Response)",
labelpad = 10,
fontsize=18,
fontweight = "bold")
ax[1].set_ylabel("Experience (yrs.)",
labelpad = 10,
fontsize=18,
fontweight = "bold")
ax[1].set_xlim(left=0)
ax[1].set_ylim(bottom=0)
for a in ax:
a.tick_params(labelsize = 16)
a.spines['right'].set_visible(False)
a.spines['top'].set_visible(False)
fig.tight_layout()
fig.subplots_adjust(wspace = .6)
plt.savefig(stats_plots + "musical_experience" + FIGURE_FMT)
plt.savefig(stats_plots + "musical_experience" + ".png")
plt.close()
## Musical Experience with Age/Gender
max_gender_count = 0
max_age_count = 0
fig, ax = plt.subplots(1,2, figsize = standard_fig)
for x, experience in enumerate([0,1]):
for g, gender in enumerate(["Female","Male"]):
demo_counts = len(subject_deduped.loc[(subject_deduped.musical_experience==experience)&
(subject_deduped.gender==gender)])
ax[0].bar(0.025 + x*.45 + g,
demo_counts,
color = "teal" if g == 1 else "orangered",
alpha = {0:.4,1:.8}[experience],
label = {1:"Yes",0:"No"}[experience] if g == 0 else "",
width = .45,
align = "edge",
edgecolor = "darkslategray" if g == 1 else "crimson")
max_gender_count = demo_counts if demo_counts > max_gender_count else max_gender_count
for a, age in enumerate(subject_deduped.age_bin.unique()):
demo_counts = len(subject_deduped.loc[(subject_deduped.musical_experience==experience)&
(subject_deduped.age_bin==age)])
ax[1].bar(0.025 + x*.45 + a,
demo_counts,
color = "navy",
alpha = {0:.4,1:.8}[experience],
label = {1:"Yes",0:"No"}[experience] if a == 0 else "",
width = .45,
align = "edge",
edgecolor = "navy")
max_age_count = demo_counts if demo_counts > max_age_count else max_age_count
ax[0].set_ylim(0,
max_gender_count+5)
ax[1].set_ylim(0,
max_age_count + 5)
ax[0].set_xticks(np.arange(2)+.5)
ax[0].set_xticklabels(["Female","Male"],
rotation = 45,
ha = "right")
ax[1].set_xticks(np.arange(a+1)+.5)
ax[1].set_xticklabels(age_bin_strings,
rotation = 45,
ha="right")
handles, labels = ax[1].get_legend_handles_labels()
leg = ax[1].legend(handles,
labels,
loc='upper right',
ncol = 1,
fontsize = 12,
frameon = True,
title="Musical\nExperience",
edgecolor="gray",
borderpad = 0.25,
handlelength = 2)
plt.setp(leg.get_title(), fontsize=12, multialignment="center")
for a in ax:
a.set_ylabel("Subjects",
fontsize = 18,
fontweight = "bold")
a.tick_params(labelsize = 14)
a.spines['right'].set_visible(False)
a.spines['top'].set_visible(False)
fig.text(0.3,
0.04,
"Gender",
fontsize=18,
ha="center",
va="center",
fontweight="bold")
fig.text(0.8,
0.04,
"Age",
fontsize=18,
ha="center",
va="center",
fontweight="bold")
fig.tight_layout()
fig.subplots_adjust(bottom=0.25)
plt.savefig(stats_plots + "musical_experience_demographics" + FIGURE_FMT)
plt.savefig(stats_plots + "musical_experience_demographics" + ".png")
plt.close()
###############
### Make Amenable to Condition Modeling
###############
## Choose Metrics
sync_error = "abs_met_sync_error_last5_rel"
cont_error = "abs_nomet_sync_error_last5_rel"
sync_qvc_metric = "met_qvc_last5"
cont_qvc_metric = "nomet_qvc_last5"
drift_metric = "nomet_drift_rel"
## Separate Columns
standard_cols = ["subject",
"trial_speed",
"age",
"age_bin",
"gender",
"musical_experience",
"musical_experience_yrs",
"sport_experience",
"sport_experience_yrs",
"preferred_period",
"healthy",
"rel_difference",
"absolute_rel_difference",
"online_in_range"]
met_cols = [sync_error, sync_qvc_metric]
nomet_cols = [cont_error, cont_qvc_metric, drift_metric]
## Separate DataFrames
met_df = mean_results[standard_cols + met_cols].rename(columns = {sync_error:"error",
sync_qvc_metric:"qvc"}).copy()
nomet_df = mean_results[standard_cols + nomet_cols].rename(columns = {cont_error:"error",
cont_qvc_metric:"qvc",
drift_metric:"drift"}).copy()
## Add Condition Columns
met_df["condition"] = "paced"
nomet_df["condition"] = "unpaced"
## Concatenate DataFrames
merged_results_df = pd.concat([met_df, nomet_df]).reset_index(drop=True)
## Dump Merged Results for Inter-task Analysis
merged_results_df.to_csv("./data/merged_processed_results.csv",index=False)
################################################################################
### Error Analysis
################################################################################
## Fit Mixed LM Model
error_model_formula = "error ~ C(gender) + age + preferred_period + trial_speed + C(musical_experience) + C(condition) + rel_difference"
error_model = smf.mixedlm(error_model_formula,
data = merged_results_df,
groups = merged_results_df["subject"]).fit(reml=True)
error_summary, error_wald = summarize_lm_model(error_model)
print("Error Effects:"); print_significant(error_model, error_wald)
"""
Error Effects (No Preferred Period Filtering):
- trial_speed chi^2(2, N = 303) = 12.05179, p = 0.00242
- C(musical_experience) chi^2(1, N = 303) = 11.96633, p = 0.00054
- C(condition) chi^2(1, N = 303) = 651.62292, p = 0.00000
- age chi^2(1, N = 303) = 9.48560, p = 0.00207
- preferred_period chi^2(1, N = 303) = 9.26603, p = 0.00233
Error Effects (With Uncertainty Filtering):
- C(condition) chi^2(1, N = 267) = 599.39117, p = 0.00000
Error Effects (With 5% Threshold Filtering)
- C(musical_experience) chi^2(1, N = 277) = 7.50678, p = 0.00615
- C(condition) chi^2(1, N = 277) = 629.22545, p = 0.00000
Error Effects (With 10% Threshold Filtering)
- C(musical_experience) chi^2(1, N = 282) = 7.70023, p = 0.00552
- C(condition) chi^2(1, N = 282) = 639.63177, p = 0.00000
"""
## Plot Musical Experience
musical_experience_avg = merged_results_df.groupby(["condition","trial_speed","musical_experience"]).agg({"error":[np.mean,std_error]}).reset_index()
bar_width = .95 / 2
fig, ax = plt.subplots(1,2, figsize = standard_fig, sharey = True)
for c, cond in enumerate(["paced","unpaced"]):
for s, speed in enumerate(["SlowedDown","NoChange","SpedUp"]):
for e, experience in enumerate([0,1]):
data_to_plot = musical_experience_avg.loc[(musical_experience_avg.condition == cond)&
(musical_experience_avg.trial_speed==speed)&
(musical_experience_avg.musical_experience==experience)]
ax[c].bar(0.025 + s + bar_width*e,
data_to_plot["error"]["mean"],
yerr=data_to_plot["error"]["std_error"],
color = "blue" if c == 0 else "green",
edgecolor = "blue" if c == 0 else "green",
align = "edge",
width = bar_width,
label = "" if s != 0 else "No" if e == 0 else "Yes",
alpha = .4 if e == 0 else .8)
for a in ax:
a.set_xticks(np.arange(3)+.5)
a.set_xticklabels(["20%\nSlower","Preferred","20%\nFaster"])
a.tick_params(labelsize = 14)
a.spines['right'].set_visible(False)
a.spines['top'].set_visible(False)
ax[0].set_ylabel("Absolute Timing Error",
fontsize = 16,
multialignment = "center",
labelpad = 15,
fontweight = "bold")
ax[0].yaxis.set_major_formatter(FuncFormatter(lambda x, pos: "{:.0f}%".format(x)))
ax[0].set_title("Synchronization",
fontsize = 14,
fontweight = "bold",
fontstyle="italic")
ax[1].set_title("Continuation",
fontsize = 14,
fontweight = "bold",
fontstyle="italic")
leg = ax[0].legend(loc = "upper left",
fontsize = 12,
frameon = True,
title="Musical\nExperience",
borderpad=0.25,
edgecolor = "gray",
handlelength=2)
plt.setp(leg.get_title(),fontsize=16,multialignment="center")
fig.tight_layout()
fig.subplots_adjust(wspace = .1, bottom = .2)
fig.text(0.55,
0.02,
'Metronome Condition',
ha='center',
fontsize = 14,
fontweight = "bold")
fig.savefig(stats_plots + "error_musical_experience" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "error_musical_experience" + ".png", dpi=300)
plt.close()
## Plot Age
error_aggs = lambda vals: tuple(bootstrap_ci(vals, sample_percent = 30, samples = 1000))
age_bin_ci = merged_results_df.groupby(["condition","age_bin"]).agg(error_aggs).reset_index()
age_bin_sem = merged_results_df.groupby(["condition","age_bin"]).agg({"error":std_error}).reset_index()
for i in range(3): age_bin_ci[i] = age_bin_ci["error"].map(lambda j: j[i])
fig, ax = plt.subplots(1,1, figsize = standard_fig, sharey = True)
for c, cond in enumerate(["paced","unpaced"]):
ci_data_to_plot = age_bin_ci.loc[age_bin_ci.condition == cond]
se_data_to_plot = age_bin_sem.loc[age_bin_sem.condition == cond]
ax.errorbar(age_bin_points,
ci_data_to_plot[1].values,
yerr = se_data_to_plot["error"].values,
color = "blue" if c == 0 else "green",
linewidth = 2,
alpha = .5)
ax.fill_between([age_bin_points[0]-1] + age_bin_points[1:-1] + [age_bin_points[-1]+1],
ci_data_to_plot[0].values,
ci_data_to_plot[2].values,
color = "blue" if c == 0 else "green",
alpha = .2,
label = "Synchronization" if c == 0 else "Continuation")
ax.set_ylim(bottom = 0,
top = 15)
ax.set_xlabel("Age (yrs.)",
fontsize = 16,
labelpad = 10,
fontweight = "bold")
ax.tick_params(labelsize = 14)
ax.set_xticks(age_bin_points)
ax.set_xticklabels(age_bin_strings, rotation=45, ha="right")
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel("Absolute Timing Error",
fontsize = 16,
labelpad = 10,
fontweight = "bold")
ax.yaxis.set_major_formatter(FuncFormatter(lambda x, pos: "{:.0f}%".format(x)))
ax.legend(loc = "upper right",
frameon = True,
facecolor = "white",
fontsize = 12,
edgecolor = "gray",
handlelength = 2,
borderpad = 0.25)
fig.tight_layout()
fig.savefig(stats_plots + "error_age" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "error_age" + ".png", dpi=300)
plt.close()
## Plot Age + Gender
error_aggs = lambda vals: tuple(bootstrap_ci(vals, sample_percent = 30, samples = 1000))
age_bin_ci = merged_results_df.groupby(["gender","age_bin"]).agg(error_aggs).reset_index()
age_bin_sem = merged_results_df.groupby(["gender","age_bin"]).agg({"error":std_error}).reset_index()
for i in range(3): age_bin_ci[i] = age_bin_ci["error"].map(lambda j: j[i])
fig, ax = plt.subplots(1,1, figsize = standard_fig, sharey = True)
for c, gend in enumerate([0,1]):
ci_data_to_plot = age_bin_ci.loc[age_bin_ci.gender == gend]
se_data_to_plot = age_bin_sem.loc[age_bin_sem.gender == gend]
ax.errorbar(age_bin_points,
ci_data_to_plot[1].values,
yerr = se_data_to_plot["error"].values,
color = "teal" if c == 0 else "orangered",
linewidth = 2,
alpha = .8)
ax.fill_between([age_bin_points[0]-1] + age_bin_points[1:-1] + [age_bin_points[-1]+1],
ci_data_to_plot[0].values,
ci_data_to_plot[2].values,
color = "teal" if c == 0 else "orangered",
alpha = .2,
label = "Male" if c == 0 else "Female")
ax.set_ylim(bottom = 0,
top = 15)
ax.set_xlabel("Age (yrs.)",
fontsize = 16,
labelpad = 10,
fontweight = "bold")
ax.tick_params(labelsize = 14)
ax.set_xticks(age_bin_points)
ax.set_xticklabels(age_bin_strings, rotation=45, ha="right")
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel("Absolute Timing Error",
fontsize = 16,
labelpad = 10,
fontweight = "bold")
ax.yaxis.set_major_formatter(FuncFormatter(lambda x, pos: "{:.0f}%".format(x)))
ax.legend(loc = "upper right",
frameon = True,
facecolor = "white",
fontsize = 16,
edgecolor = "gray",
handlelength = 2,
borderpad = 0.25)
fig.tight_layout()
fig.savefig(stats_plots + "error_age_gender" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "error_age_gender" + ".png", dpi=300)
plt.close()
################################################################################
### Drift Analysis (Directional)
################################################################################
## Fit Mixed LM Model
drift_model_formula = "drift ~ C(gender) + age + preferred_period + trial_speed + C(musical_experience) + rel_difference"
drift_model = smf.mixedlm(drift_model_formula,
data = nomet_df,
groups = nomet_df["subject"]).fit(reml=True)
drift_summary, drift_wald = summarize_lm_model(drift_model)
print("Drift Effects:"); print_significant(drift_model, drift_wald)
"""
Drift Effects (No Preferred Period Filtering):
- trial_speed chi^2(2, N = 303) = 49.25982, p = 0.00000
Drift Effects (With Uncertainty Filtering):
- trial_speed chi^2(2, N = 267) = 41.40586, p = 0.00000
- age chi^2(1, N = 267) = 7.88963, p = 0.00497
- preferred_period chi^2(1, N = 267) = 7.23700, p = 0.00714
Drift Effects (With 5% Threshold Filtering)
- trial_speed chi^2(2, N = 277) = 39.86926, p = 0.00000
- age chi^2(1, N = 277) = 6.98763, p = 0.00821
- preferred_period chi^2(1, N = 277) = 6.90328, p = 0.00860
Drift Effects (With 10% Threshold Filtering):
- trial_speed chi^2(2, N = 282) = 40.90040, p = 0.00000
- age chi^2(1, N = 282) = 6.43390, p = 0.01120
- preferred_period chi^2(1, N = 282) = 6.78579, p = 0.00919
"""
## T-Tests against no drift
slowed_t = sm.stats.ttest_ind(np.zeros(int(len(nomet_df)/3)),
nomet_df.loc[nomet_df.trial_speed == "SlowedDown"]["drift"].values)
constant_t = sm.stats.ttest_ind(np.zeros(int(len(nomet_df)/3)),
nomet_df.loc[nomet_df.trial_speed == "NoChange"]["drift"].values)
sped_t = sm.stats.ttest_ind(np.zeros(int(len(nomet_df)/3)),
nomet_df.loc[nomet_df.trial_speed == "SpedUp"]["drift"].values)
print_str = """T-Tests
- slowed_t -> {}
- sped_t -> {}
- constant_t -> {}""".format(slowed_t, sped_t, constant_t)
print(print_str)
"""
T-Tests (No Preferred Period Filtering)
- slowed_t -> (1.3275567940880735, 0.1848260950433708, 604.0)
- sped_t -> (-6.277113722042848, 6.58859685068371e-10, 604.0)
- constant_t -> (-2.1877377970784138, 0.029071224825391283, 604.0)
T-Tests (With Uncertainty Filtering)
- slowed_t -> (1.2720342240521, 0.20391669981027244, 532.0)
- sped_t -> (-5.701114563875354, 1.9749739164792474e-08, 532.0)
- constant_t -> (-1.6790128619505613, 0.09373672281069394, 532.0)
T-Tests (With 5% Threshold Filtering)
- slowed_t -> (1.2128057811917325, 0.22572285345057283, 552.0)
- sped_t -> (-5.625754382347419, 2.9390848986489808e-08, 552.0)
- constant_t -> (-1.9857100905171938, 0.04755980608810191, 552.0)
T-Tests (With 10% Threshold Filtering)
- slowed_t -> (1.0597393018947532, 0.28971845251315237, 562.0)
- sped_t -> (-5.848257479367463, 8.433969643966803e-09, 562.0)
- constant_t -> (-2.1305779018290023, 0.0335574728678482, 562.0)
"""
## Standard Bar Plot of Drift vs. Trial Speed
drift_by_trial_speed_avg = nomet_df.groupby(["trial_speed"]).agg({"drift":[np.mean, std_error]})
fig, ax = plt.subplots(1, 1, figsize = standard_fig, sharex = True, sharey = True)
for t, trial_speed in enumerate(["SlowedDown","NoChange","SpedUp"]):
data_to_plot = drift_by_trial_speed_avg.loc[trial_speed]
ax.bar(t,
data_to_plot["drift"]["mean"],
yerr = data_to_plot["drift"]["std_error"],
color = "navy",
alpha = .5,
edgecolor = "navy")
ax.axhline(0,
color = "navy",
linewidth = 1)
ax.set_xticks(np.arange(3))
ax.set_xticklabels(["20% Slower","Preferred","20% Faster"])
ax.set_xlabel("Metronome Condition",
fontsize = 16,
labelpad = 15,
fontweight = "bold")
ax.tick_params(labelsize = 14)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel("Drift (ITI Percent Change)",
fontsize = 16,
fontweight = "bold")
ax.yaxis.set_major_formatter(FuncFormatter(lambda x, pos: "{:.0f}%".format(x)))
ax.set_ylim(-1.2,3.2)
fig.tight_layout()
fig.savefig(stats_plots + "drift_trialspeed_bar" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "drift_trialspeed_bar" + ".png", dpi=300)
plt.close()
## Standard Bar of Drift vs. Trial Speed, broken down by musicianship
drift_by_trial_speed_me_avg = nomet_df.groupby(["trial_speed","musical_experience"]).agg({"drift": [np.mean, std_error]})
bar_width = .95/2
fig, ax = plt.subplots(1, 1, figsize = standard_fig, sharex = True, sharey = True)
for t, trial_speed in enumerate(["SlowedDown","NoChange","SpedUp"]):
for m in [0, 1]:
data_to_plot = drift_by_trial_speed_me_avg.loc[trial_speed, m]
ax.bar(0.025 + t + m*bar_width,
data_to_plot["drift"]["mean"],
yerr = data_to_plot["drift"]["std_error"],
color = "navy",
alpha = .4 if m == 0 else .8,
edgecolor = "navy",
label = {0:"No",1:"Yes"}[m] if t == 0 else "",
width = bar_width,
align = "edge")
ax.axhline(0,
color = "black",
linewidth = 1,
linestyle=":")
ax.set_xticks(np.arange(3)+.5)
ax.set_xticklabels(["20% Slower","Preferred","20% Faster"])
ax.set_xlabel("Metronome Condition",
fontsize = 16,
labelpad = 15,
fontweight = "bold")
ax.tick_params(labelsize = 14)
ax.set_ylabel("Drift (ITI Percent Change)",
fontsize = 16,
fontweight = "bold")
leg = ax.legend(loc = "upper left",
fontsize = 12,
frameon = True,
facecolor = "white",
title = "Musical\nExperience",
borderpad=0.25,
handlelength=2,
edgecolor = "gray")
plt.setp(leg.get_title(),fontsize=12, multialignment="center")
ax.set_ylim(-2.2,3.2)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_major_formatter(FuncFormatter(lambda x, pos: "{:.0f}%".format(x)))
fig.tight_layout()
fig.savefig(stats_plots + "drift_trialspeed_musicalexperience_bar" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "drift_trialspeed_musicalexperience_bar" + ".png", dpi=300)
plt.close()
## Standard Bar of Drift vs. Trial Speed, broken down by gender
drift_by_trial_speed_gender_avg = nomet_df.groupby(["trial_speed","gender"]).agg({"drift": [np.mean, std_error]})
bar_width = .95/2
fig, ax = plt.subplots(1, 1, figsize = standard_fig, sharex = True, sharey = True)
for t, trial_speed in enumerate(["SlowedDown","NoChange","SpedUp"]):
for m in [0, 1]:
data_to_plot = drift_by_trial_speed_gender_avg.loc[trial_speed, m]
ax.bar(0.025 + t + m*bar_width,
data_to_plot["drift"]["mean"],
yerr = data_to_plot["drift"]["std_error"],
color = "teal" if m == 0 else "orangered",
alpha = .5,
edgecolor = "darkslategray" if m == 0 else "crimson",
label = {0:"Male",1:"Female"}[m] if t == 0 else "",
width = bar_width,
align = "edge")
ax.axhline(0,
color = "black",
linewidth = 1,
linestyle=":")
ax.set_xticks(np.arange(3)+.5)
ax.set_xticklabels(["20% Slower","Preferred","20% Faster"])
ax.set_xlabel("Metronome Condition",
fontsize = 16,
labelpad = 15,
fontweight = "bold")
ax.tick_params(labelsize = 14)
ax.set_ylabel("Drift (ITI Percent Change)",
fontsize = 16,
fontweight = "bold")
ax.legend(loc = "upper left",
fontsize = 12,
frameon = True,
facecolor = "white",
borderpad=0.25,
handlelength=2,
edgecolor = "gray")
ax.set_ylim(-1.9,3.7)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_major_formatter(FuncFormatter(lambda x, pos: "{:.0f}%".format(x)))
fig.tight_layout()
fig.savefig(stats_plots + "drift_trialspeed_gender_bar" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "drift_trialspeed_gender_bar" + ".png", dpi=300)
plt.close()
## Combined Standard Drift + Gender + Musical Experience
fig, ax = plt.subplots(1, 3, figsize = standard_fig, sharex = False, sharey = True)
for t, trial_speed in enumerate(["SlowedDown","NoChange","SpedUp"]):
data_to_plot = drift_by_trial_speed_avg.loc[trial_speed]
ax[0].bar(t,
data_to_plot["drift"]["mean"],
yerr = data_to_plot["drift"]["std_error"],
color = "navy",
alpha = .5,
edgecolor = "navy")
ax[0].set_xticks(np.arange(3))
for t, trial_speed in enumerate(["SlowedDown","NoChange","SpedUp"]):
for m in [0, 1]:
data_to_plot = drift_by_trial_speed_gender_avg.loc[trial_speed, m]
ax[1].bar(0.025 + t + m*bar_width,
data_to_plot["drift"]["mean"],
yerr = data_to_plot["drift"]["std_error"],
color = "teal" if m == 0 else "orangered",
alpha = .5,
edgecolor = "darkslategray" if m == 0 else "crimson",
label = {0:"Male",1:"Female"}[m] if t == 0 else "",
width = bar_width,
align = "edge")
ax[1].set_xticks(np.arange(3)+.5)
for t, trial_speed in enumerate(["SlowedDown","NoChange","SpedUp"]):
for m in [0, 1]:
data_to_plot = drift_by_trial_speed_me_avg.loc[trial_speed, m]
ax[2].bar(0.025 + t + m*bar_width,
data_to_plot["drift"]["mean"],
yerr = data_to_plot["drift"]["std_error"],
color = "navy",
alpha = .4 if m == 0 else .8,
edgecolor = "navy",
label = {0:"No",1:"Yes"}[m] if t == 0 else "",
width = bar_width,
align = "edge")
ax[2].set_xticks(np.arange(3)+.5)
ax[1].legend(loc = "upper left",
fontsize = 12,
frameon = True,
facecolor = "white",
borderpad=0.25,
handletextpad=0.25,
handlelength=1)
leg = ax[2].legend(loc = "upper left",
fontsize = 12,
frameon = True,
facecolor = "white",
handletextpad=0.25,
borderpad=0.25,
handlelength=1)
plt.setp(leg.get_title(),fontsize=12,multialignment="center")
for i in range(3):
ax[i].axhline(0,
color = "black",
linewidth = 1,
linestyle = ":")
ax[i].set_xticklabels(["20% Slower","Preferred","20% Faster"], rotation=45, ha="right")
ax[i].tick_params(labelsize = 14)
if i == 1:
ax[i].set_xlabel("Metronome Condition",
fontsize = 16,
labelpad = 15,
fontweight = "bold")
ax[i].spines['right'].set_visible(False)
ax[i].spines['top'].set_visible(False)
ax[i].yaxis.set_major_formatter(FuncFormatter(lambda x, pos: "{:.0f}%".format(x)))
ax[0].set_ylabel("Drift\n(ITI Percent Change)",
fontsize = 16,
fontweight = "bold")
ax[0].set_title("General", fontweight="bold", fontstyle="italic", loc="center", fontsize=14)
ax[1].set_title("Sex", fontweight="bold", fontstyle="italic", loc="center", fontsize=14)
ax[2].set_title("Musical\nExperience", fontweight="bold", fontstyle="italic", loc="center", fontsize=14)
fig.tight_layout()
fig.subplots_adjust(wspace = 0.1)
fig.savefig(stats_plots + "combined_drift_standard_and_gender_and_musicalexperience" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "combined_drift_standard_and_gender_and_musicalexperience" + ".png", dpi=300)
plt.close()
## Combined Standard Drift + Musical Experience
fig, ax = plt.subplots(1, 2, figsize = standard_fig, sharex = False, sharey = True)
for t, trial_speed in enumerate(["SlowedDown","NoChange","SpedUp"]):
data_to_plot = drift_by_trial_speed_avg.loc[trial_speed]
ax[0].bar(t,
data_to_plot["drift"]["mean"],
yerr = data_to_plot["drift"]["std_error"],
color = "navy",
alpha = .5,
edgecolor = "navy")
ax[0].set_xticks(np.arange(3))
for t, trial_speed in enumerate(["SlowedDown","NoChange","SpedUp"]):
for m in [0, 1]:
data_to_plot = drift_by_trial_speed_me_avg.loc[trial_speed, m]
ax[1].bar(0.025 + t + m*bar_width,
data_to_plot["drift"]["mean"],
yerr = data_to_plot["drift"]["std_error"],
color = "navy",
alpha = .4 if m == 0 else .8,
edgecolor = "navy",
label = {0:"No",1:"Yes"}[m] if t == 0 else "",
width = bar_width,
align = "edge")
ax[1].set_xticks(np.arange(3)+.5)
leg = ax[1].legend(loc = "upper left",
fontsize = 12,
frameon = True,
facecolor = "white",
borderpad = 0.25,
handlelength = 2,
edgecolor = "gray")
plt.setp(leg.get_title(), fontsize=12, multialignment="center")
for i in range(2):
ax[i].axhline(0,
color = "black",
linewidth = 1,
linestyle = ":")
ax[i].set_xticklabels(["20%\nSlower","Preferred","20%\nFaster"])
ax[i].tick_params(labelsize = 14)
ax[i].spines['right'].set_visible(False)
ax[i].spines['top'].set_visible(False)
ax[i].yaxis.set_major_formatter(FuncFormatter(lambda x, pos: "{:.0f}%".format(x)))
ax[0].set_ylabel("Drift\n(ITI Percent Change)",
fontsize = 16,
fontweight = "bold")
ax[0].set_title("General", fontweight="bold", fontstyle="italic", loc="center", fontsize=14)
ax[1].set_title("Musical Experience", fontweight="bold", fontstyle="italic", loc="center", fontsize=14)
fig.text(0.55, 0.03, "Metronome Condition", fontweight="bold", fontsize=16, ha="center", va="center")
fig.tight_layout()
fig.subplots_adjust(wspace = 0.1, bottom=.2)
fig.savefig(stats_plots + "combined_drift_standard_and_musicalexperience" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "combined_drift_standard_and_musicalexperience" + ".png", dpi=300)
plt.close()
## Within-Subject
drift_pivot = pd.merge(pd.pivot_table(nomet_df,
index = ["subject"],
columns=["trial_speed"],
values = ["drift"]),
nomet_df.drop_duplicates("subject")[merge_vars],
left_index=True,
right_on="subject")
drift_pivot["slowed_change"] = drift_pivot[("drift","SlowedDown")]-drift_pivot[("drift","NoChange")] # Want this negative
drift_pivot["sped_change"] = drift_pivot[("drift","SpedUp")]-drift_pivot[("drift","NoChange")] # Want this positive
drift_pivot_melted = pd.melt(drift_pivot,
id_vars = merge_vars,
value_vars = ["slowed_change","sped_change"]).sort_values(["subject","variable"])
## T-Tests
slowed_t = sm.stats.ttest_ind(np.zeros(int(len(drift_pivot_melted)/2)),
drift_pivot_melted.loc[drift_pivot_melted.variable == "slowed_change"]["value"].values)
sped_t = sm.stats.ttest_ind(np.zeros(int(len(drift_pivot_melted)/2)),
drift_pivot_melted.loc[drift_pivot_melted.variable == "sped_change"]["value"].values)
print_str = """T-Tests
Slowed condition -> {}
Sped condition -> {}""".format(slowed_t, sped_t)
print(print_str)
"""
T-Tests (Without Preferred Period Filtering)
Slowed condition -> (3.765554497783035, 0.00018242710331818079, 604.0)
Sped condition -> (-3.7575886298128562, 0.0001881935490058379, 604.0)
T-Tests (With Uncertainty Filtering)
Slowed condition -> (3.284008542404658, 0.0010907327906088734, 532.0)
Sped condition -> (-3.6105751669668, 0.00033442385427766996, 532.0)
T-Tests (With 5% Threshold Filtering)
Slowed condition -> (3.4548741526679922, 0.0005928601070529876, 552.0)
Sped condition -> (-3.2970312997267928, 0.0010399597933973249, 552.0)
T-Tests (With 10% Threshold Filtering)
Slowed condition -> (3.419234468420245, 0.0006735150021045216, 562.0)
Sped condition -> (-3.4026003343820297, 0.000714988922207088, 562.0)
"""
## Mixed LM Model
rel_model_test_form = "value ~ C(gender) + age + preferred_period + variable + C(musical_experience) + rel_difference"
rel_model = smf.mixedlm(rel_model_test_form, data = drift_pivot_melted, groups = drift_pivot_melted["subject"]).fit()
rel_summary, rel_wald = summarize_lm_model(rel_model)
print("Within-Subject Drift Effects:"); print_significant(rel_model, rel_wald)
"""
Within-Subject Drift Effects (Without Preferred Period Filtering):
- variable chi^2(1, N = 303) = 39.12849, p = 0.00000
Within-Subject Drift Effects (With Uncertainty Filtering):
- variable chi^2(1, N = 267) = 32.58766, p = 0.00000
Within-Subject Drift Effects (With 5% Threshold Filtering):
- variable chi^2(1, N = 277) = 31.96015, p = 0.00000
Within-Subject Drift Effects (With 10% Threshold Filtering):
- variable chi^2(1, N = 282) = 32.94272, p = 0.00000
"""
## Plot Within-Subject Drift
rel_drift_by_cond = drift_pivot_melted.groupby(["variable"]).agg({"value":[np.mean, std_error]}).reset_index()
fig, ax = plt.subplots(figsize = standard_fig)
for j, var in enumerate(["slowed_change","sped_change"]):
data_to_plot = rel_drift_by_cond.loc[rel_drift_by_cond.variable == var]
ax.bar(j,
data_to_plot["value"]["mean"],
yerr = data_to_plot["value"]["std_error"],
color = "navy",
edgecolor = "navy",
alpha = .5,
width = .8)
ax.axhline(0,
color = "black",
linestyle = ":")
ax.set_xticks([0,1])
ax.set_xticklabels(["20% Slower","20% Faster"])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xlabel("Metronome Condition",
fontsize = 14,
labelpad = 20,
fontweight = "bold")
ax.set_ylabel("Drift Difference from\nPreferred Period Trials",
ha = "center",
va = "center",
multialignment="center",
labelpad = 20,
fontsize = 14,
fontweight = "bold")
ax.tick_params(labelsize = 14)
fig.tight_layout()
fig.savefig(stats_plots + "within_subject_drift" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "within_subject_drift" + ".png", dpi=300)
plt.close()
################################################################################
### Variability Analysis
################################################################################
## Fit Mixed LM Model
qvc_model_form = "qvc ~ C(gender) + age + preferred_period + trial_speed + C(musical_experience) + C(condition) + rel_difference"
qvc_model = smf.mixedlm(qvc_model_form, data = merged_results_df, groups = merged_results_df["subject"]).fit(reml=True)
qvc_model_summary, qvc_wald = summarize_lm_model(qvc_model)
print("Variability Effects:"); print_significant(qvc_model, qvc_wald)
"""
Variability Effects (Without Preferred Period Filtering):
- trial_speed chi^2(2, N = 303) = 9.79005, p = 0.00748
- C(musical_experience) chi^2(1, N = 303) = 15.79788, p = 0.00007
- C(condition) chi^2(1, N = 303) = 31.87567, p = 0.00000
- age chi^2(1, N = 303) = 69.37659, p = 0.00000
- preferred_period chi^2(1, N = 303) = 10.92312, p = 0.00095
Variability Effects (With Uncertainty Filtering):
- C(musical_experience) chi^2(1, N = 267) = 13.94576, p = 0.00019
- C(condition) chi^2(1, N = 267) = 30.36104, p = 0.00000
- age chi^2(1, N = 267) = 60.06554, p = 0.00000
- preferred_period chi^2(1, N = 267) = 11.96657, p = 0.00054
Variability Effects (With 5% Threshold Filtering):
- C(musical_experience) chi^2(1, N = 277) = 14.49736, p = 0.00014
- C(condition) chi^2(1, N = 277) = 29.75238, p = 0.00000
- age chi^2(1, N = 277) = 61.33191, p = 0.00000
- preferred_period chi^2(1, N = 277) = 10.96464, p = 0.00093
Variability Effects (With 10% Threshold Filtering):
- C(musical_experience) chi^2(1, N = 282) = 14.66073, p = 0.00013
- C(condition) chi^2(1, N = 282) = 30.89055, p = 0.00000
- age chi^2(1, N = 282) = 63.64658, p = 0.00000
- preferred_period chi^2(1, N = 282) = 11.42360, p = 0.00073
"""
## Age Effects
qvc_aggs = {"qvc": lambda values: tuple(bootstrap_ci(values, sample_percent = 30))}
age_bin_var_sem = merged_results_df.groupby(["condition","age_bin"]).agg({"qvc":std_error}).reset_index()
age_bin_var_ci = merged_results_df.groupby(["condition","age_bin"]).agg(qvc_aggs).reset_index()
for i in range(3): age_bin_var_ci[i] = age_bin_var_ci["qvc"].map(lambda j: j[i])
fig, ax = plt.subplots(1, 1, figsize = standard_fig)
for c, cond in enumerate(["paced","unpaced"]):
avg_data_to_plot = age_bin_var_sem.loc[age_bin_var_sem.condition == cond]
ci_data_to_plot = age_bin_var_ci.loc[age_bin_var_ci.condition == cond]
ax.errorbar(age_bin_points,
ci_data_to_plot[1].values,
yerr = avg_data_to_plot["qvc"].values,
color = "blue" if c == 0 else "green",
linewidth = 2,
alpha = .5)
ax.fill_between([age_bin_points[0]-1] + age_bin_points[1:-1] + [age_bin_points[-1]+1],
ci_data_to_plot[0].values,
ci_data_to_plot[2].values,
color = "blue" if c == 0 else "green",
alpha = .2,
label = "Synchronization" if c == 0 else "Continuation")
ax.set_xlabel("Age (yrs.)",
fontsize = 16,
labelpad = 10,
fontweight = "bold")
ax.set_xticks(age_bin_points)
ax.set_xticklabels(age_bin_strings, rotation=45, ha="right")
ax.tick_params(labelsize = 14)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel("Quartile Variation\nCoefficient",
fontsize = 16,
labelpad = 10,
fontweight = "bold")
ax.legend(loc = "upper right",
frameon = True,
facecolor = "white",
fontsize = 14,
handlelength = 2,
borderpad = 0.25,
edgecolor = "gray")
fig.tight_layout()
fig.savefig(stats_plots + "variability_age" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "variability_age" + ".png", dpi=300)
plt.close()
## Age + Gender Effects
qvc_aggs = {"qvc": lambda values: tuple(bootstrap_ci(values, sample_percent = 30))}
age_bin_var_sem = merged_results_df.groupby(["gender","age_bin"]).agg({"qvc":std_error}).reset_index()
age_bin_var_ci = merged_results_df.groupby(["gender","age_bin"]).agg(qvc_aggs).reset_index()
for i in range(3): age_bin_var_ci[i] = age_bin_var_ci["qvc"].map(lambda j: j[i])
fig, ax = plt.subplots(1, 1, figsize = standard_fig)
for c, gend in enumerate([0,1]):
avg_data_to_plot = age_bin_var_sem.loc[age_bin_var_sem.gender == gend]
ci_data_to_plot = age_bin_var_ci.loc[age_bin_var_ci.gender == gend]
ax.errorbar(age_bin_points,
ci_data_to_plot[1].values,
yerr = avg_data_to_plot["qvc"].values,
color = "teal" if c == 0 else "orangered",
linewidth = 2,
alpha = .5)
ax.fill_between([age_bin_points[0]-1] + age_bin_points[1:-1] + [age_bin_points[-1]+1],
ci_data_to_plot[0].values,
ci_data_to_plot[2].values,
color = "teal" if c == 0 else "orangered",
alpha = .2,
label = "Male" if c == 0 else "Female")
ax.set_xlabel("Age (yrs.)",
fontsize = 16,
labelpad = 10,
fontweight = "bold")
ax.set_xticks(age_bin_points)
ax.set_xticklabels(age_bin_strings, rotation=45, ha="right")
ax.tick_params(labelsize = 14)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel("Quartile Variation\nCoefficient",
fontsize = 16,
labelpad = 10,
fontweight = "bold")
ax.legend(loc = "upper right",
frameon = True,
facecolor = "white",
fontsize = 14,
borderpad = 0.25,
edgecolor = "gray",
handlelength = 2)
fig.tight_layout()
fig.savefig(stats_plots + "variability_age_gender" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "variability_age_gender" + ".png", dpi=300)
plt.close()
## Trial Speed
speed_var_avg = merged_results_df.groupby(["condition","trial_speed"]).agg({"qvc":[np.mean, std_error]}).reset_index()
bar_width = .95/2
fig, ax = plt.subplots(figsize = standard_fig)
for c, cond in enumerate(["paced","unpaced"]):
for t, trial_speed in enumerate(["SlowedDown","NoChange","SpedUp"]):
data_to_plot = speed_var_avg.loc[(speed_var_avg.condition==cond)&
(speed_var_avg.trial_speed==trial_speed)]
ax.bar(0.025 + t + bar_width*c,
data_to_plot["qvc"]["mean"],
yerr = data_to_plot["qvc"]["std_error"],
align = "edge",
width = bar_width,
color = "blue" if c == 0 else "green",
edgecolor = "navy" if c == 0 else "darkgreen",
label = {0:"Synchronization",1:"Continuation"}[c] if t == 0 else "",
alpha = .2)
ax.set_xticks(np.arange(3)+.5)
ax.set_xticklabels(["20% Slower","Preferred","20% Faster"])
ax.set_xlabel("Metronome Condition",
fontsize = 16,
labelpad = 15,
fontweight = "bold")
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(labelsize = 14)
ax.set_ylabel("Quartile Variation\nCoefficient",
fontsize = 16,
labelpad = 15,
fontweight = "bold")
ax.legend(loc = "lower right",
frameon = True,
facecolor = "white",
fontsize = 16,
borderpad = 0.25,
handlelength = 2,
edgecolor = "gray")
fig.tight_layout()
fig.savefig(stats_plots + "variability_trialspeed" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "variability_trialspeed" + ".png", dpi=300)
plt.close()
## Musical Experience + Variability
qvc_music_avg = merged_results_df.groupby(["condition","trial_speed","musical_experience"]).agg({"qvc":[np.mean, std_error]}).reset_index()
fig, ax = plt.subplots(1, 2, figsize = standard_fig, sharey = True, sharex = True)
for c, cond in enumerate(["paced","unpaced"]):
for me in range(2):
data_to_plot = qvc_music_avg.set_index(["condition","musical_experience"]).loc[cond, me]
data_to_plot = data_to_plot.set_index("trial_speed").loc[["SlowedDown","NoChange","SpedUp"]]
ax[c].bar(0.025 + np.arange(3) + .95/2*me,
data_to_plot["qvc"]["mean"].values,
yerr = data_to_plot["qvc"]["std_error"].values,
color = "blue" if c == 0 else "green",
edgecolor = "blue" if c == 0 else "green",
width = .95/2,
alpha = .4 if me == 0 else .8,
align = "edge",
label = "Yes" if me == 1 else "No")
ax[c].spines['right'].set_visible(False)
ax[c].spines['top'].set_visible(False)
ax[c].set_xticks(np.arange(3) + .5)
ax[c].tick_params(labelsize = 14)
ax[c].set_xticklabels(["20% Slower","Preferred","20% Faster"], rotation=45, ha="right")
ax[0].set_title("Synchronization",
fontsize = 16,
fontweight = "bold",
fontstyle = "italic")
ax[1].set_title("Continuation",
fontsize = 16,
fontweight = "bold",
fontstyle = "italic")
ax[0].set_ylabel("Quartile Variation\nCoefficient",
fontsize = 16,
labelpad = 15,
fontweight = "bold")
leg = ax[1].legend(loc = "lower right",
fontsize = 12,
frameon = True,
facecolor = "white",
framealpha = 1,
title = "Musical\nExperience",
borderpad = 0.25,
edgecolor = "gray")
plt.setp(leg.get_title(),fontsize=12, multialignment="center")
fig.text(0.55,
0.02,
'Metronome Condition',
ha='center',
fontsize = 14,
fontweight = "bold")
fig.tight_layout()
fig.subplots_adjust(top = .9,
bottom = .35,
wspace = .12)
fig.savefig(stats_plots + "variability_musicalexperience_trialspeed" + FIGURE_FMT, dpi=300)
fig.savefig(stats_plots + "variability_musicalexperience_trialspeed" + ".png", dpi=300)
plt.close()
################################################################################
### Subject Filtering
################################################################################
## Want to understand which subjects were thrown out (and why)
## Read in Survey Data
survey_data_full = pd.read_csv("./data/survey.csv")
survey_data_full["Subject"] = survey_data_full.Subject.map(int)
################
### Stage 1
################
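## NOTE (added sketch): `load_pickle` is assumed to be defined earlier in this
## script; a minimal compatible implementation is sketched here for reference
## and is only bound if the real helper is missing.
if "load_pickle" not in globals():
    import pickle
    def load_pickle(path):
        # read a pickled object (here: the manual-inspection results dict) from disk
        with open(path, "rb") as f:
            return pickle.load(f)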
## Load Results
stage_1_results = load_pickle("./data/manual_inspection.pickle")
## Format Results
stage_1_results = pd.Series(stage_1_results).reset_index().rename(columns = {"index":"file",0:"pass"})
stage_1_results["subject"] = stage_1_results["file"].map(lambda i: i.split("/")[-1].replace(".mat","")).map(int)
## Extract Non-Passing + Merge Demos
stage_1_nonpassing = stage_1_results.loc[stage_1_results["pass"] != "pass"].copy()
stage_1_nonpassing = pd.merge(stage_1_nonpassing,
survey_data_full[["Subject","Age","Gender"]],
left_on = "subject",
right_on = "Subject",
how = "left")
"""
12 subjects removed due to sensor issue
3 subjects removed due to forgetting to tap (2x10yrs, 1x5yrs)
2 subjects removed due to tapping style (1x6yrs, 1x14yrs)
1 subject removed due to missing survey data
"""
s1_filtered_subjects = stage_1_nonpassing.subject.unique()
################
### Stage 2
################
## Load Results
stage_2_results = load_pickle("./data/stage_2_processed.pickle")
## Identify Failures
stage_2_failures = pd.DataFrame(stage_2_results)
from data_get import *
from baseline_functions import *
from calendar_date import *
import global_vars
global_vars.init()
if global_vars.GRAPHFLAG > 0:
from graph_functions import *
from error_graphs import *
import mysql.connector
import pandas as pd
import datetime
import time
# main()
# This function goes through each SAID in the SAID_TABLE SQL table, retrieves its data, and runs baselining methods on it
def main():
    # Connect to the database table that contains the SAIDs
cnx = mysql.connector.connect(user=global_vars.DATABASE_USERNAME, password=global_vars.DATABASE_PASSWORD,
host=global_vars.DATABASE_IP_RO,
database=global_vars.DATABASE_NAME)
cursor = cnx.cursor()
# return
if global_vars.INTERVALFLAG == 0:
query = "SELECT * FROM SAID_TABLE_DR_15 LIMIT %i OFFSET %i" %(global_vars.SAID_LIMIT, global_vars.SAID_OFFSET)
interval = 15
dbnm = 'MIN15'
elif global_vars.INTERVALFLAG == 1:
query = "SELECT * FROM SAID_TABLE_DR_60 LIMIT %i OFFSET %i" %(global_vars.SAID_LIMIT, global_vars.SAID_OFFSET)
interval = 60
dbnm = 'MIN60'
elif global_vars.INTERVALFLAG == 2:
query = "SELECT * FROM SAID_TABLE_NONDR_15 LIMIT %i OFFSET %i" %(global_vars.SAID_LIMIT, global_vars.SAID_OFFSET)
elif global_vars.INTERVALFLAG == 3:
query = "SELECT * FROM SAID_TABLE_NONDR_60 LIMIT %i OFFSET %i" %(global_vars.SAID_LIMIT, global_vars.SAID_OFFSET)
cursor.execute(query)
said_counter = 0
tic = time.time()
said_packet = []
said_pack_count = 0
PACK_SIZE = 25
storage_df_columns = global_vars.storage_df_columns
pack_bank = []
# Go through each SAID
for row in cursor:
SAID = str(row[0]).zfill(10)
said_packet.append(SAID)
said_pack_count += 1
if said_pack_count%PACK_SIZE != 0:
continue
pack_bank.append(said_packet)
said_packet = []
if len(pack_bank)*PACK_SIZE > 1000:
# print(pack_bank)
break
else:
continue
said_pack_count = 0
for said_packet in pack_bank:
said_pack_count += PACK_SIZE
tic_packet = time.time()
print("packet",said_packet)
packet_string = '('+str(said_packet)[1:-1]+')'
try:
cnx = mysql.connector.connect(user=global_vars.DATABASE_USERNAME, password=global_vars.DATABASE_PASSWORD,
host=global_vars.DATABASE_IP_RO,
database=global_vars.DATABASE_NAME)
tic_query = time.time()
query = "SELECT * FROM %s WHERE SA in %s" %(dbnm, packet_string)
all_interval_df = pd.read_sql_query(query,cnx)
all_interval_df['DATE'] = pd.to_datetime(all_interval_df['DATE'])
toc_query = time.time()
print("All interval df shape:", all_interval_df.shape, "- time:", toc_query-tic_query)
except:
print("Interval_df error")
said_packet = []
continue
# storage_df = pd.DataFrame(columns=storage_df_columns)
nonres_storage_df = pd.DataFrame(columns=storage_df_columns)
pdp_storage_df = pd.DataFrame(columns=storage_df_columns)
cbp_storage_df = pd.DataFrame(columns=storage_df_columns)
bip_storage_df = pd.DataFrame(columns=storage_df_columns)
amp_storage_df = pd.DataFrame(columns=storage_df_columns)
res_storage_df = pd.DataFrame(columns=storage_df_columns)
smartrate_storage_df = pd.DataFrame(columns=storage_df_columns)
        smartac_storage_df = pd.DataFrame(columns=storage_df_columns)
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 1 15:28:19 2021
@author: ashum
"""
from skimage import io
import pandas as pd
import os
#get a list of files from file path
pathforall= r'C:\Users\ashum\OneDrive\Desktop\Leukemia\archive\C-NMC_Leukemia\training_data\fold_0\all'
dir_list=os.listdir(pathforall)
pathforhem= r'C:\Users\ashum\OneDrive\Desktop\Leukemia\archive\C-NMC_Leukemia\training_data\fold_0\hem'
dir_list2=os.listdir(pathforhem)
#initiate empty data frame
df = pd.DataFrame()
#loop images into the dataframe and label them with the class ALL (leukemia) (stopped at 300 to reduce processing time for test runs)
for i in dir_list[:300]:
img=io.imread(pathforall + "\\" + i)
my_df = pd.DataFrame(img.flatten()).transpose()
my_df['Class'] = 'ALL'
#my_df=my_df[["mean","std"]]
my_df=my_df.head(n=1)
df = df.append(my_df, ignore_index=True)
#df.hist(bins=10)
#plt.show()
#loop images into the dataframe and label them with the class HEM (non-leukemia) (stopped at 300 to reduce processing time for test runs)
df2 = pd.DataFrame()
for i in dir_list2[:300]:
img2=io.imread(pathforhem + "\\" + i)
my_df2 = pd.DataFrame(img2.flatten()).transpose()
#my_df2["mean"]=my_df2.loc[:,0].mean()
#my_df2["std"]=my_df2.iloc[:,-1].std()
my_df2['Class'] = 'Not ALL'
#my_df2=my_df2[["mean","std",]]
my_df2=my_df2.head(n=1)
df2 = df2.append(my_df2, ignore_index=True)
#df2.hist(bins=10)
#plt.show()
#union the two dataframes created
df_stack = pd.concat([df, df2])
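#A plausible next step (assumption, not part of the original script): shuffle the
#combined frame and split it into train/test sets for modeling.
from sklearn.model_selection import train_test_split

X = df_stack.drop(columns="Class")
y = df_stack["Class"]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y)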
from datetime import datetime, timedelta
import logging
import re
import pandas as pd
from scheduler.impala_api_client import ImpalaApiResource
from scheduler.constants import NativeQueryInfoColumn, FormativeQueryInfoColumn
from scheduler.global_utils import convert_mem_unit, spend_time
MEM_LIMIT_REGEX = re.compile(r"MEM_LIMIT=(\d+)")
HOSTS_REGEX = re.compile(r"hosts=(\d+)")
LOGGER = logging.getLogger(__name__)
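# Illustrative helper (not called anywhere): shows how the two regexes above are meant
# to be applied to a query "details" string. The sample string below is hypothetical.
def _demo_parse_requires():
    sample_details = "... MEM_LIMIT=2147483648 ... hosts=3 ... hosts=5 ..."
    mem_limit = int(MEM_LIMIT_REGEX.search(sample_details).group(1))  # in bytes
    max_hosts = max(int(h) for h in HOSTS_REGEX.findall(sample_details))
    return mem_limit, max_hosts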
class ClouderaManager(object):
"""
The ClouderaManager class that provides methods for get the query information, the configuration of
impala cluster and update the configuration of impala cluster.
"""
def __init__(self, server_url, api_version, cluster_name, username, password):
"""
Creates a ClouderaManager object that provides methods to get and update the query information and
the configuration of impala cluster.
:param server_url: (str) The Server url.
:param api_version: (str) The api version.
:param cluster_name: (str) The cluster name.
        :param username: (str) The username for logging in to Cloudera Manager.
        :param password: (str) The password for logging in to Cloudera Manager.
"""
self.__api = ImpalaApiResource(server_url, api_version, cluster_name, username, password)
@classmethod
def __add_timedelta(cls, gmt):
"""
        Add a fixed timedelta to the Greenwich Mean Time.
        :param gmt: (str) Greenwich Mean Time.
        :return: (datetime) A datetime object with 8 hours added to the Greenwich Mean Time.
"""
return datetime.strptime(gmt, "%Y-%m-%dT%H:%M:%S.%fZ") + timedelta(hours=8)
@classmethod
def __find_mem_limit(cls, content):
"""
Search MEM_LIMIT in the given content by regex.
:param content: (str) The content to be searched.
:return: (int) A integer value of MEM_LIMIT.
"""
matcher = MEM_LIMIT_REGEX.search(content)
return int(matcher.group(1)) if matcher else 0
@classmethod
def __find_max_hosts(cls, content):
"""
Search max hosts in the given content by regex.
:param content: (str) The content to be searched.
:return: (int) A integer value of max hosts.
"""
str_hosts = HOSTS_REGEX.findall(content)
return max(int(str_host) for str_host in str_hosts) if str_hosts else 0
def __parse_requires_from_details(self, query_id):
"""
Parse mem limit and max hosts from query details by query_id.
:param query_id: (str) The query id.
:return: (tuple) A tuple object that contains mem limit and max hosts.
"""
mem_limit, max_hosts = 0, 0
try:
query_details_response = self.get_query_details(query_id)
query_details = query_details_response[NativeQueryInfoColumn.DETAILS]
mem_limit = ClouderaManager.__find_mem_limit(query_details)
max_hosts = ClouderaManager.__find_max_hosts(query_details)
except Exception as e:
LOGGER.warning(e, "query_id", query_id)
return convert_mem_unit(mem_limit, "B", "MB"), max_hosts
def fetch_page_impala_query_info(self, start_time, end_time, filter_str=""):
"""
Get filtered impala query information by page from the end_time to the start_time.
:param start_time: (datetime) The start time to fetching query information.
:param end_time: (datetime) The end time to fetching query information.
:param filter_str: (str) The filter string to fetch query information.
:return: (DataFrame) A DataFrame object of fetched query information.
"""
LOGGER.info("fetching impala query info page data, start_time: %s, end_time: %s" % (start_time, end_time))
impala_query_response = self.get_impala_queries(start_time, end_time, filter_str)
queries = impala_query_response[NativeQueryInfoColumn.QUERIES]
LOGGER.info("impala query info page data size: %d" % len(queries))
if not queries:
return None
df_queries = pd.DataFrame(impala_query_response[NativeQueryInfoColumn.QUERIES])
sr_query_ids = df_queries[NativeQueryInfoColumn.QUERY_ID]
sr_start_times = df_queries[NativeQueryInfoColumn.START_TIME].apply(ClouderaManager.__add_timedelta)
sr_duration_mills = df_queries[NativeQueryInfoColumn.DURATION_MILLIS]
sr_pools = [x[NativeQueryInfoColumn.POOL] for x in df_queries[NativeQueryInfoColumn.ATTRIBUTES]]
sr_admission_waits = [x[NativeQueryInfoColumn.ADMISSION_WAIT] for x in df_queries[NativeQueryInfoColumn.ATTRIBUTES]]
df_base = pd.DataFrame(data={FormativeQueryInfoColumn.QUERY_ID: sr_query_ids,
FormativeQueryInfoColumn.START_TIME: sr_start_times,
FormativeQueryInfoColumn.DURATION_MILLIS: sr_duration_mills,
FormativeQueryInfoColumn.POOL: sr_pools,
FormativeQueryInfoColumn.ADMISSION_WAIT: sr_admission_waits})
sr_details = sr_query_ids.apply(self.__parse_requires_from_details)
df_details = pd.DataFrame(data=(x for x in sr_details),
columns=[FormativeQueryInfoColumn.MEM_LIMIT, FormativeQueryInfoColumn.MAX_HOST])
LOGGER.info("finish fetch impala query info page data, start_time: %s, end_time: %s" % (start_time, end_time))
return df_base.join(df_details)
def fetch_impala_query_info(self, start_time, end_time, filter_str):
"""
Get total filtered impala query information between end_time and start_time.
:param start_time: (datetime) The start time to fetching query information.
:param end_time: (datetime) The end time to fetching query information.
:param filter_str: (str) The filter string to fetch query information.
:return: (DataFrame) A DataFrame object of total fetched query information.
"""
LOGGER.info("start fetch impala query info data, start_time: %s, end_time: %s" % (start_time, end_time))
        data = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 10 10:56:14 2019
@author: Wignand
"""
from scipy import stats
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pylab as plt
import matplotlib as mpl
import matplotlib.cm as cm
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.image as mplimg
import matplotlib.ticker as ticker
import pylab as pl
import importlib
from autoprot import venn
import logomaker
import colorsys
import matplotlib.patches as patches
from itertools import chain
from datetime import date
from wordcloud import WordCloud
from wordcloud import STOPWORDS
from pdfminer3.layout import LAParams, LTTextBox
from pdfminer3.pdfpage import PDFPage
from pdfminer3.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer3.converter import PDFPageAggregator, TextConverter
import io
from PIL import Image
import plotly
import plotly.express as px
import plotly.graph_objects as go
plt.rcParams['pdf.fonttype'] = 42
from io import BytesIO
from scipy.stats import ttest_1samp
def correlogram(df, columns=None, file="proteinGroups",log=True,saveDir = None, saveType="pdf", saveName="pairPlot", lowerTriang="scatter",
sampleFrac=None, bins=100):
"""
    Plots a pair plot (correlogram) of the dataframe intensity columns
    in order to assess reproducibility between samples.

    :param df: dataframe from a MaxQuant file
    :param columns: the columns to be visualized;
                    if None, all intensity columns will be analyzed
    :param lowerTriang: plot type used for the lower triangle
                        ("scatter", hexbin or 2D histogram)
    :param sampleFrac: float; fraction between 0 and 1 indicating the fraction of entries
                       to be shown in the scatter plots; useful for large correlograms
                       in order to keep them workable in Illustrator
    :param bins: number of bins used for the hexbin panels
"""
def getColor(r):
colors = {
0.8: "#d67677",
0.81: "#d7767c",
0.82: "#d87681",
0.83: "#da778c",
0.84: "#dd7796",
0.85: "#df78a1",
0.86: "#e179ad",
0.87: "#e379b8",
0.88: "#e57ac4",
0.89: "#e77ad0",
0.90: "#ea7bdd",
0.91 : "#ec7bea",
0.92 : "#e57cee",
0.93 : "#dc7cf0",
0.94 : "#d27df2",
0.95 : "#c87df4",
0.96 : "#be7df6",
0.97 : "#b47ef9",
0.98 : "#a97efb",
0.99 : "#9e7ffd",
1 : "#927fff"
}
if r <= 0.8:
return "#D63D40"
else:
return colors[np.round(r,2)]
def corrfunc(x, y, **kws):
df = pd.DataFrame({"x":x, "y":y})
df = df.dropna()
x = df["x"].values
y = df["y"].values
r, _ = stats.pearsonr(x, y)
ax = plt.gca()
ax.annotate("r = {:.2f}".format(r),
xy=(.1, .9), xycoords=ax.transAxes)
def heatmap(x,y,**kws):
df = pd.DataFrame({"x":x, "y":y})
df = df.replace(-np.inf, np.nan).dropna()
x = df["x"].values
y = df["y"].values
r, _ = stats.pearsonr(x,y)
ax = plt.gca()
ax.add_patch(mpl.patches.Rectangle((0,0),5,5, color=getColor(r), transform=ax.transAxes))
ax.tick_params(axis = "both", which = "both", length=0)
ax.spines["left"].set_visible(False)
ax.spines["bottom"].set_visible(False)
def lowerScatter(x,y,**kws):
data = pd.DataFrame({"x":x, "y":y})
if sampleFrac is not None:
data = data.sample(int(data.shape[0]*sampleFrac))
ax = plt.gca()
ax.scatter(data['x'],data['y'], linewidth=0)
def lowerHexBin(x,y,**kws):
plt.hexbin(x,y, cmap="Blues", bins=bins,
gridsize=50)
def lowerhist2D(x,y,**kws):
        df = pd.DataFrame({"x":x, "y":y})
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import statistics
from os import mkdir
from os.path import exists, isdir
from os.path import join as pjoin
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import nltk
import numpy as np
import pandas as pd
import plotly
import plotly.express as px
import plotly.figure_factory as ff
import plotly.graph_objects as go
import pyarrow.feather as feather
import seaborn as sns
import torch
from datasets import load_from_disk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from .dataset_utils import (CNT, DEDUP_TOT, EMBEDDING_FIELD, LENGTH_FIELD,
OUR_LABEL_FIELD, OUR_TEXT_FIELD, PROP,
TEXT_NAN_CNT, TOKENIZED_FIELD, TOT_OPEN_WORDS,
TOT_WORDS, TXT_LEN, VOCAB, WORD, extract_field,
load_truncated_dataset)
from .embeddings import Embeddings
from .npmi import nPMI
from .zipf import Zipf
pd.options.display.float_format = "{:,.3f}".format
logs = logging.getLogger(__name__)
logs.setLevel(logging.WARNING)
logs.propagate = False
if not logs.handlers:
# Logging info to log file
file = logging.FileHandler("./log_files/dataset_statistics.log")
fileformat = logging.Formatter("%(asctime)s:%(message)s")
file.setLevel(logging.INFO)
file.setFormatter(fileformat)
# Logging debug messages to stream
stream = logging.StreamHandler()
streamformat = logging.Formatter("[data_measurements_tool] %(message)s")
stream.setLevel(logging.WARNING)
stream.setFormatter(streamformat)
logs.addHandler(file)
logs.addHandler(stream)
# TODO: Read this in depending on chosen language / expand beyond english
nltk.download("stopwords")
_CLOSED_CLASS = (
stopwords.words("english")
+ [
"t",
"n",
"ll",
"d",
"wasn",
"weren",
"won",
"aren",
"wouldn",
"shouldn",
"didn",
"don",
"hasn",
"ain",
"couldn",
"doesn",
"hadn",
"haven",
"isn",
"mightn",
"mustn",
"needn",
"shan",
"would",
"could",
"dont",
"u",
]
+ [str(i) for i in range(0, 21)]
)
_IDENTITY_TERMS = [
"man",
"woman",
"non-binary",
"gay",
"lesbian",
"queer",
"trans",
"straight",
"cis",
"she",
"her",
"hers",
"he",
"him",
"his",
"they",
"them",
"their",
"theirs",
"himself",
"herself",
]
# treating inf values as NaN as well
pd.set_option("use_inf_as_na", True)
_MIN_VOCAB_COUNT = 10
_TREE_DEPTH = 12
_TREE_MIN_NODES = 250
# as long as we're using sklearn - already pushing the resources
_MAX_CLUSTER_EXAMPLES = 5000
_NUM_VOCAB_BATCHES = 2000
_TOP_N = 100
_CVEC = CountVectorizer(token_pattern="(?u)\\b\\w+\\b", lowercase=True)
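# NOTE (added sketch): the small cache I/O helpers used throughout the classes below
# (write_df, write_json, write_plotly, read_plotly) are assumed to be defined elsewhere
# in this module. Minimal compatible sketches are given for reference and are only
# bound if the real helpers are missing.
import plotly.io as pio  # plotly itself is already imported above
if "write_df" not in globals():
    def write_df(df, df_fid):
        # cache a DataFrame as a feather file (read back with feather.read_feather)
        feather.write_feather(df, df_fid)
if "write_json" not in globals():
    def write_json(json_dict, json_fid):
        with open(json_fid, "w", encoding="utf-8") as f:
            json.dump(json_dict, f)
if "write_plotly" not in globals():
    def write_plotly(fig, fid):
        pio.write_json(fig, fid)
if "read_plotly" not in globals():
    def read_plotly(fid):
        return pio.read_json(fid)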
class DatasetStatisticsCacheClass:
def __init__(
self,
cache_dir,
dset_name,
dset_config,
split_name,
text_field,
label_field,
label_names,
calculation=None,
use_cache=False,
):
# This is only used for standalone runs for each kind of measurement.
self.calculation = calculation
self.our_text_field = OUR_TEXT_FIELD
self.our_length_field = LENGTH_FIELD
self.our_label_field = OUR_LABEL_FIELD
self.our_tokenized_field = TOKENIZED_FIELD
self.our_embedding_field = EMBEDDING_FIELD
self.cache_dir = cache_dir
# Use stored data if there; otherwise calculate afresh
self.use_cache = use_cache
### What are we analyzing?
# name of the Hugging Face dataset
self.dset_name = dset_name
# name of the dataset config
self.dset_config = dset_config
# name of the split to analyze
self.split_name = split_name
        # TODO: Should this be "feature" ?
# which text fields are we analysing?
self.text_field = text_field
# which label fields are we analysing?
self.label_field = label_field
# what are the names of the classes?
self.label_names = label_names
## Hugging Face dataset objects
self.dset = None # original dataset
# HF dataset with all of the self.text_field instances in self.dset
self.text_dset = None
self.dset_peek = None
# HF dataset with text embeddings in the same order as self.text_dset
self.embeddings_dset = None
# HF dataset with all of the self.label_field instances in self.dset
self.label_dset = None
## Data frames
# Tokenized text
self.tokenized_df = None
        # save sentence length histogram in the class so it doesn't get re-computed
self.length_df = None
self.fig_tok_length = None
# Data Frame version of self.label_dset
self.label_df = None
        # save label pie chart in the class so it doesn't get re-computed
self.fig_labels = None
# Vocabulary with word counts in the dataset
self.vocab_counts_df = None
# Vocabulary filtered to remove stopwords
self.vocab_counts_filtered_df = None
self.sorted_top_vocab_df = None
## General statistics and duplicates
self.total_words = 0
self.total_open_words = 0
# Number of NaN values (NOT empty strings)
self.text_nan_count = 0
# Number of text items that appear more than once in the dataset
self.dedup_total = 0
        # Duplicated text items along with their number of occurrences ("count")
self.dup_counts_df = None
self.avg_length = None
self.std_length = None
self.general_stats_dict = None
self.num_uniq_lengths = 0
# clustering text by embeddings
# the hierarchical clustering tree is represented as a list of nodes,
# the first is the root
self.node_list = []
        # save tree figure in the class so it doesn't get re-computed
self.fig_tree = None
# keep Embeddings object around to explore clusters
self.embeddings = None
# nPMI
# Holds a nPMIStatisticsCacheClass object
self.npmi_stats = None
# TODO: Have lowercase be an option for a user to set.
self.to_lowercase = True
# The minimum amount of times a word should occur to be included in
# word-count-based calculations (currently just relevant to nPMI)
self.min_vocab_count = _MIN_VOCAB_COUNT
# zipf
self.z = None
self.zipf_fig = None
self.cvec = _CVEC
# File definitions
# path to the directory used for caching
if not isinstance(text_field, str):
text_field = "-".join(text_field)
# if isinstance(label_field, str):
# label_field = label_field
# else:
# label_field = "-".join(label_field)
self.cache_path = pjoin(
self.cache_dir,
f"{dset_name}_{dset_config}_{split_name}_{text_field}", # {label_field},
)
# Cache files not needed for UI
self.dset_fid = pjoin(self.cache_path, "base_dset")
self.tokenized_df_fid = pjoin(self.cache_path, "tokenized_df.feather")
self.label_dset_fid = pjoin(self.cache_path, "label_dset")
# Needed for UI -- embeddings
self.text_dset_fid = pjoin(self.cache_path, "text_dset")
# Needed for UI
self.dset_peek_json_fid = pjoin(self.cache_path, "dset_peek.json")
## Label cache files.
# Needed for UI
self.fig_labels_json_fid = pjoin(self.cache_path, "fig_labels.json")
## Length cache files
# Needed for UI
self.length_df_fid = pjoin(self.cache_path, "length_df.feather")
# Needed for UI
self.length_stats_json_fid = pjoin(self.cache_path, "length_stats.json")
self.vocab_counts_df_fid = pjoin(self.cache_path, "vocab_counts.feather")
# Needed for UI
self.dup_counts_df_fid = pjoin(self.cache_path, "dup_counts_df.feather")
# Needed for UI
self.fig_tok_length_fid = pjoin(self.cache_path, "fig_tok_length.png")
## General text stats
# Needed for UI
self.general_stats_json_fid = pjoin(self.cache_path, "general_stats_dict.json")
# Needed for UI
self.sorted_top_vocab_df_fid = pjoin(
self.cache_path, "sorted_top_vocab.feather"
)
## Zipf cache files
# Needed for UI
self.zipf_fid = pjoin(self.cache_path, "zipf_basic_stats.json")
# Needed for UI
self.zipf_fig_fid = pjoin(self.cache_path, "zipf_fig.json")
## Embeddings cache files
# Needed for UI
self.node_list_fid = pjoin(self.cache_path, "node_list.th")
# Needed for UI
self.fig_tree_json_fid = pjoin(self.cache_path, "fig_tree.json")
self.live = False
def set_deployment(self, live=True):
"""
Function that we can hit when we deploy, so that cache files are not
written out/recalculated, but instead that part of the UI can be punted.
"""
self.live = live
def check_cache_dir(self):
"""
First function to call to create the cache directory.
If in deployment mode and cache directory does not already exist,
return False.
"""
if self.live:
return isdir(self.cache_path)
else:
if not isdir(self.cache_path):
logs.warning("Creating cache directory %s." % self.cache_path)
mkdir(self.cache_path)
return isdir(self.cache_path)
def get_base_dataset(self):
"""Gets a pointer to the truncated base dataset object."""
if not self.dset:
self.dset = load_truncated_dataset(
self.dset_name,
self.dset_config,
self.split_name,
cache_name=self.dset_fid,
use_cache=True,
use_streaming=True,
)
def load_or_prepare_general_stats(self, save=True):
"""
Content for expander_general_stats widget.
Provides statistics for total words, total open words,
the sorted top vocab, the NaN count, and the duplicate count.
Args:
Returns:
"""
# General statistics
if (
self.use_cache
and exists(self.general_stats_json_fid)
and exists(self.dup_counts_df_fid)
and exists(self.sorted_top_vocab_df_fid)
):
logs.info("Loading cached general stats")
self.load_general_stats()
else:
if not self.live:
logs.info("Preparing general stats")
self.prepare_general_stats()
if save:
write_df(self.sorted_top_vocab_df, self.sorted_top_vocab_df_fid)
write_df(self.dup_counts_df, self.dup_counts_df_fid)
write_json(self.general_stats_dict, self.general_stats_json_fid)
def load_or_prepare_text_lengths(self, save=True):
"""
The text length widget relies on this function, which provides
a figure of the text lengths, some text length statistics, and
a text length dataframe to peruse.
Args:
save:
Returns:
"""
# Text length figure
if self.use_cache and exists(self.fig_tok_length_fid):
self.fig_tok_length_png = mpimg.imread(self.fig_tok_length_fid)
else:
if not self.live:
self.prepare_fig_text_lengths()
if save:
self.fig_tok_length.savefig(self.fig_tok_length_fid)
# Text length dataframe
if self.use_cache and exists(self.length_df_fid):
self.length_df = feather.read_feather(self.length_df_fid)
else:
if not self.live:
self.prepare_length_df()
if save:
write_df(self.length_df, self.length_df_fid)
# Text length stats.
if self.use_cache and exists(self.length_stats_json_fid):
with open(self.length_stats_json_fid, "r") as f:
self.length_stats_dict = json.load(f)
self.avg_length = self.length_stats_dict["avg length"]
self.std_length = self.length_stats_dict["std length"]
self.num_uniq_lengths = self.length_stats_dict["num lengths"]
else:
if not self.live:
self.prepare_text_length_stats()
if save:
write_json(self.length_stats_dict, self.length_stats_json_fid)
def prepare_length_df(self):
if not self.live:
if self.tokenized_df is None:
self.tokenized_df = self.do_tokenization()
self.tokenized_df[LENGTH_FIELD] = self.tokenized_df[TOKENIZED_FIELD].apply(
len
)
self.length_df = self.tokenized_df[
[LENGTH_FIELD, OUR_TEXT_FIELD]
].sort_values(by=[LENGTH_FIELD], ascending=True)
def prepare_text_length_stats(self):
if not self.live:
if (
self.tokenized_df is None
or LENGTH_FIELD not in self.tokenized_df.columns
or self.length_df is None
):
self.prepare_length_df()
avg_length = sum(self.tokenized_df[LENGTH_FIELD]) / len(
self.tokenized_df[LENGTH_FIELD]
)
self.avg_length = round(avg_length, 1)
std_length = statistics.stdev(self.tokenized_df[LENGTH_FIELD])
self.std_length = round(std_length, 1)
self.num_uniq_lengths = len(self.length_df["length"].unique())
self.length_stats_dict = {
"avg length": self.avg_length,
"std length": self.std_length,
"num lengths": self.num_uniq_lengths,
}
def prepare_fig_text_lengths(self):
if not self.live:
if (
self.tokenized_df is None
or LENGTH_FIELD not in self.tokenized_df.columns
):
self.prepare_length_df()
self.fig_tok_length = make_fig_lengths(self.tokenized_df, LENGTH_FIELD)
def load_or_prepare_embeddings(self):
self.embeddings = Embeddings(self, use_cache=self.use_cache)
self.embeddings.make_hierarchical_clustering()
self.node_list = self.embeddings.node_list
self.fig_tree = self.embeddings.fig_tree
# get vocab with word counts
def load_or_prepare_vocab(self, save=True):
"""
Calculates the vocabulary count from the tokenized text.
The resulting dataframes may be used in nPMI calculations, zipf, etc.
:param
:return:
"""
if self.use_cache and exists(self.vocab_counts_df_fid):
logs.info("Reading vocab from cache")
self.load_vocab()
self.vocab_counts_filtered_df = filter_vocab(self.vocab_counts_df)
else:
logs.info("Calculating vocab afresh")
if self.tokenized_df is None:
self.tokenized_df = self.do_tokenization()
if save:
logs.info("Writing out.")
write_df(self.tokenized_df, self.tokenized_df_fid)
word_count_df = count_vocab_frequencies(self.tokenized_df)
logs.info("Making dfs with proportion.")
self.vocab_counts_df = calc_p_word(word_count_df)
self.vocab_counts_filtered_df = filter_vocab(self.vocab_counts_df)
if save:
logs.info("Writing out.")
write_df(self.vocab_counts_df, self.vocab_counts_df_fid)
logs.info("unfiltered vocab")
logs.info(self.vocab_counts_df)
logs.info("filtered vocab")
logs.info(self.vocab_counts_filtered_df)
def load_vocab(self):
with open(self.vocab_counts_df_fid, "rb") as f:
self.vocab_counts_df = feather.read_feather(f)
# Handling for changes in how the index is saved.
self.vocab_counts_df = self._set_idx_col_names(self.vocab_counts_df)
def load_or_prepare_text_duplicates(self, save=True):
if self.use_cache and exists(self.dup_counts_df_fid):
with open(self.dup_counts_df_fid, "rb") as f:
self.dup_counts_df = feather.read_feather(f)
elif self.dup_counts_df is None:
if not self.live:
self.prepare_text_duplicates()
if save:
write_df(self.dup_counts_df, self.dup_counts_df_fid)
else:
if not self.live:
# This happens when self.dup_counts_df is already defined;
# This happens when general_statistics were calculated first,
# since general statistics requires the number of duplicates
if save:
write_df(self.dup_counts_df, self.dup_counts_df_fid)
def load_general_stats(self):
self.general_stats_dict = json.load(
open(self.general_stats_json_fid, encoding="utf-8")
)
with open(self.sorted_top_vocab_df_fid, "rb") as f:
self.sorted_top_vocab_df = feather.read_feather(f)
self.text_nan_count = self.general_stats_dict[TEXT_NAN_CNT]
self.dedup_total = self.general_stats_dict[DEDUP_TOT]
self.total_words = self.general_stats_dict[TOT_WORDS]
self.total_open_words = self.general_stats_dict[TOT_OPEN_WORDS]
def prepare_general_stats(self):
if not self.live:
if self.tokenized_df is None:
logs.warning("Tokenized dataset not yet loaded; doing so.")
self.load_or_prepare_tokenized_df()
if self.vocab_counts_df is None:
logs.warning("Vocab not yet loaded; doing so.")
self.load_or_prepare_vocab()
self.sorted_top_vocab_df = self.vocab_counts_filtered_df.sort_values(
"count", ascending=False
).head(_TOP_N)
self.total_words = len(self.vocab_counts_df)
self.total_open_words = len(self.vocab_counts_filtered_df)
self.text_nan_count = int(self.tokenized_df.isnull().sum().sum())
self.prepare_text_duplicates()
self.dedup_total = sum(self.dup_counts_df[CNT])
self.general_stats_dict = {
TOT_WORDS: self.total_words,
TOT_OPEN_WORDS: self.total_open_words,
TEXT_NAN_CNT: self.text_nan_count,
DEDUP_TOT: self.dedup_total,
}
def prepare_text_duplicates(self):
if not self.live:
if self.tokenized_df is None:
self.load_or_prepare_tokenized_df()
dup_df = self.tokenized_df[self.tokenized_df.duplicated([OUR_TEXT_FIELD])]
self.dup_counts_df = pd.DataFrame(
dup_df.pivot_table(
columns=[OUR_TEXT_FIELD], aggfunc="size"
).sort_values(ascending=False),
columns=[CNT],
)
self.dup_counts_df[OUR_TEXT_FIELD] = self.dup_counts_df.index.copy()
def load_or_prepare_dataset(self, save=True):
"""
Prepares the HF datasets and data frames containing the untokenized and
tokenized text as well as the label values.
self.tokenized_df is used further for calculating text lengths,
word counts, etc.
Args:
save: Store the calculated data to disk.
Returns:
"""
logs.info("Doing text dset.")
self.load_or_prepare_text_dset(save)
#logs.info("Doing tokenized dataframe")
#self.load_or_prepare_tokenized_df(save)
logs.info("Doing dataset peek")
self.load_or_prepare_dset_peek(save)
def load_or_prepare_dset_peek(self, save=True):
if self.use_cache and exists(self.dset_peek_json_fid):
with open(self.dset_peek_json_fid, "r") as f:
self.dset_peek = json.load(f)["dset peek"]
else:
if not self.live:
if self.dset is None:
self.get_base_dataset()
self.dset_peek = self.dset[:100]
if save:
write_json({"dset peek": self.dset_peek}, self.dset_peek_json_fid)
def load_or_prepare_tokenized_df(self, save=True):
if self.use_cache and exists(self.tokenized_df_fid):
self.tokenized_df = feather.read_feather(self.tokenized_df_fid)
else:
if not self.live:
# tokenize all text instances
self.tokenized_df = self.do_tokenization()
if save:
logs.warning("Saving tokenized dataset to disk")
# save tokenized text
write_df(self.tokenized_df, self.tokenized_df_fid)
def load_or_prepare_text_dset(self, save=True):
if self.use_cache and exists(self.text_dset_fid):
# load extracted text
self.text_dset = load_from_disk(self.text_dset_fid)
logs.warning("Loaded dataset from disk")
logs.info(self.text_dset)
# ...Or load it from the server and store it anew
else:
if not self.live:
self.prepare_text_dset()
if save:
# save extracted text instances
logs.warning("Saving dataset to disk")
self.text_dset.save_to_disk(self.text_dset_fid)
def prepare_text_dset(self):
if not self.live:
self.get_base_dataset()
# extract all text instances
self.text_dset = self.dset.map(
lambda examples: extract_field(
examples, self.text_field, OUR_TEXT_FIELD
),
batched=True,
remove_columns=list(self.dset.features),
)
def do_tokenization(self):
"""
Tokenizes the dataset
:return:
"""
if self.text_dset is None:
self.load_or_prepare_text_dset()
sent_tokenizer = self.cvec.build_tokenizer()
def tokenize_batch(examples):
# TODO: lowercase should be an option
res = {
TOKENIZED_FIELD: [
tuple(sent_tokenizer(text.lower()))
for text in examples[OUR_TEXT_FIELD]
]
}
res[LENGTH_FIELD] = [len(tok_text) for tok_text in res[TOKENIZED_FIELD]]
return res
tokenized_dset = self.text_dset.map(
tokenize_batch,
batched=True,
# remove_columns=[OUR_TEXT_FIELD], keep around to print
)
tokenized_df = pd.DataFrame(tokenized_dset)
return tokenized_df
def set_label_field(self, label_field="label"):
"""
Setter for label_field. Used in the CLI when a user asks for information
about labels, but does not specify the field;
'label' is assumed as a default.
"""
self.label_field = label_field
def load_or_prepare_labels(self, save=True):
# TODO: This is in a transitory state for creating fig cache.
# Clean up to be caching and reading everything correctly.
"""
Extracts labels from the Dataset
:return:
"""
# extracted labels
if len(self.label_field) > 0:
if self.use_cache and exists(self.fig_labels_json_fid):
self.fig_labels = read_plotly(self.fig_labels_json_fid)
elif self.use_cache and exists(self.label_dset_fid):
# load extracted labels
self.label_dset = load_from_disk(self.label_dset_fid)
self.label_df = self.label_dset.to_pandas()
self.fig_labels = make_fig_labels(
self.label_df, self.label_names, OUR_LABEL_FIELD
)
if save:
write_plotly(self.fig_labels, self.fig_labels_json_fid)
else:
if not self.live:
self.prepare_labels()
if save:
# save extracted label instances
self.label_dset.save_to_disk(self.label_dset_fid)
write_plotly(self.fig_labels, self.fig_labels_json_fid)
def prepare_labels(self):
if not self.live:
self.get_base_dataset()
self.label_dset = self.dset.map(
lambda examples: extract_field(
examples, self.label_field, OUR_LABEL_FIELD
),
batched=True,
remove_columns=list(self.dset.features),
)
self.label_df = self.label_dset.to_pandas()
self.fig_labels = make_fig_labels(
self.label_df, self.label_names, OUR_LABEL_FIELD
)
def load_or_prepare_npmi(self):
self.npmi_stats = nPMIStatisticsCacheClass(self, use_cache=self.use_cache)
self.npmi_stats.load_or_prepare_npmi_terms()
def load_or_prepare_zipf(self, save=True):
# TODO: Current UI only uses the fig, meaning the self.z here is irrelevant
# when only reading from cache. Either the UI should use it, or it should
# be removed when reading in cache
if self.use_cache and exists(self.zipf_fig_fid) and exists(self.zipf_fid):
with open(self.zipf_fid, "r") as f:
zipf_dict = json.load(f)
self.z = Zipf()
self.z.load(zipf_dict)
self.zipf_fig = read_plotly(self.zipf_fig_fid)
elif self.use_cache and exists(self.zipf_fid):
# TODO: Read zipf data so that the vocab is there.
with open(self.zipf_fid, "r") as f:
zipf_dict = json.load(f)
self.z = Zipf()
self.z.load(zipf_dict)
self.zipf_fig = make_zipf_fig(self.vocab_counts_df, self.z)
if save:
write_plotly(self.zipf_fig, self.zipf_fig_fid)
else:
self.z = Zipf(self.vocab_counts_df)
self.zipf_fig = make_zipf_fig(self.vocab_counts_df, self.z)
if save:
write_zipf_data(self.z, self.zipf_fid)
write_plotly(self.zipf_fig, self.zipf_fig_fid)
def _set_idx_col_names(self, input_vocab_df):
if input_vocab_df.index.name != VOCAB and VOCAB in input_vocab_df.columns:
input_vocab_df = input_vocab_df.set_index([VOCAB])
input_vocab_df[VOCAB] = input_vocab_df.index
return input_vocab_df
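# Minimal usage sketch (not called anywhere; the dataset/field names below are
# hypothetical): shows how DatasetStatisticsCacheClass is meant to be driven end to end.
def _example_dstats_run():
    dstats = DatasetStatisticsCacheClass(
        cache_dir="./cache_dir",
        dset_name="imdb",
        dset_config="plain_text",
        split_name="train",
        text_field="text",
        label_field="label",
        label_names=["neg", "pos"],
        use_cache=True,
    )
    dstats.check_cache_dir()
    dstats.load_or_prepare_dataset()
    dstats.load_or_prepare_vocab()
    dstats.load_or_prepare_general_stats()
    return dstats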
class nPMIStatisticsCacheClass:
""" "Class to interface between the app and the nPMI class
by calling the nPMI class with the user's selections."""
def __init__(self, dataset_stats, use_cache=False):
self.live = dataset_stats.live
self.dstats = dataset_stats
self.pmi_cache_path = pjoin(self.dstats.cache_path, "pmi_files")
if not isdir(self.pmi_cache_path):
logs.warning("Creating pmi cache directory %s." % self.pmi_cache_path)
# We need to preprocess everything.
mkdir(self.pmi_cache_path)
self.joint_npmi_df_dict = {}
# TODO: Users ideally can type in whatever words they want.
self.termlist = _IDENTITY_TERMS
# termlist terms that are available more than _MIN_VOCAB_COUNT times
self.available_terms = _IDENTITY_TERMS
logs.info(self.termlist)
self.use_cache = use_cache
# TODO: Let users specify
self.open_class_only = True
self.min_vocab_count = self.dstats.min_vocab_count
self.subgroup_files = {}
self.npmi_terms_fid = pjoin(self.dstats.cache_path, "npmi_terms.json")
def load_or_prepare_npmi_terms(self):
"""
Figures out what identity terms the user can select, based on whether
they occur more than self.min_vocab_count times
:return: Identity terms occurring at least self.min_vocab_count times.
"""
# TODO: Add the user's ability to select subgroups.
# TODO: Make min_vocab_count here value selectable by the user.
if (
self.use_cache
and exists(self.npmi_terms_fid)
and json.load(open(self.npmi_terms_fid))["available terms"] != []
):
available_terms = json.load(open(self.npmi_terms_fid))["available terms"]
else:
true_false = [
term in self.dstats.vocab_counts_df.index for term in self.termlist
]
word_list_tmp = [x for x, y in zip(self.termlist, true_false) if y]
true_false_counts = [
self.dstats.vocab_counts_df.loc[word, CNT] >= self.min_vocab_count
for word in word_list_tmp
]
available_terms = [
word for word, y in zip(word_list_tmp, true_false_counts) if y
]
logs.info(available_terms)
with open(self.npmi_terms_fid, "w+") as f:
json.dump({"available terms": available_terms}, f)
self.available_terms = available_terms
return available_terms
def load_or_prepare_joint_npmi(self, subgroup_pair):
"""
Run on-the fly, while the app is already open,
as it depends on the subgroup terms that the user chooses
:param subgroup_pair:
:return:
"""
# Canonical ordering for subgroup_list
subgroup_pair = sorted(subgroup_pair)
subgroup1 = subgroup_pair[0]
subgroup2 = subgroup_pair[1]
subgroups_str = "-".join(subgroup_pair)
if not isdir(self.pmi_cache_path):
logs.warning("Creating cache")
# We need to preprocess everything.
# This should eventually all go into a prepare_dataset CLI
mkdir(self.pmi_cache_path)
joint_npmi_fid = pjoin(self.pmi_cache_path, subgroups_str + "_npmi.csv")
subgroup_files = define_subgroup_files(subgroup_pair, self.pmi_cache_path)
# Defines the filenames for the cache files from the selected subgroups.
# Get as much precomputed data as we can.
if self.use_cache and exists(joint_npmi_fid):
# When everything is already computed for the selected subgroups.
logs.info("Loading cached joint npmi")
joint_npmi_df = self.load_joint_npmi_df(joint_npmi_fid)
npmi_display_cols = [
"npmi-bias",
subgroup1 + "-npmi",
subgroup2 + "-npmi",
subgroup1 + "-count",
subgroup2 + "-count",
]
joint_npmi_df = joint_npmi_df[npmi_display_cols]
# When maybe some things have been computed for the selected subgroups.
else:
if not self.live:
logs.info("Preparing new joint npmi")
joint_npmi_df, subgroup_dict = self.prepare_joint_npmi_df(
subgroup_pair, subgroup_files
)
# Cache new results
logs.info("Writing out.")
for subgroup in subgroup_pair:
write_subgroup_npmi_data(subgroup, subgroup_dict, subgroup_files)
with open(joint_npmi_fid, "w+") as f:
joint_npmi_df.to_csv(f)
else:
                joint_npmi_df = pd.DataFrame()
#!/usr/bin/env python
import pandas as pd
import numpy as np
from typing import List
TRAIN_DATA_PATH = "train.csv"
TEST_DATA_PATH = "test.csv"
def substrings_in_string(big_string: str, substrings: List[str]):
for substring in substrings:
if big_string.find(substring) != -1:
return substring
# print(big_string)
return np.nan
def replace_missing_fare_to_class_mean(df):
df["Fare"] = df["Fare"].map(lambda x: np.nan if x == 0 else x)
classmeans = pd.pivot_table(df, index='Pclass', values='Fare', aggfunc='mean')
df['Fare'] = df.apply(
lambda x: classmeans['Fare'][x['Pclass']] if pd.isnull(x['Fare']) else x['Fare'],
axis=1
)
def fill_cabin_embarked_to_na_unknown_mode(df):
df['Cabin'] = df['Cabin'].fillna('Unknown')
from scipy.stats import mode
modeEmbarked = mode(df['Embarked'])[0][0]
df['Embarked'] = df['Embarked'].fillna(modeEmbarked)
def replace_titles_to_4_class(df):
title_list = [
'Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev', 'Dr', 'Ms', 'Mlle', 'Col', 'Capt',
'Mme', 'Countess', 'Don', 'Jonkheer'
]
df['Title'] = df['Name'].map(lambda x: substrings_in_string(x, title_list))
def replace_titles(x):
title = x['Title']
if title in ['Don', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col']:
return 'Mr'
elif title in ['Countess', 'Mme']:
return 'Mrs'
elif title in ['Mlle', 'Ms']:
return 'Miss'
elif title == 'Dr':
if x['Sex'] == 'Male':
return 'Mr'
else:
return 'Mrs'
else:
return title
df['Title'] = df.apply(replace_titles, axis=1)
def turning_cabin_number_into_deck(df):
cabin_list = ['A', 'B', 'C', 'D', 'E', 'F', 'T', 'G', 'Unknown']
df['Deck'] = df['Cabin'].map(lambda x: substrings_in_string(str(x), cabin_list))
def turning_fare_per_pclass(df):
df['Fare_per_pclass'] = df.apply(lambda x: x['Fare'] / (4 - x['Pclass']), axis=1)
if __name__ == "__main__":
path = TRAIN_DATA_PATH
    df = pd.read_csv(path)
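    # Plausible continuation (assumption): apply the cleaning/feature steps defined above.
    replace_missing_fare_to_class_mean(df)
    fill_cabin_embarked_to_na_unknown_mode(df)
    replace_titles_to_4_class(df)
    turning_cabin_number_into_deck(df)
    turning_fare_per_pclass(df)
    print(df.head())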
import os
import pandas as pd
import geopandas as gpd
import numpy as np
import sys
wd = '/disk/bulkw/karger/census_bulk/citylonglat/'
os.chdir(wd)
sys.path.append(wd + 'programs/02_geocode')
import matching_functions as mf
# define dictionary with townvariables by decade here:
townvars_dict = {1790: ['township'],
1800: ['township'],
1810: ['township'],
1820: ['township'],
1830: ['general_township_orig'],
1840: ['locality'],
1850: ['stdcity', 'us1850c_0043', 'us1850c_0053',
'us1850c_0054', 'us1850c_0042'],
1860: ['us1860c_0040', 'us1860c_0042', 'us1860c_0036'],
1870: ['us1870c_0040', 'us1870c_0042', 'us1870c_0043',
'us1870c_0044', 'us1870c_0035', 'us1870c_0036'],
1880: ['mcdstr', 'us1880e_0071', 'us1880e_0069',
'us1880e_0072', 'us1880e_0070'],
1900: ['stdcity', 'us1900m_0045', 'us1900m_0052'],
1910: ['stdcity', 'us1910m_0052', 'us1910m_0053',
'us1910m_0063'],
1920: ['stdmcd', 'stdcity', 'us1920c_0057',
'us1920c_0058', 'us1920c_0068', 'us1920c_0069'],
1930: ['stdmcd', 'stdcity'],
1940: ['stdcity', 'us1940b_0073', 'us1940b_0074']}
# define dictionary with county and state types here:
county_state_types_dict = {1790: ('name', 'name'),
1800: ('name', 'name'),
1810: ('name', 'name'),
1820: ('name', 'name'),
1830: ('name', 'name'),
1840: ('name', 'name'),
1850: ('fips', 'icp'),
1860: ('fips', 'fips'),
1870: ('fips', 'fips'),
1880: ('fips', 'icp'),
1900: ('fips', 'fips'),
1910: ('fips', 'fips'),
1920: ('fips', 'fips'),
1930: ('fips', 'fips'),
1940: ('fips', 'fips')}
already_done = {}
first_round = True
while True:
# enumeration district step
for decade in np.arange(1850, 1950, 10):
if decade == 1890:
continue
if decade == 1850 or decade >= 1880:
if first_round:
census_cities = pd.read_csv(
'intermediate/census_gnis_coords_gmaps_pre1910_' + str(decade) + '.csv')
if 'index' in census_cities.columns:
census_cities = census_cities.drop(columns='index')
else:
census_cities = pd.read_csv('intermediate/census_gnis_coords_' + str(decade) + '_temp.csv')
if 'index' in census_cities.columns:
census_cities = census_cities.drop(columns='index')
census_cities.columns = [column.lower() for column in census_cities.columns]
town_variables = townvars_dict[decade]
census_cities['enumdist'] = census_cities['enumdist'].astype(int)
census_cities = mf.assign_cities_by_enumdist(census_cities, town_variables)
census_cities.to_csv('intermediate/census_gnis_coords_' + str(decade) + '_temp.csv', index=False)
first_round = False
# fill in the gaps
tagged_cities_dict = {}
# step 1: build the dictionary
for decade in np.arange(1790, 1950, 10):
if decade == 1890:
continue
if os.path.exists('intermediate/census_gnis_coords_' + str(decade) + '_temp.csv'):
census_cities = pd.read_csv('intermediate/census_gnis_coords_' + str(decade) + '_temp.csv')
if 'index' in census_cities.columns:
census_cities = census_cities.drop(columns='index')
else:
census_cities = pd.read_csv('intermediate/census_gnis_coords_gmaps_pre1910_' + str(decade) + '.csv')
if 'index' in census_cities.columns:
census_cities = census_cities.drop(columns='index')
town_variables = townvars_dict[decade]
for townvar in town_variables:
temp_df = census_cities[(census_cities[townvar].notnull()) &
(census_cities['state_abb'].notnull()) &
(census_cities['lat'].notnull())]
tagged_cities_dict.update(dict(zip(list(temp_df[townvar] + ',' + temp_df['state_abb']),
list(temp_df['lat'].astype(str) + ',' + temp_df['long'].astype(str)))))
# step 2: assign lat, longs across decades
nchanges = 0
for decade in np.arange(1790, 1950, 10):
if decade == 1890:
continue
if os.path.exists('intermediate/census_gnis_coords_' + str(decade) + '_temp.csv'):
census_cities = pd.read_csv('intermediate/census_gnis_coords_' + str(decade) + '_temp.csv')
if 'index' in census_cities.columns:
census_cities = census_cities.drop(columns='index')
else:
census_cities = pd.read_csv('intermediate/census_gnis_coords_gmaps_pre1910_' + str(decade) + '.csv')
if 'index' in census_cities.columns:
census_cities = census_cities.drop(columns='index')
town_variables = townvars_dict[decade]
# determine city-variable order
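        # town variables whose values are mostly unique within a state/county pair
        # are matched first (see the argsort over share_unique below)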
share_unique = []
for townvar in town_variables:
share_unique.append(
len(census_cities.loc[(census_cities[townvar].notnull()) & (census_cities[townvar] != '')]
.drop_duplicates(['state', 'county', townvar], keep='first')) /
len(census_cities.loc[(census_cities[townvar].notnull()) & (census_cities[townvar] != ''), townvar]))
county_type, state_type = county_state_types_dict[decade]
county_gdf = mf.load_county_shape('shape_files/US_county_' + str(decade) + '_conflated.shp', county_type)
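        # NHGISST appears to carry a trailing digit (state FIPS * 10); the block below
        # rebuilds an integer state+county FIPS key (ICPSRFIP) from the shapefile columns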
if county_type == 'fips':
county_gdf['NHGISST'] = county_gdf['NHGISST'].astype(int) / 10
county_gdf['ICPSRFIP'] = county_gdf['NHGISST'].astype(int).astype(str) + \
county_gdf['ICPSRCTYI'].astype(int).astype(str).str.zfill(4)
county_gdf['ICPSRFIP'] = county_gdf['ICPSRFIP'].astype(int)
for townvar in list(np.array(town_variables)[np.argsort(share_unique)[::-1]]):
for i in range(len(census_cities)):
row = census_cities.iloc[i]
                if pd.notnull(row['lat']):
import pandas as pd
# Data Import
url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv'
df = pd.read_csv(url)
# Data selection
df = df[['iso_code', 'continent', 'location', 'date', 'total_cases', 'new_cases', 'total_deaths', 'new_deaths',
'total_cases_per_million', 'new_cases_per_million', 'total_deaths_per_million', 'new_deaths_per_million',
'icu_patients', 'icu_patients_per_million', 'hosp_patients', 'hosp_patients_per_million', 'new_tests',
'total_tests', 'total_tests_per_thousand', 'new_tests_per_thousand', 'positive_rate', 'tests_per_case',
'tests_units', 'total_vaccinations', 'people_vaccinated', 'people_fully_vaccinated', 'new_vaccinations',
'stringency_index', 'population']]
# NA Data Drop
df = df.dropna(subset=['date', 'continent', 'location'])
# Data Transform
df['date'] = pd.to_datetime(df['date'])
# -----------------------------------------------------------
# <NAME>
# -----------------------------------------------------------
import streamlit as st
import pandas as pd
import numpy as np
from sodapy import Socrata
import pydeck as pdk
import plotly.express as px
import requests
# from IPython.display import Image
with open("style.css") as f:
st.markdown('<style>{}</style>'.format(f.read()), unsafe_allow_html=True)
@st.cache(persist=True)
def giphy_path():
path = "https://media.giphy.com/media/rS9tqucvXWwuY/giphy.gif"
return path
path = giphy_path()
points_table_data_url = 'https://www.iplt20.com/points-table/2020'
html = requests.get(points_table_data_url).content
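# pd.read_html parses every <table> element on the page; the last table is taken
# as the current points table.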
df_list_points_table = pd.read_html(html)
df_points_table = df_list_points_table[-1]
# print(df)
# df = pd.DataFrame(df)
st.markdown("<h1 style='text-align: center; color: #9C021B;'><strong>🏏 <u>IPL 2020 Dashboard</u> 🏏</strong></h1>", unsafe_allow_html=True)
st.markdown("_________________________________________________________________________________")
# st.markdown("<h4 style='text-align: center; color: #9C021B;'><hr></h4>", unsafe_allow_html=True)
st.markdown("<h4 style='text-align: center; color: #E07811;'>You can check latest Status of IPL 2020 along with stats of top Players</h4>", unsafe_allow_html=True)
# st.markdown("You can check latest Status of **IPL 2020** along with stats of top Players 🏏")
st.markdown(" ")
# st.markdown(" ")
@st.cache(persist=True)
def load_data_point_table():
data = pd.DataFrame(df_points_table)
data.rename(columns={'Unnamed: 0': 'Position', 'Pld': 'Match Played', 'Pts': 'Points', 'Form':'Status of Past Matches'}, inplace=True)
data = data.replace(np.nan, 'Not Played yet')
return data
data_point_table = load_data_point_table()
# st.header("Points Table of IPL 2020")
st.markdown("<h2 style='text-align: center; color: blue;'><strong><u>Points Table of IPL 2020</u></strong></h2>", unsafe_allow_html=True)
st.write(data_point_table)
st.markdown("_________________________________________________________________________________")
# Batting & Bowling stats of all team
batting_stats_data_url = 'https://www.iplt20.com/stats/2020/most-runs'
# most_run_data_url = 'https://www.iplt20.com/stats/2020/most-runs'
html = requests.get(batting_stats_data_url).content
df_list_batting_stat = pd.read_html(html)
df_batting_stat = df_list_batting_stat[-1]
# print(df)
# st.markdown(" ")
# st.markdown(" ")
# st.header("Check Top Performers of Ongoing IPL Season")
st.markdown("<h2 style='text-align: center; color: green;'><strong><u>Check Top Performers of Ongoing IPL Season</u></strong></h2>", unsafe_allow_html=True)
select_bat_bowl = st.selectbox('Which stats do you want to check?', ['--Select--', 'Batting stats', 'Bowling stats'])
if select_bat_bowl == 'Batting stats':
@st.cache(persist=True)
def load_data_batting_table():
data = pd.DataFrame(df_batting_stat)
data.rename(columns={'POS':'Position.', 'PLAYER': 'Player', 'Mat': 'Matches','Inns': 'Innings', 'NO':'Not Outs','HS': 'Highest Score',
'Avg': 'Average', 'BF': 'Ball Faced', 'SR': 'Strike Rate' }, inplace=True)
# data = data.replace(np.nan, 'Not Played yet')
return data
data_batting_stats = load_data_batting_table()
if st.checkbox("Show Top 20 Batsman List (in terms of Total Runs Scored)", False):
# st.header("Batting Stats of top Players")
st.markdown("<h3 style='text-align: center; color: #4BC401;'><strong>Batting Stats of top Players</strong></h3>", unsafe_allow_html=True)
st.write(data_batting_stats.head(20))
st.markdown("_________________________________________________________________________________")
# st.subheader("Check Top 3 Best Batsman in Selective categories")
st.markdown("<h3 style='text-align: center; color: orangered;'><strong>Check Top 3 Best Batsman in Selective categories</strong></h3>", unsafe_allow_html=True)
select_category = st.selectbox('Choose the Performance category', ['--Select--', 'Top Run Scorer', 'Highest Strike Rate', 'Best Average'])
if select_category == 'Top Run Scorer':
df_bat_total_score = data_batting_stats.sort_values(by=['Runs'], ascending=False).head(3)
x = np.arange(1, 4)
df_bat_total_score['Position'] = x
data_batting_stats_new = df_bat_total_score[['Position', 'Player', 'Runs']].head(3)
fig = px.bar(data_batting_stats_new, x='Player', y='Runs',text='Runs', hover_data=['Position','Player', 'Runs'], color='Player')
fig.update_traces(texttemplate='%{text:.s}', textposition='inside')
fig.update_layout(uniformtext_minsize=12, uniformtext_mode='hide')
fig.update_layout(xaxis_title="Batsman",
yaxis_title="Total Runs Scored",
legend_title="Players",
font=dict(
family="Arial",
size=16,
color="RebeccaPurple"
))
fig.update_layout(title={'text': "Top 3 Most run scorer Batsman",
'y':0.95,
'x':0.43,
'xanchor': 'center',
'yanchor': 'top'})
st.write(fig)
st.markdown("_________________________________________________________________________________")
elif select_category == 'Highest Strike Rate':
df_bat_sr = data_batting_stats.sort_values(by=['Strike Rate'], ascending=False).head(3)
x = np.arange(1, 4)
df_bat_sr['Position'] = x
data_batting_stats_sr = df_bat_sr[['Position', 'Player', 'Strike Rate']].head(3)
fig2 = px.bar(data_batting_stats_sr, x='Player', y='Strike Rate',text='Strike Rate', hover_data=['Position','Player', 'Strike Rate'], color='Player')
fig2.update_traces(texttemplate='%{text:.4s}', textposition='inside')
fig2.update_layout(uniformtext_minsize=12, uniformtext_mode='hide')
fig2.update_layout(xaxis_title="Batsman",
yaxis_title="Strike Rate",
legend_title="Players",
font=dict(
family="Arial",
size=16,
color="RebeccaPurple"
))
fig2.update_layout(title={'text': "Top 3 Batsman with Highest Strike Rate)",
'y':0.95,
'x':0.43,
'xanchor': 'center',
'yanchor': 'top'})
st.write(fig2)
st.markdown("🤔💡 * **Batting Strike rate** is measure of runs per ball. It is calculated in **%** *")
st.markdown("_________________________________________________________________________________")
elif select_category == 'Best Average':
best_avg_data_url = 'https://www.iplt20.com/stats/2020/best-batting-average'
# most_run_data_url = 'https://www.iplt20.com/stats/2020/most-runs'
html = requests.get(best_avg_data_url).content
df_list_avg = pd.read_html(html)
df_batting_stat_best_avg = df_list_avg[-1]
@st.cache(persist=True)
def load_data_batting_table():
data = pd.DataFrame(df_batting_stat_best_avg)
data.rename(columns={'PLAYER':'Player', 'Avg': 'Average'}, inplace=True)
# data = data.replace(np.nan, 'Not Played yet')
return data
data_best_avg_stats = load_data_batting_table()
df_bat_bestavg = data_best_avg_stats.sort_values(by=['Average'], ascending=False).head(3)
x = np.arange(1, 4)
df_bat_bestavg['Position'] = x
data_batting_stats_bestavg = df_bat_bestavg[['Position', 'Player', 'Average']].head(3)
fig2 = px.bar(data_batting_stats_bestavg, x='Player', y='Average',text='Average', hover_data=['Position','Player', 'Average'], color='Player')
fig2.update_traces(texttemplate='%{text:.4s}', textposition='inside')
fig2.update_layout(uniformtext_minsize=12, uniformtext_mode='hide')
fig2.update_layout(xaxis_title="Batsman",
yaxis_title="Best Average",
legend_title="Players",
font=dict(
family="Arial",
size=16,
color="RebeccaPurple"
))
fig2.update_layout(title={'text': "Top 3 Batsman with Best Average",
'y':0.95,
'x':0.42,
'xanchor': 'center',
'yanchor': 'top'})
st.write(fig2)
st.markdown("🤔💡 * **Batting average** is the **total number of runs** a batsman have scored divided by the **number of times** they have been out*")
st.markdown("_________________________________________________________________________________")
##############################################################################################################################
##################### Bowling
bowling_stats_data_url = 'https://www.iplt20.com/stats/2020/most-wickets'
# most_run_data_url = 'https://www.iplt20.com/stats/2020/most-runs'
html = requests.get(bowling_stats_data_url).content
df_list_bowling_stat = pd.read_html(html)
df_bowling_stat = df_list_bowling_stat[-1]
if select_bat_bowl == 'Bowling stats':
@st.cache(persist=True)
def load_data_bowling_table():
data = pd.DataFrame(df_bowling_stat)
data.rename(columns={'POS':'Position.', 'PLAYER': 'Player', 'Mat': 'Matches','Inns': 'Innings', 'Ov':'Overs','Wkts': 'Wickets taken',
'BBI': 'Best Bowling in a Inns', 'Avg': 'Average', 'Econ': 'Economy Rate', 'SR': 'Strike Rate' }, inplace=True)
# data = data.replace(np.nan, 'Not Played yet')
return data
data_bowling_stats = load_data_bowling_table()
if st.checkbox("Show Top 20 Bowlers List (in terms of Total number of wickets taken)", False):
# st.header("Bowling Stats of top Players")
st.markdown("<h3 style='text-align: center; color: #4BC401;'><strong>Bowling Stats of top Players</strong></h3>", unsafe_allow_html=True)
st.write(data_bowling_stats.head(20))
st.markdown("_________________________________________________________________________________")
# st.subheader("Check Top 3 Best Bowlers in Selective categories")
st.markdown("<h3 style='text-align: center; color: purple;'><strong>Check Top 3 Best Bowlers in Selective categories</strong></h3>", unsafe_allow_html=True)
select_category = st.selectbox('Choose the Performance category', ['--Select--', 'Top Wicket Taker', 'Best Economy Rate', 'Best Bowling Average'])
######################################### !st category
if select_category == 'Top Wicket Taker':
df_bowl_top_wicket = data_bowling_stats.sort_values(by=['Wickets taken'], ascending=False).head(3)
x = np.arange(1, 4)
df_bowl_top_wicket['Position'] = x
data_bowl_stats_top_wkt = df_bowl_top_wicket[['Position', 'Player', 'Wickets taken']].head(3)
fig = px.bar(data_bowl_stats_top_wkt, x='Player', y='Wickets taken',text='Wickets taken', hover_data=['Position','Player', 'Wickets taken'], color='Player')
fig.update_traces(texttemplate='%{text:.s}', textposition='inside')
fig.update_layout(uniformtext_minsize=12, uniformtext_mode='hide')
fig.update_layout(xaxis_title="Bowlers",
yaxis_title="Wickets taken so far",
legend_title="Players",
font=dict(
family="Arial",
size=16,
color="RebeccaPurple"
))
fig.update_layout(title={'text': "Top 3 Most Wicket taker",
'y':0.95,
'x':0.42,
'xanchor': 'center',
'yanchor': 'top'})
st.write(fig)
st.markdown("_________________________________________________________________________________")
elif select_category == 'Best Economy Rate':
df_bowl_best_er = data_bowling_stats.sort_values(by=['Economy Rate'], ascending=True).head(3)
x = np.arange(1, 4)
df_bowl_best_er['Position'] = x
data_bowl_stats_best_er = df_bowl_best_er[['Position', 'Player', 'Economy Rate']].head(3)
fig = px.bar(data_bowl_stats_best_er, x='Player', y='Economy Rate', text='Economy Rate', hover_data=['Position','Player', 'Economy Rate'], color='Player')
fig.update_traces(texttemplate='%{text:.4s}', textposition='inside')
fig.update_layout(uniformtext_minsize=12, uniformtext_mode='hide')
fig.update_layout(xaxis_title="Bowlers",
yaxis_title="Economy Rate",
legend_title="Players",
font=dict(
family="Arial",
size=16,
color="RebeccaPurple"
))
fig.update_layout(title={'text': "Top 3 Bowlers with Best Economy rate",
'y':0.95,
'x':0.40,
'xanchor': 'center',
'yanchor': 'top'})
st.write(fig)
st.markdown("🤔💡 * **Economy rate** is the number of runs a bowler have conceded per over bowled. The **lower** the economy rate is, the **better** the bowler is performing.*")
st.markdown("_________________________________________________________________________________")
elif select_category == 'Best Bowling Average':
        data_bowling_stats = data_bowling_stats[data_bowling_stats.Average != '-']
        # ensure 'Average' is numeric before sorting ('-' placeholders can leave it as text)
        data_bowling_stats['Average'] = pd.to_numeric(data_bowling_stats['Average'])
        df_bowl_best_avg = data_bowling_stats.sort_values(by=['Average'], ascending=True).head(3)
x = np.arange(1, 4)
df_bowl_best_avg['Position'] = x
data_bowl_stats_best_avg = df_bowl_best_avg[['Position', 'Player', 'Average']].head(3)
fig = px.bar(data_bowl_stats_best_avg, x='Player', y='Average',text='Average', hover_data=['Position','Player', 'Average'], color='Player', width=750)
fig.update_traces(texttemplate='%{text:.4s}', textposition='inside')
fig.update_layout(uniformtext_minsize=12, uniformtext_mode='hide')
fig.update_layout(xaxis_title="Bowlers",
yaxis_title="Average",
legend_title="Players",
font=dict(
family="Arial",
size=16,
color="RebeccaPurple"
))
fig.update_layout(title={'text': "Top 3 Bowlers with Best Bowling Average",
'y':0.95,
'x':0.40,
'xanchor': 'center',
'yanchor': 'top'})
st.write(fig)
st.markdown("🤔💡 * **Bowling average** is the number of runs a bowler have conceded per wicket taken. The **lower** the bowling average is, the **better** the bowler is performing*")
st.markdown("_________________________________________________________________________________")
#########################################################################################################################################
#########################################################################################################################################
# st.sidebar.title("Schedule of IPL 2020")
st.sidebar.markdown("<h1 style='text-align: center; color: #F34600;'><u><strong>Schedule of IPL 2020</strong></u></h1>", unsafe_allow_html=True)
# st.sidebar.markdown("You can check the scheduled matches of IPL 2020, along with various other details like timing, Venue etc.")
st.sidebar.markdown("<h4 style='text-align: center; '>You can check all the scheduled matches of IPL 2020, along with various other details like timing, Venue etc.</h4>", unsafe_allow_html=True)
st.sidebar.markdown("_________________________________________________________________________________")
# st.sidebar.markdown(" ")
# st.sidebar.markdown(" ")
# st.sidebar.subheader("Want to check all scheduled Matches?")
st.sidebar.markdown("<h2 style='text-align: center; color: #BD08D3;'><strong>Want to check all scheduled Matches?</strong></h2>", unsafe_allow_html=True)
if st.sidebar.checkbox("Show IPL 2020 scheduled matches", False):
st.markdown(" ")
# st.markdown(" ")
# st.header("IPL 2020 Scheduled Matches")
st.markdown("<h2 style='text-align: center; color: #BD08D3;'><strong><u>All Scheduled matches of IPL 2020</u></strong></h2>", unsafe_allow_html=True)
# st.header("Bowling Stats of top Players")
    all_match = st.slider("Adjust the slider to show more scheduled matches", 20, 60)
st.markdown("<h4 style='text-align: center;'><i>You can view in fullscreen for better visibility of Schedule</i></h4>", unsafe_allow_html=True)
# st.markdown("You can expand the view for better visibility of Time table")
image = pd.read_csv('ipl_schedule.csv')
# st.image(image, use_column_width=True)
    imag = image[['Match No', 'Match Center', 'Date', 'Day', 'Time India (IST)', 'Venuue']]
    # imag = imag.style.hide_index()
    # imag.set_index('column', inplace=True)
    st.write(imag.head(all_match))
st.markdown("_________________________________________________________________________________")
# st.table(imag.assign(hack='').set_index('hack').head(all_match))
# st.sidebar.markdown(" ")
# st.sidebar.markdown(" ")
# st.sidebar.subheader("Want to check scheduled Matches of your favourite team?")
st.sidebar.markdown("_________________________________________________________________________________")
st.sidebar.markdown("<h2 style='text-align: center; color: #5A2553;'><strong>Want to check scheduled Matches of your favourite team?</strong></h2>", unsafe_allow_html=True)
favourite_team = st.sidebar.selectbox("Which is your favourite team?", ['--Select--', 'Chennai Super Kings (CSK)', 'Mumbai Indians (MI)', 'Royal Challengers Bangalore (RCB)', 'Sunrisers Hyderabad (SRH)', 'Delhi Capitals (DC)', 'Kings Eleven Punjab (KXIP)', 'Rajasthan Royals (RR)', 'Kolkata Knight Riders (KKR)'])
if favourite_team == 'Chennai Super Kings (CSK)':
# @st.cache(persist=True)
csk = pd.read_csv('csk_schedule.csv')
# st.image(image, use_column_width=True)
csk= csk[['Match No', 'Match Center', 'Date', 'Day', 'Time India (IST)', 'Venuue']]
# imag = imag.style.hide_index()
# imag.set_index('column', inplace=True)
# st.write(csk)
st.markdown(" ")
# st.markdown(" ")
# st.header("Chennai Super Kings full Schedule of Matches")
st.markdown("<h2 style='text-align: center; color: #DBB000;'><strong><u>Chennai Super Kings full Schedule of Matches</strong></h2>", unsafe_allow_html=True)
st.table(csk.assign(hack='').set_index('hack'))
# st.markdown("_________________________*****___________________________")
st.markdown("<h2 style='text-align: center; color: #DBB000;'><strong><u>_____________________*****_____________________</strong></h2>", unsafe_allow_html=True)
#
elif favourite_team == 'Mumbai Indians (MI)':
mi = pd.read_csv('mi_schedule.csv')
mi= mi[['Match No', 'Match Center', 'Date', 'Day', 'Time India (IST)', 'Venuue']]
st.markdown(" ")
# st.markdown(" ")
# st.header("Mumbai Indians full Schedule of Matches")
st.markdown("<h2 style='text-align: center; color: #006EC9;'><strong><u>Mumbai Indians full Schedule of Matches</u></strong></h2>", unsafe_allow_html=True)
st.table(mi.assign(hack='').set_index('hack'))
st.markdown("<h2 style='text-align: center; color: #006EC9;'><strong><u>_____________________*****_____________________</strong></h2>", unsafe_allow_html=True)
elif favourite_team == 'Royal Challengers Bangalore (RCB)':
rcb = pd.read_csv('rcb_schedule.csv')
rcb= rcb[['Match No', 'Match Center', 'Date', 'Day', 'Time India (IST)', 'Venuue']]
st.markdown(" ")
# st.markdown(" ")
# st.header("Royal Challengers Benglore full Schedule of Matches")
st.markdown("<h2 style='text-align: center; color: #F30922;'><strong><u>Royal Challengers Benglore full Schedule of Matches</u></strong></h2>", unsafe_allow_html=True)
st.table(rcb.assign(hack='').set_index('hack'))
st.markdown("<h2 style='text-align: center; color: #F30922;'><strong><u>_____________________*****_____________________</strong></h2>", unsafe_allow_html=True)
elif favourite_team == 'Sunrisers Hyderabad (SRH)':
srh = pd.read_csv('srh_schedule.csv')
srh= srh[['Match No', 'Match Center', 'Date', 'Day', 'Time India (IST)', 'Venuue']]
st.markdown(" ")
# st.markdown(" ")
# st.header("Sunrisers Hyderabad full Schedule of Matches")
st.markdown("<h2 style='text-align: center; color: #F32C09;'><strong><u>Sunrisers Hyderabad full Schedule of Matches</u></strong></h2>", unsafe_allow_html=True)
st.table(srh.assign(hack='').set_index('hack'))
st.markdown("<h2 style='text-align: center; color: #F32C09;'><strong><u>_____________________*****_____________________</strong></h2>", unsafe_allow_html=True)
elif favourite_team == 'Delhi Capitals (DC)':
dc = pd.read_csv('dc_schedule.csv')
dc= dc[['Match No', 'Match Center', 'Date', 'Day', 'Time India (IST)', 'Venuue']]
st.markdown(" ")
# st.markdown(" ")
# st.header("Delhi Capitals full Schedule of Matches")
st.markdown("<h2 style='text-align: center; color: #4403E3;'><strong><u>Delhi Capitals full Schedule of Matches</u></strong></h2>", unsafe_allow_html=True)
st.table(dc.assign(hack='').set_index('hack'))
st.markdown("<h2 style='text-align: center; color: #4403E3;'><strong><u>_____________________*****_____________________</strong></h2>", unsafe_allow_html=True)
elif favourite_team == 'Kings Eleven Punjab (KXIP)':
punjab = pd.read_csv('punjab_schedule.csv')
punjab= punjab[['Match No', 'Match Center', 'Date', 'Day', 'Time India (IST)', 'Venuue']]
st.markdown(" ")
# st.markdown(" ")
# st.header("RKings Eleven Punjab full Schedule of Matches")
st.markdown("<h2 style='text-align: center; color: #E10000;'><strong><u>Kings Eleven Punjab full Schedule of Matches</u></strong></h2>", unsafe_allow_html=True)
st.table(punjab.assign(hack='').set_index('hack'))
st.markdown("<h2 style='text-align: center; color: #E10000;'><strong><u>_____________________*****_____________________</strong></h2>", unsafe_allow_html=True)
elif favourite_team == 'Rajasthan Royals (RR)':
rr = pd.read_csv('rajasthan_schedule.csv')
rr= rr[['Match No', 'Match Center', 'Date', 'Day', 'Time India (IST)', 'Venuue']]
st.markdown(" ")
# st.markdown(" ")
# st.header("Rajasthan Royals full Schedule of Matches")
st.markdown("<h2 style='text-align: center; color: #F519AC;'><strong><u>Rajasthan Royals full Schedule of Matches</u></strong></h2>", unsafe_allow_html=True)
st.table(rr.assign(hack='').set_index('hack'))
st.markdown("<h2 style='text-align: center; color: #F519AC;'><strong><u>_____________________*****_____________________</strong></h2>", unsafe_allow_html=True)
elif favourite_team == 'Kolkata Knight Riders (KKR)':
    kkr = pd.read_csv('kkr_schedule.csv')
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import decimal
import json
import multiprocessing as mp
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as tm
import pytest
import pyarrow as pa
import pyarrow.types as patypes
from pyarrow.compat import PY2
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
def _alltypes_example(size=100):
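    # DataFrame with one column per primitive dtype exercised by the conversion
    # tests (ints, floats, bool, datetime64[ms], strings with and without nulls).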
return pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
        # TODO(wesm): Pandas only supports ns resolution; Arrow supports s, ms,
# us, ns
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
def _check_pandas_roundtrip(df, expected=None, use_threads=True,
expected_schema=None,
check_dtype=True, schema=None,
preserve_index=False,
as_batch=False):
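    # Convert df to an Arrow Table/RecordBatch, back to pandas, and compare with
    # `expected` (df itself by default); optionally check the resulting Arrow schema.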
klass = pa.RecordBatch if as_batch else pa.Table
table = klass.from_pandas(df, schema=schema,
preserve_index=preserve_index,
nthreads=2 if use_threads else 1)
result = table.to_pandas(use_threads=use_threads)
if expected_schema:
        # all occurrences of _check_pandas_roundtrip pass expected_schema
        # without the pandas-generated key-value metadata, so we need to
# add it before checking schema equality
expected_schema = expected_schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
if expected is None:
expected = df
tm.assert_frame_equal(result, expected, check_dtype=check_dtype,
check_index_type=('equiv' if preserve_index
else False))
def _check_series_roundtrip(s, type_=None, expected_pa_type=None):
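    # Series -> pa.array -> Series round trip; tz-aware timestamps are re-localized
    # to the array's timezone before comparison.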
arr = pa.array(s, from_pandas=True, type=type_)
if type_ is not None and expected_pa_type is None:
expected_pa_type = type_
if expected_pa_type is not None:
assert arr.type == expected_pa_type
result = pd.Series(arr.to_pandas(), name=s.name)
if patypes.is_timestamp(arr.type) and arr.type.tz is not None:
result = (result.dt.tz_localize('utc')
.dt.tz_convert(arr.type.tz))
tm.assert_series_equal(s, result)
def _check_array_roundtrip(values, expected=None, mask=None,
type=None):
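    # values -> pa.array (honouring `mask`) -> pandas; checks the null count and,
    # when a mask is given, compares against a masked-array expectation.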
arr = pa.array(values, from_pandas=True, mask=mask, type=type)
result = arr.to_pandas()
values_nulls = pd.isnull(values)
if mask is None:
assert arr.null_count == values_nulls.sum()
else:
assert arr.null_count == (mask | values_nulls).sum()
if mask is None:
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
else:
expected = pd.Series(np.ma.masked_array(values, mask=mask))
tm.assert_series_equal(pd.Series(result), expected,
check_names=False)
def _check_array_from_pandas_roundtrip(np_array, type=None):
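    # numpy array -> pa.array -> numpy; values must round-trip unchanged.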
arr = pa.array(np_array, from_pandas=True, type=type)
result = arr.to_pandas()
npt.assert_array_equal(result, np_array)
class TestConvertMetadata(object):
"""
Conversion tests for Pandas metadata & indices.
"""
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3]})
table = pa.Table.from_pandas(df)
assert table.column(0).name == '0'
def test_from_pandas_with_columns(self):
df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]})
table = pa.Table.from_pandas(df, columns=[0, 1])
expected = pa.Table.from_pandas(df[[0, 1]])
assert expected.equals(table)
record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])
record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])
assert record_batch_expected.equals(record_batch_table)
def test_column_index_names_are_preserved(self):
df = pd.DataFrame({'data': [1, 2, 3]})
df.columns.names = ['a']
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns(self):
columns = pd.MultiIndex.from_arrays([
['one', 'two'], ['X', 'Y']
])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_with_dtypes(self):
columns = pd.MultiIndex.from_arrays(
[
['one', 'two'],
pd.DatetimeIndex(['2017-08-01', '2017-08-02']),
],
names=['level_1', 'level_2'],
)
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_unicode(self):
columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_integer_index_column(self):
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
_check_pandas_roundtrip(df, preserve_index=True)
def test_index_metadata_field_name(self):
# test None case, and strangely named non-index columns
df = pd.DataFrame(
[(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],
index=pd.MultiIndex.from_arrays(
[['c', 'b', 'a'], [3, 2, 1]],
names=[None, 'foo']
),
columns=['a', None, '__index_level_0__'],
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
col1, col2, col3, idx0, foo = js['columns']
assert col1['name'] == 'a'
assert col1['name'] == col1['field_name']
assert col2['name'] is None
assert col2['field_name'] == 'None'
assert col3['name'] == '__index_level_0__'
assert col3['name'] == col3['field_name']
idx0_name, foo_name = js['index_columns']
assert idx0_name == '__index_level_0__'
assert idx0['field_name'] == idx0_name
assert idx0['name'] is None
assert foo_name == 'foo'
assert foo['field_name'] == foo_name
assert foo['name'] == foo_name
def test_categorical_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), dtype='category')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'categorical'
assert column_indexes['numpy_type'] == 'int8'
md = column_indexes['metadata']
assert md['num_categories'] == 3
assert md['ordered'] is False
def test_string_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), name='stringz')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] == 'stringz'
assert column_indexes['name'] == column_indexes['field_name']
assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')
assert column_indexes['numpy_type'] == 'object'
md = column_indexes['metadata']
if not PY2:
assert len(md) == 1
assert md['encoding'] == 'UTF-8'
else:
assert md is None or 'encoding' not in md
def test_datetimetz_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'datetimetz'
assert column_indexes['numpy_type'] == 'datetime64[ns]'
md = column_indexes['metadata']
assert md['timezone'] == 'America/New_York'
def test_datetimetz_row_index(self):
df = pd.DataFrame({
'a': pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
})
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_categorical_row_index(self):
df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
df['a'] = df.a.astype('category')
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_duplicate_column_names_does_not_crash(self):
df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))
with pytest.raises(ValueError):
pa.Table.from_pandas(df)
def test_dictionary_indices_boundscheck(self):
# ARROW-1658. No validation of indices leads to segfaults in pandas
indices = [[0, 1], [0, -1]]
for inds in indices:
arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)
batch = pa.RecordBatch.from_arrays([arr], ['foo'])
table = pa.Table.from_batches([batch, batch, batch])
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas()
with pytest.raises(pa.ArrowInvalid):
table.to_pandas()
def test_unicode_with_unicode_column_and_index(self):
df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])
_check_pandas_roundtrip(df, preserve_index=True)
def test_mixed_unicode_column_names(self):
df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])
# TODO(phillipc): Should this raise?
with pytest.raises(AssertionError):
_check_pandas_roundtrip(df, preserve_index=True)
def test_binary_column_name(self):
column_data = [u'い']
key = u'あ'.encode('utf8')
data = {key: column_data}
df = pd.DataFrame(data)
        # we can't use _check_pandas_roundtrip here because our metadata
# is always decoded as utf8: even if binary goes in, utf8 comes out
t = pa.Table.from_pandas(df, preserve_index=True)
df2 = t.to_pandas()
assert df.values[0] == df2.values[0]
assert df.index.values[0] == df2.index.values[0]
assert df.columns[0] == key
def test_multiindex_duplicate_values(self):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
result_df = table.to_pandas()
tm.assert_frame_equal(result_df, df)
def test_metadata_with_mixed_types(self):
df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})
table = pa.Table.from_pandas(df)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'bytes'
assert data_column['numpy_type'] == 'object'
def test_list_metadata(self):
df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})
schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])
table = pa.Table.from_pandas(df, schema=schema)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'list[int64]'
assert data_column['numpy_type'] == 'object'
def test_decimal_metadata(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
table = pa.Table.from_pandas(expected)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'decimal'
assert data_column['numpy_type'] == 'object'
assert data_column['metadata'] == {'precision': 26, 'scale': 11}
def test_table_column_subset_metadata(self):
# ARROW-1883
df = pd.DataFrame({
'a': [1, 2, 3],
'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']])
# non-default index
for index in [
pd.Index(['a', 'b', 'c'], name='index'),
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]:
df = pd.DataFrame({'a': [1, 2, 3],
'b': [.1, .2, .3]}, index=index)
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))
def test_empty_list_metadata(self):
# Create table with array of empty lists, forced to have type
# list(string) in pyarrow
c1 = [["test"], ["a", "b"], None]
c2 = [[], [], []]
arrays = OrderedDict([
('c1', pa.array(c1, type=pa.list_(pa.string()))),
('c2', pa.array(c2, type=pa.list_(pa.string()))),
])
rb = pa.RecordBatch.from_arrays(
list(arrays.values()),
list(arrays.keys())
)
tbl = pa.Table.from_batches([rb])
# First roundtrip changes schema, because pandas cannot preserve the
# type of empty lists
df = tbl.to_pandas()
tbl2 = pa.Table.from_pandas(df, preserve_index=True)
md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))
# Second roundtrip
df2 = tbl2.to_pandas()
expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))
tm.assert_frame_equal(df2, expected)
assert md2['columns'] == [
{
'name': 'c1',
'field_name': 'c1',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[unicode]',
},
{
'name': 'c2',
'field_name': 'c2',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[empty]',
},
{
'name': None,
'field_name': '__index_level_0__',
'metadata': None,
'numpy_type': 'int64',
'pandas_type': 'int64',
}
]
class TestConvertPrimitiveTypes(object):
"""
Conversion tests for primitive (e.g. numeric) types.
"""
def test_float_no_nulls(self):
data = {}
fields = []
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
num_values = 100
for numpy_dtype, arrow_dtype in dtypes:
values = np.random.randn(num_values)
data[numpy_dtype] = values.astype(numpy_dtype)
fields.append(pa.field(numpy_dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_float_nulls(self):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
names = ['f2', 'f4', 'f8']
expected_cols = []
arrays = []
fields = []
for name, arrow_dtype in dtypes:
values = np.random.randn(num_values).astype(name)
arr = pa.array(values, from_pandas=True, mask=null_mask)
arrays.append(arr)
fields.append(pa.field(name, arrow_dtype))
values[null_mask] = np.nan
expected_cols.append(values)
ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
columns=names)
table = pa.Table.from_arrays(arrays, names)
assert table.schema.equals(pa.schema(fields))
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_nulls_to_ints(self):
# ARROW-2135
df = pd.DataFrame({"a": [1.0, 2.0, pd.np.NaN]})
schema = pa.schema([pa.field("a", pa.int16(), nullable=True)])
table = pa.Table.from_pandas(df, schema=schema, safe=False)
assert table[0].to_pylist() == [1, 2, None]
tm.assert_frame_equal(df, table.to_pandas())
def test_integer_no_nulls(self):
data = OrderedDict()
fields = []
numpy_dtypes = [
('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('longlong', pa.int64()), ('ulonglong', pa.uint64())
]
num_values = 100
for dtype, arrow_dtype in numpy_dtypes:
info = np.iinfo(dtype)
values = np.random.randint(max(info.min, np.iinfo(np.int_).min),
min(info.max, np.iinfo(np.int_).max),
size=num_values)
data[dtype] = values.astype(dtype)
fields.append(pa.field(dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_integer_types(self):
# Test all Numpy integer aliases
data = OrderedDict()
numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',
'int_', 'uint', 'longlong', 'ulonglong']
for dtype in numpy_dtypes:
data[dtype] = np.arange(12, dtype=dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df)
# Do the same with pa.array()
# (for some reason, it doesn't use the same code paths at all)
for np_arr in data.values():
arr = pa.array(np_arr)
assert arr.to_pylist() == np_arr.tolist()
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
arrays = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
arr = pa.array(values, mask=null_mask)
arrays.append(arr)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
table = pa.Table.from_arrays(arrays, int_dtypes)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_array_from_pandas_type_cast(self):
arr = np.arange(10, dtype='int64')
target_type = pa.int8()
result = pa.array(arr, type=target_type)
expected = pa.array(arr.astype('int8'))
assert result.equals(expected)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
num_values = 100
np.random.seed(0)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
arr = pa.array(values, mask=mask)
expected = values.astype(object)
expected[mask] = None
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
ex_frame = pd.DataFrame({'bools': expected})
table = pa.Table.from_arrays([arr], ['bools'])
assert table.schema.equals(schema)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_object_nulls(self):
arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)
df = pd.DataFrame({'floats': arr})
expected = pd.DataFrame({'floats': pd.to_numeric(arr)})
field = pa.field('floats', pa.float64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_int_object_nulls(self):
arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)
df = pd.DataFrame({'ints': arr})
expected = pd.DataFrame({'ints': pd.to_numeric(arr)})
field = pa.field('ints', pa.int64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_boolean_object_nulls(self):
arr = np.array([False, None, True] * 100, dtype=object)
df = pd.DataFrame({'bools': arr})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_nulls_cast_numeric(self):
arr = np.array([None], dtype=object)
def _check_type(t):
a2 = pa.array(arr, type=t)
assert a2.type == t
assert a2[0].as_py() is None
_check_type(pa.int32())
_check_type(pa.float64())
def test_half_floats_from_numpy(self):
arr = np.array([1.5, np.nan], dtype=np.float16)
a = pa.array(arr, type=pa.float16())
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert isinstance(y, np.float16)
assert np.isnan(y)
a = pa.array(arr, type=pa.float16(), from_pandas=True)
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert y is None
@pytest.mark.parametrize('dtype',
                         ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_array_integer_object_nulls_option(dtype):
    num_values = 100
    null_mask = np.random.randint(0, 10, size=num_values) < 3
    values = np.random.randint(0, 100, size=num_values, dtype=dtype)
    array = pa.array(values, mask=null_mask)
    if null_mask.any():
        expected = values.astype('O')
        expected[null_mask] = None
    else:
        expected = values
    result = array.to_pandas(integer_object_nulls=True)
    np.testing.assert_equal(result, expected)
@pytest.mark.parametrize('dtype',
                         ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_table_integer_object_nulls_option(dtype):
    num_values = 100
    null_mask = np.random.randint(0, 10, size=num_values) < 3
    values = np.random.randint(0, 100, size=num_values, dtype=dtype)
    array = pa.array(values, mask=null_mask)
    if null_mask.any():
        expected = values.astype('O')
        expected[null_mask] = None
    else:
        expected = values
    expected = pd.DataFrame({dtype: expected})
    table = pa.Table.from_arrays([array], [dtype])
    result = table.to_pandas(integer_object_nulls=True)
    tm.assert_frame_equal(result, expected)
class TestConvertDateTimeLikeTypes(object):
"""
Conversion tests for datetime- and timestamp-like types (date64, etc.).
"""
def test_timestamps_notimezone_no_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_notimezone_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_with_timezone(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123',
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
_check_series_roundtrip(df['datetime64'])
# drop-in a null and ns instead of ms
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
def test_python_datetime(self):
# ARROW-2106
date_array = [datetime.today() + timedelta(days=x) for x in range(10)]
df = pd.DataFrame({
'datetime': pd.Series(date_array, dtype=object)
})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({
'datetime': date_array
})
tm.assert_frame_equal(expected_df, result)
def test_python_datetime_subclass(self):
class MyDatetime(datetime):
# see https://github.com/pandas-dev/pandas/issues/21142
nanosecond = 0.0
date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]
df = pd.DataFrame({"datetime": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({"datetime": date_array})
# https://github.com/pandas-dev/pandas/issues/21142
expected_df["datetime"] = pd.to_datetime(expected_df["datetime"])
tm.assert_frame_equal(expected_df, result)
def test_python_date_subclass(self):
class MyDate(date):
pass
date_array = [MyDate(2000, 1, 1)]
df = pd.DataFrame({"date": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.Date32Array)
result = table.to_pandas()
expected_df = pd.DataFrame(
{"date": np.array(["2000-01-01"], dtype="datetime64[ns]")}
)
tm.assert_frame_equal(expected_df, result)
def test_datetime64_to_date32(self):
# ARROW-1718
arr = pa.array([date(2017, 10, 23), None])
c = pa.Column.from_array("d", arr)
s = c.to_pandas()
arr2 = pa.Array.from_pandas(s, type=pa.date32())
assert arr2.equals(arr.cast('date32'))
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False]),
])
def test_pandas_datetime_to_date64(self, mask):
s = pd.to_datetime([
'2018-05-10T00:00:00',
'2018-05-11T00:00:00',
'2018-05-12T00:00:00',
])
arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
data = np.array([
date(2018, 5, 10),
date(2018, 5, 11),
date(2018, 5, 12)
])
expected = pa.array(data, mask=mask, type=pa.date64())
assert arr.equals(expected)
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False])
])
def test_pandas_datetime_to_date64_failures(self, mask):
s = pd.to_datetime([
'2018-05-10T10:24:01',
'2018-05-11T10:24:01',
'2018-05-12T10:24:01',
])
expected_msg = 'Timestamp value had non-zero intraday milliseconds'
with pytest.raises(pa.ArrowInvalid, match=expected_msg):
pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
def test_array_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
assert arr.equals(pa.array(expected))
result = arr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(arr.to_pandas(), expected)
result = arr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_chunked_array_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
carr = pa.chunked_array([data])
result = carr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(carr.to_pandas(), expected)
result = carr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_column_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
column = pa.column('date', arr)
result = column.to_pandas()
npt.assert_array_equal(column.to_pandas(), expected)
result = column.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_table_convert_date_as_object(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
df_datetime = table.to_pandas()
df_object = table.to_pandas(date_as_object=True)
tm.assert_frame_equal(df.astype('datetime64[ns]'), df_datetime,
check_dtype=True)
tm.assert_frame_equal(df, df_object, check_dtype=True)
def test_date_infer(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
field = pa.field('date', pa.date32())
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
result = table.to_pandas()
expected = df.copy()
expected['date'] = pd.to_datetime(df['date'])
tm.assert_frame_equal(result, expected)
def test_date_mask(self):
arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],
dtype='datetime64[D]')
mask = [True, False]
result = pa.array(arr, mask=np.array(mask))
expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')
expected = pa.array(expected, from_pandas=True)
assert expected.equals(result)
def test_date_objects_typed(self):
arr = np.array([
date(2017, 4, 3),
None,
date(2017, 4, 4),
date(2017, 4, 5)], dtype=object)
arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')
arr_i8 = arr_i4.astype('int64') * 86400000
mask = np.array([False, True, False, False])
t32 = pa.date32()
t64 = pa.date64()
a32 = pa.array(arr, type=t32)
a64 = pa.array(arr, type=t64)
a32_expected = pa.array(arr_i4, mask=mask, type=t32)
a64_expected = pa.array(arr_i8, mask=mask, type=t64)
assert a32.equals(a32_expected)
assert a64.equals(a64_expected)
# Test converting back to pandas
colnames = ['date32', 'date64']
table = pa.Table.from_arrays([a32, a64], colnames)
table_pandas = table.to_pandas()
ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',
'2017-04-05'],
dtype='datetime64[D]')
.astype('datetime64[ns]'))
ex_values[1] = pd.NaT.value
expected_pandas = pd.DataFrame({'date32': ex_values,
'date64': ex_values},
columns=colnames)
tm.assert_frame_equal(table_pandas, expected_pandas)
def test_dates_from_integers(self):
t1 = pa.date32()
t2 = pa.date64()
arr = np.array([17259, 17260, 17261], dtype='int32')
arr2 = arr.astype('int64') * 86400000
a1 = pa.array(arr, type=t1)
a2 = pa.array(arr2, type=t2)
expected = date(2017, 4, 3)
assert a1[0].as_py() == expected
assert a2[0].as_py() == expected
@pytest.mark.xfail(reason="not supported ATM",
raises=NotImplementedError)
def test_timedelta(self):
        # TODO(jreback): Pandas only supports ns resolution
# Arrow supports ??? for resolution
df = pd.DataFrame({
'timedelta': np.arange(start=0, stop=3 * 86400000,
step=86400000,
dtype='timedelta64[ms]')
})
pa.Table.from_pandas(df)
def test_pytime_from_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356)]
# microseconds
t1 = pa.time64('us')
aobjs = np.array(pytimes + [None], dtype=object)
parr = pa.array(aobjs)
assert parr.type == t1
assert parr[0].as_py() == pytimes[0]
assert parr[1].as_py() == pytimes[1]
assert parr[2] is pa.NA
# DataFrame
df = pd.DataFrame({'times': aobjs})
batch = pa.RecordBatch.from_pandas(df)
assert batch[0].equals(parr)
# Test ndarray of int64 values
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
a1 = pa.array(arr, type=pa.time64('us'))
assert a1[0].as_py() == pytimes[0]
a2 = pa.array(arr * 1000, type=pa.time64('ns'))
assert a2[0].as_py() == pytimes[0]
a3 = pa.array((arr / 1000).astype('i4'),
type=pa.time32('ms'))
assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)
a4 = pa.array((arr / 1000000).astype('i4'),
type=pa.time32('s'))
assert a4[0].as_py() == pytimes[0].replace(microsecond=0)
def test_arrow_time_to_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356),
time(0, 0, 0)]
expected = np.array(pytimes[:2] + [None])
expected_ms = np.array([x.replace(microsecond=1000)
for x in pytimes[:2]] +
[None])
expected_s = np.array([x.replace(microsecond=0)
for x in pytimes[:2]] +
[None])
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
null_mask = np.array([False, False, True], dtype=bool)
a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))
a2 = pa.array(arr * 1000, mask=null_mask,
type=pa.time64('ns'))
a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,
type=pa.time32('ms'))
a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,
type=pa.time32('s'))
names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']
batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)
arr = a1.to_pandas()
assert (arr == expected).all()
arr = a2.to_pandas()
assert (arr == expected).all()
arr = a3.to_pandas()
assert (arr == expected_ms).all()
arr = a4.to_pandas()
assert (arr == expected_s).all()
df = batch.to_pandas()
expected_df = pd.DataFrame({'time64[us]': expected,
'time64[ns]': expected,
'time32[ms]': expected_ms,
'time32[s]': expected_s},
columns=names)
tm.assert_frame_equal(df, expected_df)
def test_numpy_datetime64_columns(self):
datetime64_ns = np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
_check_array_from_pandas_roundtrip(datetime64_ns)
datetime64_us = np.array([
'2007-07-13T01:23:34.123456',
None,
'2006-01-13T12:34:56.432539',
'2010-08-13T05:46:57.437699'],
dtype='datetime64[us]')
_check_array_from_pandas_roundtrip(datetime64_us)
datetime64_ms = np.array([
'2007-07-13T01:23:34.123',
None,
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
_check_array_from_pandas_roundtrip(datetime64_ms)
datetime64_s = np.array([
'2007-07-13T01:23:34',
None,
'2006-01-13T12:34:56',
'2010-08-13T05:46:57'],
dtype='datetime64[s]')
_check_array_from_pandas_roundtrip(datetime64_s)
@pytest.mark.parametrize('dtype', [pa.date32(), pa.date64()])
def test_numpy_datetime64_day_unit(self, dtype):
datetime64_d = np.array([
'2007-07-13',
None,
'2006-01-15',
'2010-08-19'],
dtype='datetime64[D]')
_check_array_from_pandas_roundtrip(datetime64_d, type=dtype)
def test_array_from_pandas_date_with_mask(self):
m = np.array([True, False, True])
data = pd.Series([
date(1990, 1, 1),
date(1991, 1, 1),
date(1992, 1, 1)
])
result = pa.Array.from_pandas(data, mask=m)
expected = pd.Series([None, date(1991, 1, 1), None])
assert pa.Array.from_pandas(expected).equals(result)
def test_fixed_offset_timezone(self):
df = pd.DataFrame({
'a': [
pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.NaT
]
})
_check_pandas_roundtrip(df)
_check_serialize_components_roundtrip(df)
# ----------------------------------------------------------------------
# Conversion tests for string and binary types.
class TestConvertStringLikeTypes(object):
def test_pandas_unicode(self):
repeats = 1000
values = [u'foo', None, u'bar', u'mañana', np.nan]
df = pd.DataFrame({'strings': values * repeats})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_bytes_to_binary(self):
values = [u'qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan]
df = pd.DataFrame({'strings': values})
table = pa.Table.from_pandas(df)
assert table[0].type == pa.binary()
values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan]
expected = pd.DataFrame({'strings': values2})
_check_pandas_roundtrip(df, expected)
@pytest.mark.large_memory
def test_bytes_exceed_2gb(self):
v1 = b'x' * 100000000
v2 = b'x' * 147483646
# ARROW-2227, hit exactly 2GB on the nose
df = pd.DataFrame({
'strings': [v1] * 20 + [v2] + ['x'] * 20
})
arr = pa.array(df['strings'])
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
arr = None
table = pa.Table.from_pandas(df)
assert table[0].data.num_chunks == 2
def test_fixed_size_bytes(self):
values = [b'foo', None, bytearray(b'bar'), None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
table = pa.Table.from_pandas(df, schema=schema)
assert table.schema[0].type == schema[0].type
assert table.schema[0].name == schema[0].name
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_fixed_size_bytes_does_not_accept_varying_lengths(self):
values = [b'foo', None, b'ba', None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_pandas(df, schema=schema)
def test_variable_size_bytes(self):
s = pd.Series([b'123', b'', b'a', None])
_check_series_roundtrip(s, type_=pa.binary())
def test_binary_from_bytearray(self):
s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'),
None])
# Explicitly set type
_check_series_roundtrip(s, type_=pa.binary())
# Infer type from bytearrays
_check_series_roundtrip(s, expected_pa_type=pa.binary())
def test_table_empty_str(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result1 = table.to_pandas(strings_to_categorical=False)
expected1 = pd.DataFrame({'strings': values})
tm.assert_frame_equal(result1, expected1, check_dtype=True)
result2 = table.to_pandas(strings_to_categorical=True)
expected2 = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result2, expected2, check_dtype=True)
def test_selective_categoricals(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
expected_str = pd.DataFrame({'strings': values})
expected_cat = pd.DataFrame({'strings': pd.Categorical(values)})
result1 = table.to_pandas(categories=['strings'])
tm.assert_frame_equal(result1, expected_cat, check_dtype=True)
result2 = table.to_pandas(categories=[])
tm.assert_frame_equal(result2, expected_str, check_dtype=True)
result3 = table.to_pandas(categories=('strings',))
tm.assert_frame_equal(result3, expected_cat, check_dtype=True)
result4 = table.to_pandas(categories=tuple())
tm.assert_frame_equal(result4, expected_str, check_dtype=True)
def test_table_str_to_categorical_without_na(self):
values = ['a', 'a', 'b', 'b', 'c']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
def test_table_str_to_categorical_with_na(self):
values = [None, 'a', 'b', np.nan]
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
# Regression test for ARROW-2101
def test_array_of_bytes_to_strings(self):
converted = pa.array(np.array([b'x'], dtype=object), pa.string())
assert converted.type == pa.string()
# Make sure that if an ndarray of bytes is passed to the array
# constructor and the type is string, it will fail if those bytes
# cannot be converted to utf-8
def test_array_of_bytes_to_strings_bad_data(self):
with pytest.raises(
pa.lib.ArrowInvalid,
match="was not a utf8 string"):
pa.array(np.array([b'\x80\x81'], dtype=object), pa.string())
def test_numpy_string_array_to_fixed_size_binary(self):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
converted = pa.array(arr, type=pa.binary(3))
expected = pa.array(list(arr), type=pa.binary(3))
assert converted.equals(expected)
mask = np.array([True, False, True])
converted = pa.array(arr, type=pa.binary(3), mask=mask)
expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3))
assert converted.equals(expected)
with pytest.raises(pa.lib.ArrowInvalid,
match=r'Got bytestring of length 3 \(expected 4\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
pa.array(arr, type=pa.binary(4))
with pytest.raises(
pa.lib.ArrowInvalid,
match=r'Got bytestring of length 12 \(expected 3\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3')
pa.array(arr, type=pa.binary(3))
class TestConvertDecimalTypes(object):
"""
Conversion test for decimal types.
"""
decimal32 = [
decimal.Decimal('-1234.123'),
decimal.Decimal('1234.439')
]
decimal64 = [
decimal.Decimal('-129934.123331'),
decimal.Decimal('129534.123731')
]
decimal128 = [
decimal.Decimal('394092382910493.12341234678'),
decimal.Decimal('-314292388910493.12343437128')
]
@pytest.mark.parametrize(('values', 'expected_type'), [
pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'),
pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'),
pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128')
])
def test_decimal_from_pandas(self, values, expected_type):
expected = pd.DataFrame({'decimals': values})
table = pa.Table.from_pandas(expected, preserve_index=False)
field = pa.field('decimals', expected_type)
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
@pytest.mark.parametrize('values', [
pytest.param(decimal32, id='decimal32'),
pytest.param(decimal64, id='decimal64'),
pytest.param(decimal128, id='decimal128')
])
def test_decimal_to_pandas(self, values):
expected = pd.DataFrame({'decimals': values})
converted = pa.Table.from_pandas(expected)
df = converted.to_pandas()
tm.assert_frame_equal(df, expected)
def test_decimal_fails_with_truncation(self):
data1 = [decimal.Decimal('1.234')]
type1 = pa.decimal128(10, 2)
with pytest.raises(pa.ArrowInvalid):
pa.array(data1, type=type1)
data2 = [decimal.Decimal('1.2345')]
type2 = pa.decimal128(10, 3)
with pytest.raises(pa.ArrowInvalid):
pa.array(data2, type=type2)
def test_decimal_with_different_precisions(self):
data = [
decimal.Decimal('0.01'),
decimal.Decimal('0.001'),
]
series = pd.Series(data)
array = pa.array(series)
assert array.to_pylist() == data
assert array.type == pa.decimal128(3, 3)
array = pa.array(data, type=pa.decimal128(12, 5))
expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')]
assert array.to_pylist() == expected
def test_decimal_with_None_explicit_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
# Test that having all None values still produces decimal array
series = pd.Series([None] * 2)
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
def test_decimal_with_None_infer_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2))
def test_strided_objects(self, tmpdir):
# see ARROW-3053
data = {
'a': {0: 'a'},
'b': {0: decimal.Decimal('0.0')}
}
# This yields strided objects
df = pd.DataFrame.from_dict(data)
_check_pandas_roundtrip(df)
class TestListTypes(object):
"""
Conversion tests for list<> types.
"""
def test_column_of_arrays(self):
df, schema = dataframe_with_arrays()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_arrays_to_py(self):
# Test regression in ARROW-1199 not caught in above test
dtype = 'i1'
arr = np.array([
np.arange(10, dtype=dtype),
np.arange(5, dtype=dtype),
None,
np.arange(1, dtype=dtype)
])
type_ = pa.list_(pa.int8())
parr = pa.array(arr, type=type_)
assert parr[0].as_py() == list(range(10))
assert parr[1].as_py() == list(range(5))
assert parr[2].as_py() is None
assert parr[3].as_py() == [0]
def test_column_of_lists(self):
df, schema = dataframe_with_lists()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_lists_first_empty(self):
# ARROW-2124
num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]
series = pd.Series([np.array(s, dtype=float) for s in num_lists])
arr = pa.array(series)
result = pd.Series(arr.to_pandas())
tm.assert_series_equal(result, series)
def test_column_of_lists_chunked(self):
# ARROW-1357
df = pd.DataFrame({
'lists': np.array([
[1, 2],
None,
[2, 3],
[4, 5],
[6, 7],
[8, 9]
], dtype=object)
})
schema = pa.schema([
pa.field('lists', pa.list_(pa.int64()))
])
t1 = pa.Table.from_pandas(df[:2], schema=schema)
t2 = pa.Table.from_pandas(df[2:], schema=schema)
table = pa.concat_tables([t1, t2])
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_column_of_lists_chunked2(self):
data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],
[12, 13], [14, 15], [16, 17]]
data2 = [[8, 9], [18, 19]]
a1 = pa.array(data1)
a2 = pa.array(data2)
t1 = pa.Table.from_arrays([a1], names=['a'])
t2 = pa.Table.from_arrays([a2], names=['a'])
concatenated = pa.concat_tables([t1, t2])
result = concatenated.to_pandas()
expected = pd.DataFrame({'a': data1 + data2})
tm.assert_frame_equal(result, expected)
def test_column_of_lists_strided(self):
df, schema = dataframe_with_lists()
df = pd.concat([df] * 6, ignore_index=True)
arr = df['int64'].values[::3]
assert arr.strides[0] != 8
_check_array_roundtrip(arr)
def test_nested_lists_all_none(self):
data = np.array([[None, None], None], dtype=object)
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
data2 = np.array([None, None, [None, None],
np.array([None, None], dtype=object)],
dtype=object)
arr = pa.array(data2)
expected = pa.array([None, None, [None, None], [None, None]])
assert arr.equals(expected)
def test_nested_lists_all_empty(self):
# ARROW-2128
data = pd.Series([[], [], []])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
def test_nested_list_first_empty(self):
# ARROW-2711
data = pd.Series([[], [u"a"]])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.string())
def test_nested_smaller_ints(self):
# ARROW-1345, ARROW-2008, there were some type inference bugs happening
# before
data = pd.Series([np.array([1, 2, 3], dtype='i1'), None])
result = pa.array(data)
result2 = pa.array(data.values)
expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8()))
assert result.equals(expected)
assert result2.equals(expected)
data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None])
result3 = pa.array(data3)
expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32()))
assert result3.equals(expected3)
def test_infer_lists(self):
data = OrderedDict([
('nan_ints', [[None, 1], [2, 3]]),
('ints', [[0, 1], [2, 3]]),
('strs', [[None, u'b'], [u'c', u'd']]),
('nested_strs', [[[None, u'b'], [u'c', u'd']], None])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('nan_ints', pa.list_(pa.int64())),
pa.field('ints', pa.list_(pa.int64())),
pa.field('strs', pa.list_(pa.string())),
pa.field('nested_strs', pa.list_(pa.list_(pa.string())))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
def test_infer_numpy_array(self):
data = OrderedDict([
('ints', [
np.array([0, 1], dtype=np.int64),
np.array([2, 3], dtype=np.int64)
])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('ints', pa.list_(pa.int64()))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
@pytest.mark.parametrize('t,data,expected', [
(
pa.int64,
[[1, 2], [3], None],
[None, [3], None]
),
(
pa.string,
[[u'aaa', u'bb'], [u'c'], None],
[None, [u'c'], None]
),
(
pa.null,
[[None, None], [None], None],
[None, [None], None]
)
])
def test_array_from_pandas_typed_array_with_mask(self, t, data, expected):
m = np.array([True, False, True])
s = pd.Series(data)
result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t()))
assert pa.Array.from_pandas(expected,
type=pa.list_(t())).equals(result)
def test_empty_list_roundtrip(self):
empty_list_array = np.empty((3,), dtype=object)
empty_list_array.fill([])
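        # note: ndarray.fill with an object places the *same* list instance in
        # every slot, which is fine here since the lists stay empty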
df = pd.DataFrame({'a': np.array(['1', '2', '3']),
'b': empty_list_array})
tbl = pa.Table.from_pandas(df)
result = tbl.to_pandas()
tm.assert_frame_equal(result, df)
def test_array_from_nested_arrays(self):
df, schema = dataframe_with_arrays()
for field in schema:
arr = df[field.name].values
expected = pa.array(list(arr), type=field.type)
result = pa.array(arr)
assert result.type == field.type # == list<scalar>
assert result.equals(expected)
class TestConvertStructTypes(object):
"""
Conversion tests for struct types.
"""
def test_to_pandas(self):
ints = pa.array([None, 2, 3], type=pa.int64())
strs = pa.array([u'a', None, u'c'], type=pa.string())
bools = pa.array([True, False, None], type=pa.bool_())
arr = pa.StructArray.from_arrays(
[ints, strs, bools],
['ints', 'strs', 'bools'])
expected = pd.Series([
{'ints': None, 'strs': u'a', 'bools': True},
{'ints': 2, 'strs': None, 'bools': False},
{'ints': 3, 'strs': u'c', 'bools': None},
])
series = pd.Series(arr.to_pandas())
tm.assert_series_equal(series, expected)
def test_from_numpy(self):
dt = np.dtype([('x', np.int32),
(('y_title', 'y'), np.bool_)])
ty = pa.struct([pa.field('x', pa.int32()),
pa.field('y', pa.bool_())])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([(42, True), (43, False)], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{'x': 42, 'y': True},
{'x': 43, 'y': False}]
# With mask
arr = pa.array(data, mask=np.bool_([False, True]), type=ty)
assert arr.to_pylist() == [{'x': 42, 'y': True}, None]
# Trivial struct type
dt = np.dtype([])
ty = pa.struct([])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([(), ()], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{}, {}]
def test_from_numpy_nested(self):
dt = np.dtype([('x', np.dtype([('xx', np.int8),
('yy', np.bool_)])),
('y', np.int16)])
ty = pa.struct([pa.field('x', pa.struct([pa.field('xx', pa.int8()),
pa.field('yy', pa.bool_())])),
pa.field('y', pa.int16())])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([((1, True), 2), ((3, False), 4)], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{'x': {'xx': 1, 'yy': True}, 'y': 2},
{'x': {'xx': 3, 'yy': False}, 'y': 4}]
@pytest.mark.large_memory
def test_from_numpy_large(self):
# Exercise rechunking + nulls
        target_size = 3 * 1024**3  # 3GB
dt = np.dtype([('x', np.float64), ('y', 'object')])
bs = 65536 - dt.itemsize
block = b'.' * bs
n = target_size // (bs + dt.itemsize)
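        # rough sizing: each record is ~64 KiB (bs + itemsize), so n is on the
        # order of 48k rows -- large enough to force chunking, as asserted below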
data = np.zeros(n, dtype=dt)
data['x'] = np.random.random_sample(n)
data['y'] = block
# Add implicit nulls
data['x'][data['x'] < 0.2] = np.nan
ty = pa.struct([pa.field('x', pa.float64()),
pa.field('y', pa.binary(bs))])
arr = pa.array(data, type=ty, from_pandas=True)
assert arr.num_chunks == 2
def iter_chunked_array(arr):
for chunk in arr.iterchunks():
for item in chunk:
yield item
def check(arr, data, mask=None):
assert len(arr) == len(data)
xs = data['x']
ys = data['y']
for i, obj in enumerate(iter_chunked_array(arr)):
try:
d = obj.as_py()
if mask is not None and mask[i]:
assert d is None
else:
x = xs[i]
if np.isnan(x):
assert d['x'] is None
else:
assert d['x'] == x
assert d['y'] == ys[i]
except Exception:
print("Failed at index", i)
raise
check(arr, data)
del arr
# Now with explicit mask
mask = np.random.random_sample(n) < 0.2
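        # roughly 20% of rows are explicitly masked; check() expects those to
        # come back as None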
arr = pa.array(data, type=ty, mask=mask, from_pandas=True)
assert arr.num_chunks == 2
check(arr, data, mask)
del arr
def test_from_numpy_bad_input(self):
ty = pa.struct([pa.field('x', pa.int32()),
pa.field('y', pa.bool_())])
dt = np.dtype([('x', np.int32),
('z', np.bool_)])
data = np.array([], dtype=dt)
with pytest.raises(TypeError,
match="Missing field 'y'"):
pa.array(data, type=ty)
data = np.int32([])
with pytest.raises(TypeError,
match="Expected struct array"):
pa.array(data, type=ty)
class TestZeroCopyConversion(object):
"""
Tests that zero-copy conversion works with some types.
"""
def test_zero_copy_success(self):
result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True)
npt.assert_array_equal(result, [0, 1, 2])
def test_zero_copy_dictionaries(self):
arr = pa.DictionaryArray.from_arrays(
np.array([0, 0]),
np.array([5]))
result = arr.to_pandas(zero_copy_only=True)
        values = pd.Categorical([5, 5])
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
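    # e.g. for a 2-D frame with axis=1 this yields (slice(None), key), i.e. a
    # full slice on every axis except the one being indexed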
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
        indices = itertools.product(*axes)
        for i in indices:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
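        # harness: for each requested obj kind / typ / axis combination, fetch
        # getattr(obj, method1)[key1] and getattr(obj, method2)[key2] and
        # require them to match; `fails`, when given, is an exception class the
        # combination is expected to raise instead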
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
                # if we are in fails, then ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
                indices = _generate_indices(f, values)
                for i in indices:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
                indices = _generate_indices(f, values)
                for i in indices:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
            index=['ABCDE'[x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
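        # set union gives an arbitrary ordering; `expected` below is reindexed
        # to the same order so the comparison is order-insensitive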
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
        # neg indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
        # dup indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
# if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
# these are going to raise because the index is non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raise KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
# these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
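# continuation assumed to mirror the unnamed variant of this check further above
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)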
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import random
import time
import warnings
warnings.filterwarnings('ignore')
sns.set(style='darkgrid', palette='deep')
#Analysing dataset with pandas profiling
#from pandas_profiling import ProfileReport
#profile = ProfileReport(df, title='Medical Cost Personal Datasets', html={'style':{'full_width':True}})
#Importing Dataset
df_raw = pd.read_excel('titanic3.xls')
new_columns = ['class','survival', 'name', 'sex', 'age', 'siblings/spouses',
'parents/children', 'ticket', 'fare', 'cabin', 'embarked', 'lifeboat',
'body number', 'home/destination']
df_raw.info()
#Feature Engineering
df = pd.DataFrame(df_raw.values, columns= new_columns )
df_user = pd.DataFrame(np.arange(0, len(df)), columns=['passenger'])
df = pd.concat([df_user, df], axis=1)
df['family'] = df['siblings/spouses'] + df['parents/children'] + 1
df = df.drop(['siblings/spouses','parents/children'], axis=1)
df['embarked'].value_counts()
df['embarked'].replace(['S', 'C', 'Q'],
['southampton', 'cherbourg', 'queenstown'], inplace= True )
df.info()
df.columns
df[['class', 'survival', 'age', 'fare',
'body number', 'family']] = df[['class', 'survival', 'age', 'fare',
'body number', 'family']].apply(pd.to_numeric)
#Converting columns to datetime
df['Timestamp'] = pd.to_datetime(df['Timestamp'])
time_new = df['Timestamp'].iloc[0]
df['Hour'] = df['Timestamp'].apply(lambda time_new: time_new.hour)
df['Month'] = df['Timestamp'].apply(lambda time_new: time_new.month)
df['Day'] = df['Timestamp'].apply(lambda time_new: time_new.dayofweek)
df["hour"] = df.hour.str.slice(1, 3).astype(int)
#Visualising Dataset
bins = range(0,100,10)
ax = sns.distplot(df.age[df.y=='yes'],
color='red', kde=False, bins=bins, label='Have Subscribed')
sns.distplot(df.age[df.y=='no'],
ax=ax, # Overplots on first plot
color='blue', kde=False, bins=bins, label="Haven't Subscribed")
plt.legend()
plt.show()
g = pd.crosstab(df.sex, df.survival).plot(kind='bar', figsize=(10,5))
ax = g.axes
for p in ax.patches:
ax.annotate(f"{p.get_height() * 100 / df.shape[0]:.2f}%", (p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center', fontsize=11, color='gray', rotation=0, xytext=(0, 10),
textcoords='offset points')
plt.grid(b=True, which='major', linestyle='--')
plt.title('Survival Frequency by Gender')
plt.legend(['Not Survived', 'Survived'])
plt.xlabel('Gender')
plt.ylabel('Quantity')
plt.show()
df.groupby(pd.cut(df.age, bins))['age'].count().plot(kind='bar', figsize=(10,10))
plt.grid(b=True, which='major', linestyle='--')
plt.title('Frequency of Age')
plt.grid(b=True, which='major', linestyle='--')
plt.xlabel('Age')
plt.ylabel('Quantity')
plt.show()
pd.crosstab(pd.cut(df.age, bins), df.survival).plot(kind='bar', figsize=(10,10))
plt.grid(b=True, which='major', linestyle='--')
plt.title('Survival Frequency for Age')
plt.legend(['Not Survival', 'Survival'])
plt.yticks(np.arange(0,250,50))
plt.xlabel('Age')
plt.ylabel('Quantity')
plt.show()
age_notsurvival = (df.groupby(pd.cut(df.age, bins))['age'].count()/ len(df[df.survival==0]))*100
age_survival = (df.groupby(pd.cut(df.age, bins))['age'].count()/ len(df[df.survival==1]))*100
age_notsurvival.plot(kind='bar', figsize=(10,10))
plt.grid(b=True, which='major', linestyle='--')
plt.title('Percentage of Age for Passengers Not Survived')
plt.yticks(np.arange(0,110,10))
plt.xlabel('Age')
plt.ylabel('Percentage')
plt.show()
age_survival.plot(kind='bar', figsize=(10,10))
plt.grid(b=True, which='major', linestyle='--')
plt.title('Percentage of Age for Passengers Survived')
plt.yticks(np.arange(0,110,10))
plt.xlabel('Age')
plt.ylabel('Percentage')
plt.show()
fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True, sharey=True, figsize=(10,10))
plt.subplots_adjust(hspace=0)
plt.suptitle('Age Frequency')
ax1 = sns.countplot(pd.cut(df.age, bins), data= df,
color='darkblue', ax=axes[0], saturation=0.5)
ax2 = sns.countplot(pd.cut(df.age, bins), data=df,
color='darkblue', ax=axes[1], saturation=0.5)  # keyword arguments assumed to mirror ax1, drawn on the second axis
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 16:14:12 2019
@author: <NAME>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import graphviz
import os
import seaborn as sns
from scipy.stats import chi2_contingency
os.chdir("E:\PYTHON NOTES\projects\cab fare prediction")
dataset_train=pd.read_csv("train_cab.csv")
dataset_test=pd.read_csv("test.csv")
dataset_train.describe()
# dimension of data
dataset_train.shape
# Number of rows
dataset_train.shape[0]
# number of columns
dataset_train.shape[1]
# name of columns
list(dataset_train)
# data details
dataset_train.info()
dataset_train.isnull().sum()
dataset_test.isnull().sum()
sns.heatmap(dataset_train.isnull(),yticklabels=False,cbar=False, cmap='coolwarm')
#change datetime into the required format
data=[dataset_train,dataset_test]
for i in data:
i["pickup_datetime"]=pd.to_datetime(i["pickup_datetime"],errors="coerce")
dataset_train.info()
dataset_test.info()
dataset_train.isnull().sum()
dataset_test.isna().sum()
dataset_train=dataset_train.dropna(subset=["pickup_datetime"],how="all")
dataset_train["fare_amount"]=dataset_train["fare_amount"].astype(float)
np.where(dataset_train["fare_amount"]=="430-")
dataset_train["fare_amount"].loc[1123]=430
dataset_train["fare_amount"]=dataset_train["fare_amount"].astype(float)
#we will convert passenger count into a categorical variable, because passenger count is not a continuous variable
dataset_obj=["passenger_count"]
dataset_int=["fare_amount","pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]
# data visualization
import seaborn as sns
import matplotlib.pyplot as plt
#setting up the sns style for plots
sns.set(style="darkgrid",palette="Set1")
#some histogram plot from seaborn lib
plt.figure(figsize=(20,20))
plt.subplot(321)
_=sns.distplot(dataset_train["fare_amount"],bins=50)
plt.subplot(322)
_=sns.distplot(dataset_train["pickup_longitude"],bins=50)
plt.subplot(323)
_=sns.distplot(dataset_train["pickup_latitude"],bins=50)
plt.subplot(324)
_ = sns.distplot(dataset_train['dropoff_longitude'],bins=50)
plt.subplot(325)
_ = sns.distplot(dataset_train['dropoff_latitude'],bins=50)
plt.show()
plt.savefig('hist.png')
import scipy.stats as stats
#Some Bee Swarmplots
# plt.title('Cab Fare w.r.t passenger_count')
plt.figure(figsize=(25,25))
#_=sns.swarmplot(x="passenger_count",y="fare_amount",data=dataset_train)
#Jointplots for Bivariate Analysis.
#Here Scatter plot has regression line between 2 variables along with separate Bar plots of both variables.
#Also its annotated with pearson correlation coefficient and p value.
_=sns.jointplot(x="fare_amount",y="pickup_longitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
#plt.savefig("jointfplo.png")
plt.show()
_=sns.jointplot(x="fare_amount",y="pickup_latitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
_=sns.jointplot(x="fare_amount",y="dropoff_longitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
_=sns.jointplot(x="fare_amount",y="dropoff_latitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
# some violin plots to see the spread of the variables
plt.figure(figsize=(20,20))
plt.subplot(321)
_=sns.violinplot(y="fare_amount",data=dataset_train)
plt.subplot(322)
_=sns.violinplot(y="pickup_longitude",data=dataset_train)
plt.subplot(323)
_ = sns.violinplot(y='pickup_latitude',data=dataset_train)
plt.subplot(324)
_ = sns.violinplot(y='dropoff_longitude',data=dataset_train)
plt.subplot(325)
_ = sns.violinplot(y='dropoff_latitude',data=dataset_train)
plt.savefig("violine.png")
plt.show()
#pairplot for all numeric variables
_=sns.pairplot(dataset_train.loc[:,dataset_int],kind="scatter",dropna=True)
_.fig.suptitle("pairwise plot of all numeric variables")
#plt.savefig("pairwise.png")
plt.show()
#removing outlier values which are not within the desired range, depending upon a basic understanding of the dataset
#1.Fare amount has a negative value, which doesn't make sense. A price amount cannot be -ve and also cannot be 0. So we will remove these fields.
sum(dataset_train["fare_amount"]<1)
dataset_train[dataset_train["fare_amount"]<1]
dataset_train=dataset_train.drop(dataset_train[dataset_train["fare_amount"]<1].index,axis=0)
#dataset_train.loc[dataset_train["fare_amount"]<1,"fare_amount"]=np.nan
#2. passenger_count variable /// passenger count cannot be more than 6
sum(dataset_train["passenger_count"]>6)
for i in range (4,11):
print("passanger_count_above"+ str(i)+ "={}".format(sum(dataset_train["passenger_count"]>i)))
# so 20 observations of passenger_count are consistently above the 6,7,8,9,10 passenger_count values, let's check them.
dataset_train[dataset_train["passenger_count"]>6]
#Also we need to see if there are any passenger_count<1
dataset_train[dataset_train["passenger_count"]<1]
len(dataset_train[dataset_train["passenger_count"]<1])
dataset_test["passenger_count"].unique()
# We will remove the 20 observations which are above the value 6 because a cab cannot hold that number of passengers.
dataset_train=dataset_train.drop(dataset_train[dataset_train["passenger_count"]<1].index,axis=0)
dataset_train=dataset_train.drop(dataset_train[dataset_train["passenger_count"]>6].index,axis=0)
#dataset_train.loc[dataset_train["passenger_count"]<1,"passenger_count"]=np.nan
#dataset_train.loc[dataset_train["passenger_count"]>6,"passenger_count"]=np.nan
sum(dataset_train["passenger_count"]<1)
#3. Latitudes range from -90 to 90. Longitudes range from -180 to 180. Removing values which do not satisfy these ranges.
print("pickup_longitude above 180 ={}".format(sum(dataset_train["pickup_longitude"]>180)))
print("pickup_longitude above -180 = {}".format(sum(dataset_train["pickup_longitude"]<-180)))
print("pickup_latitude above 90 ={}".format(sum(dataset_train["pickup_latitude"]>90)))
print("pickup_latitude above -90 ={}".format(sum(dataset_train["pickup_latitude"]<-90)))
print('dropoff_longitude above 180={}'.format(sum(dataset_train['dropoff_longitude']>180)))
print('dropoff_longitude below -180={}'.format(sum(dataset_train['dropoff_longitude']<-180)))
print('dropoff_latitude below -90={}'.format(sum(dataset_train['dropoff_latitude']<-90)))
print('dropoff_latitude above 90={}'.format(sum(dataset_train['dropoff_latitude']>90)))
#for test data
print("pickup_longitude above 180 ={}".format(sum(dataset_test["pickup_longitude"]>180)))
print("pickup_longitude above -180 = {}".format(sum(dataset_test["pickup_longitude"]<-180)))
print("pickup_latitude above 90 ={}".format(sum(dataset_test["pickup_latitude"]>90)))
print("pickup_latitude above -90 ={}".format(sum(dataset_test["pickup_latitude"]<-90)))
print('dropoff_longitude above 180={}'.format(sum(dataset_test['dropoff_longitude']>180)))
print('dropoff_longitude below -180={}'.format(sum(dataset_test['dropoff_longitude']<-180)))
print('dropoff_latitude below -90={}'.format(sum(dataset_test['dropoff_latitude']<-90)))
print('dropoff_latitude above 90={}'.format(sum(dataset_test['dropoff_latitude']>90)))
#There's only one outlier which is in variable pickup_latitude.So we will remove it with nan.
#Also we will see if there are any values equal to 0.
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
print(i,"equal to 0={}".format(sum(dataset_train[i]==0)))
#for test data
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
print(i,"equal to 0={}".format(sum(dataset_test[i]==0)))
#there are values which are equal to 0. we will remove them.
# There's only one outlier which is in variable pickup_latitude.So we will remove it with nan
dataset_train=dataset_train.drop(dataset_train[dataset_train["pickup_latitude"]>90].index,axis=0)
#there are values which are equal to 0. we will remove them.
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
dataset_train=dataset_train.drop(dataset_train[dataset_train[i]==0].index,axis=0)
# for i in ['pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude']:
# train.loc[train[i]==0,i] = np.nan
# train.loc[train['pickup_latitude']>90,'pickup_latitude'] = np.nan
dataset_train.shape
#Missing Value Analysis
missing_value=dataset_train.isnull().sum()
missing_value = missing_value.reset_index()
missing_value = missing_value.rename(columns = {'index': 'Variables', 0: 'Missing_percentage'})
missing_value
#find out percentage of null value
missing_value['Missing_percentage'] = (missing_value['Missing_percentage']/len(dataset_train))*100
missing_value = missing_value.sort_values('Missing_percentage', ascending = False).reset_index(drop = True)
dataset_train.info()
dataset_train["fare_amount"]=dataset_train["fare_amount"].fillna(dataset_train["fare_amount"].median())
dataset_train["passenger_count"]=dataset_train["passenger_count"].fillna(dataset_train["passenger_count"].mode()[0])
dataset_train.isnull().sum()
dataset_train["passenger_count"]=dataset_train["passenger_count"].round().astype(object)
dataset_train["passenger_count"].unique()
#outliers analysis by box plot
plt.figure(figsize=(20,5))
plt.xlim(0,100)
sns.boxplot(x=dataset_train["fare_amount"],data=dataset_train,orient="h")
# sum(dataset_train['fare_amount']<22.5)/len(dataset_train['fare_amount'])*100
#Bivariate Boxplots: Boxplot for Numerical Variable Vs Categorical Variable.
plt.figure(figsize=(20,10))
plt.xlim(0,100)
_=sns.boxplot(x=dataset_train["fare_amount"],y=dataset_train["passenger_count"],data=dataset_train,orient="h")
def outlier_detect(df):
for i in df.describe().columns:
q1=df.describe().at["25%",i]
q3=df.describe().at["75%",i]
IQR=(q3-q1)
ltv=(q1-1.5*IQR)
utv=(q3+1.5*IQR)
x=np.array(df[i])
p=[]
for j in x:
if j<ltv:
p.append(ltv)
elif j>utv:
p.append(utv)
else:
p.append(j)
df[i]=p
return (df)
dataset_int1=outlier_detect(dataset_train.loc[:,dataset_int])
dataset_test_obj=["passenger_count"]
dataset_test_int=["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]
dataset_test1=outlier_detect(dataset_test.loc[:,dataset_test_int])
dataset_test1=pd.concat([dataset_test1,dataset_test["passenger_count"]],axis=1)
dataset_test=pd.concat([dataset_test1,dataset_test["pickup_datetime"]],axis=1)
#determine corr
corr=dataset_int1.corr()
f,ax=plt.subplots(figsize=(7,5))
sns.heatmap(corr,mask=np.zeros_like(corr,dtype=np.bool),cmap=sns.diverging_palette(220,10,as_cmap=True),square=True,ax=ax)
# """feature engineering"""
#1.we will derive new features from pickup_datetime variable
#new features will be year,month,day_of_week,hour
dataset_train1=pd.concat([dataset_int1,dataset_train["passenger_count"]],axis=1)
dataset_train2=pd.concat([dataset_train1,dataset_train["pickup_datetime"]],axis=1)
#dataset_train2.isna().sum()
data=[dataset_train2,dataset_test]
for i in data:
i["year"]=i["pickup_datetime"].apply(lambda row:row.year)
i["month"]=i["pickup_datetime"].apply(lambda row:row.month)
i["day_of_week"] = i["pickup_datetime"].apply(lambda row: row.dayofweek)
i["hour"] = i["pickup_datetime"].apply(lambda row: row.hour)
# train2_nodummies=dataset_train2.copy()
# dataset_train2=train2_nodummies.copy()
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2["year"])
# plt.savefig('year.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['month'])
# plt.savefig('month.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['day_of_week'])
# plt.savefig('day_of_week.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['hour'])
# plt.savefig('hour.png')
plt.show
#Now we will use month,day_of_week,hour to derive new features like sessions in a day,seasons in a year,week:weekend/weekday
# for sessions in a day using hour columns
def f(x):
if(x>=5) and (x<=11):
return "morning"
elif (x>=12) and (x<=16):
return "afternoon"
elif (x>=17) and (x<=20):
return "evening"
elif (x>=21) and (x<=23):
return "night pm"
elif (x>=0) and (x<=4):
return "night am"
dataset_train2["sessions"]=dataset_train2["hour"].apply(f)
dataset_test['session'] = dataset_test['hour'].apply(f)
#for seasons in a year using month column
def g(x):
if (x>=3) and (x<=5):
return "spring"
elif (x>=6) and (x<=8):
return "summer"
elif (x>=9) and (x<=11):
return "fall"
else :
return "winter"
dataset_train2['seasons'] = dataset_train2['month'].apply(g)
dataset_test['seasons'] = dataset_test['month'].apply(g)
#for week / weekend in a day of week columns
def h(x):
if (x>=0) and (x<=4):
return "weekday"
elif (x>=5) and (x<=6):
return "weekend"
dataset_train2['week'] = dataset_train2['day_of_week'].apply(h)
dataset_test['week'] = dataset_test['day_of_week'].apply(h)
dataset_train2['passenger_count'].describe()
dataset_train2.isnull().sum()
dataset_test.isna().sum()
#creating dummy varibale
temp=pd.get_dummies(dataset_train2["passenger_count"],prefix="passenger_count")
dataset_train2=dataset_train2.join(temp)
temp = pd.get_dummies(dataset_test['passenger_count'], prefix = 'passenger_count')
dataset_test = dataset_test.join(temp)
temp = pd.get_dummies(dataset_test['seasons'], prefix = 'seasons')
dataset_test = dataset_test.join(temp)
temp=pd.get_dummies(dataset_train2["seasons"],prefix = "season" )
dataset_train2=pd.concat([dataset_train2,temp],axis=1)
temp = pd.get_dummies(dataset_train2['week'], prefix = 'week')
dataset_train2=pd.concat([dataset_train2,temp],axis=1)
temp = pd.get_dummies(dataset_test['week'], prefix = 'week')
dataset_test = dataset_test.join(temp)
temp = pd.get_dummies(dataset_train2['sessions'], prefix = 'sessions')
dataset_train2=pd.concat([dataset_train2,temp],axis=1)
temp = pd.get_dummies(dataset_test['session'], prefix = 'session')
dataset_test = dataset_test.join(temp)
temp = pd.get_dummies(dataset_train2['year'], prefix = 'year')
dataset_train2=pd.concat([dataset_train2,temp],axis=1)
temp = pd.get_dummies(dataset_test['year'], prefix = 'year')
dataset_test = dataset_test.join(temp)
#we will drop one column from each one-hot-encoded variables
dataset_train2.columns
dataset_test.columns
dataset_train2.info()
dataset_train2=dataset_train2.drop(['passenger_count_1.0','season_fall','week_weekday','sessions_afternoon','year_2009'],axis=1)
dataset_test=dataset_test.drop(['passenger_count_1','seasons_fall','week_weekday','session_afternoon','year_2009'],axis=1)
#3.Feature Engineering for latitude and longitude variable
#As we have latitude and longitude data for pickup and dropoff, we will find the distance the cab travelled from pickup and dropoff location.
#def haversine(coord1,coord2):
# data=[dataset_train2,dataset_test]
# for i in data:
# lon1,lat1=coord1
# lon2,lat2=coord2
# r=6371000 #randius of earth in meters
# phi_1=np.radians(i[lat1])
# phi_2=np.radians(i[lat2])
# delta_phi=np.radians(i[lat2]-i[lat1])
# delta_lambda=np.radians(i[lon2]-i[lon1])
# a=np.sin(delta_phi/2.0)**2+np.cos(phi_1)*np.cos(phi_2)*np.sin(delta_lambda/2.0)**2
# c=2*np.arctan2(np.sqrt(a),np.sqrt(1-a))
# meters=c*r #output distance in meter
# km=meters/1000.0
# miles=round(km,3)/1.609344
# i["distance"]=miles
# print(f"distance:{miles} miles")
# return miles
#
#haversine(['pickup_longitude','pickup_latitude'],['dropoff_longitude','dropoff_latitude'])
#As Vincenty (geodesic) is more accurate than haversine and is preferred for short distances,
#we will drop great_circle, together with the other variables which were used to feature engineer.
from geopy.distance import geodesic
from geopy.distance import great_circle
#from sklearn.externals import joblib
data=[dataset_train2,dataset_test]
for i in data:
i["great_circle"]=i.apply(lambda x : great_circle((x["pickup_latitude"],x["pickup_longitude"]),(x["dropoff_latitude"],x["dropoff_longitude"])).miles,axis=1)
i["geodesic"]=i.apply(lambda x: geodesic((x["pickup_latitude"],x["pickup_longitude"]),(x["dropoff_latitude"],x["dropoff_longitude"])).miles,axis=1)
#We will remove the variables which were used to feature engineer new variable
dataset_train2=dataset_train2.drop(['pickup_datetime','pickup_longitude', 'pickup_latitude',
'dropoff_longitude', 'dropoff_latitude', 'passenger_count', 'year',
'month', 'day_of_week', 'hour', 'sessions', 'seasons', 'week','great_circle'],axis=1)
dataset_test=dataset_test.drop(['pickup_datetime','pickup_longitude', 'pickup_latitude',
'dropoff_longitude', 'dropoff_latitude', 'passenger_count', 'year',
'month', 'day_of_week', 'hour', 'session', 'seasons', 'week','great_circle'],axis=1)
plt.figure(figsize=(20,5))
sns.boxplot(x=dataset_train2["geodesic"],data=dataset_train2,orient="h")
plt.figure(figsize=(20,5))
plt.xlim(0,100)
sns.boxplot(x=dataset_train2['geodesic'],data=dataset_train2,orient='h')
plt.title('Boxplot of geodesic ')
# plt.savefig('bp geodesic.png')
plt.show()
dataset_train2.isnull().sum()
#outlier in geodesic
def outlier_detect(df):
for i in df.describe().columns:
q1=df.describe().at["25%",i]
q3=df.describe().at["75%",i]
IQR=(q3-q1)
ltv=(q1-1.5*IQR)
utv=(q3+1.5*IQR)
x=np.array(df[i])
p=[]
for j in x:
if j<ltv:
p.append(ltv)
elif j>utv:
p.append(utv)
else:
p.append(j)
df[i]=p
return (df)
dataset_train11=pd.DataFrame(dataset_train2["geodesic"])
dataset_11=outlier_detect(dataset_train11)
dataset_train2=dataset_train2.drop(["geodesic"],axis=1)
dataset_train2=pd.concat([dataset_train2,dataset_11],axis=1)
dataset_train2.info()
#*****************************************************
#for test data
dataset_test1=pd.DataFrame(dataset_test["geodesic"])
dataset_test11=outlier_detect(dataset_test1)
dataset_test2=dataset_test.drop(["geodesic"],axis=1)
dataset_test=pd.concat([dataset_test2,dataset_test11],axis=1)
#**************************************************************
plt.boxplot(dataset_test["geodesic"])
dataset_train_num=["fare_amount","geodesic"]
dataset_train_obj=["passenger_count_2.0","passenger_count_3.0","passenger_count_4.0","passenger_count_5.0","passenger_count_6.0","season_spring","season_summer","season_winter","week_weekend","sessions_evening","sessions_morning","sessions_night am","sessions_night pm","year_2010","year_2011","year_2012","year_2013","year_2014","year_2015"]
len(dataset_train_obj)
dataset_train2[dataset_train_obj]=dataset_train2[dataset_train_obj].apply(lambda x: x.astype("category"))
dataset_test.info()
dataset_test_obj=["passenger_count_2","passenger_count_3","passenger_count_4","passenger_count_5","passenger_count_6","seasons_spring","seasons_summer","seasons_winter","week_weekend","session_evening","session_morning","session_night am","session_night pm","year_2010","year_2011","year_2012","year_2013","year_2014","year_2015"]
dataset_test[dataset_test_obj]=dataset_test[dataset_test_obj].apply(lambda x: x.astype("category"))
dataset_test.columns
#correlation
plt.figure(figsize=(15,15))
_=sns.heatmap(dataset_train2[dataset_train_num].corr(),square=True,cmap="RdYlGn",linewidth=0.5,linecolor="w",annot=True)
plt.savefig('correlation.png')# plt.savefig('correlation.png')
#As we can see from the above correlation plot, fare_amount and geodesic are correlated with each other.
#Jointplots for Bivariate Analysis.
#Here Scatter plot has regression line between 2 variables along with separate Bar plots of both variables.
#Also its annotated with pearson correlation coefficient and p value.
_=sns.jointplot(x="fare_amount",y="geodesic",data=dataset_train2,kind="reg")
_.annotate(stats.pearsonr)
plt.savefig('jointct.png')
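#Note: JointGrid.annotate, used above, was deprecated and later removed in recent seaborn
#releases, so the call may fail on newer versions. A minimal, hedged sketch of the same
#annotation done manually, assuming scipy.stats is available as `stats` (as the call above implies):
from scipy import stats
g = sns.jointplot(x="fare_amount", y="geodesic", data=dataset_train2, kind="reg")
sub = dataset_train2[["fare_amount", "geodesic"]].dropna()
r, p = stats.pearsonr(sub["fare_amount"], sub["geodesic"])
g.ax_joint.annotate("pearsonr = {:.2f}, p = {:.2e}".format(r, p), xy=(0.05, 0.95), xycoords="axes fraction")
plt.show()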
#Chi-square test of Independence for Categorical Variables/Features
#Hypothesis testing:
#Null Hypothesis: the 2 variables are independent.
#Alternate Hypothesis: the 2 variables are not independent.
#If the p-value is less than 0.05 we reject the null hypothesis and conclude the 2 variables are dependent.
#If the p-value is greater than 0.05 we fail to reject the null hypothesis and treat the 2 variables as independent.
#There should be no dependencies between the independent variables,
#so we drop one of any pair of variables whose pairwise p-value is lower than 0.05
#and keep variables whose pairwise p-values are higher than 0.05.
#loop for chi2 test
for i in dataset_train_obj:
for j in dataset_train_obj:
if (i!=j):
chi2,p,dof,ex=chi2_contingency(pd.crosstab(dataset_train2[i],dataset_train2[j]))
if(p<0.05):
print(i, "and", j, "are dependent on each other", p, "---remove")
else:
print(i, "and", j, "are independent", p, "----keep")
#Analysis of Variance (ANOVA) Test
#It is carried out to compare the groups within a categorical variable.
#ANOVA only tells us whether the group means are the same or not; it does not identify which mean differs.
#Hypothesis testing:
#Null Hypothesis: the means of all categories in a variable are the same.
#Alternate Hypothesis: the mean of at least one category in a variable is different.
#If the p-value is less than 0.05 we reject the null hypothesis.
#If the p-value is greater than 0.05 we fail to reject the null hypothesis.
from statsmodels.formula.api import ols
dataset_train2.columns=["fare_amount","passenger_count_2","passenger_count_3","passenger_count_4","passenger_count_5","passenger_count_6","seasons_spring","seasons_summer","seasons_winter","week_weekend","session_evening","session_morning","session_night_am","session_night_pm","year_2010","year_2011","year_2012","year_2013","year_2014","year_2015","geodesic"]
import statsmodels.api as sm
model=ols("fare_amount ~ C(passenger_count_2)+C(passenger_count_3)+C(passenger_count_4)+C(passenger_count_5)+C(passenger_count_6)+C(seasons_spring)+C(seasons_summer)+C(seasons_winter)+C(week_weekend)+C(session_evening)+C(session_morning)+C(session_night_am)+C(session_night_pm)+C(year_2010)+C(year_2011)+C(year_2012)+C(year_2013)+C(year_2014)+C(year_2015)",data=dataset_train2).fit()
anova_table=sm.stats.anova_lm(model)
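#The ANOVA table computed above is never inspected in the original snippet; a small
#illustrative addition showing how it is usually read (statsmodels' anova_lm puts the
#p-values in the "PR(>F)" column; the 0.05 threshold mirrors the comments above):
print(anova_table)
significant_terms = anova_table[anova_table["PR(>F)"] < 0.05].index.tolist()
print("terms whose group means differ:", significant_terms)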
#Multicollinearity Test
#VIF is always greater than or equal to 1.
#If VIF is 1 --- the variable is not correlated with any of the other variables.
#If VIF is between 1 and 5 --- moderately correlated.
#If VIF is above 5 --- highly correlated.
#If several variables have VIF greater than 5, remove only the one with the highest VIF first.
from patsy import dmatrices
from statsmodels.stats.outliers_influence import variance_inflation_factor
outcome,predictors=dmatrices("fare_amount ~ geodesic+passenger_count_2+passenger_count_3+passenger_count_4+passenger_count_5+passenger_count_6+seasons_spring+seasons_summer+seasons_winter+week_weekend+session_evening+session_morning+session_night_am+session_night_pm+year_2010+year_2011+year_2012+year_2013+year_2014+year_2015",dataset_train2,return_type="dataframe")
vif= | pd.DataFrame() | pandas.DataFrame |
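#The dataset row above ends at the pd.DataFrame() completion, so the VIF loop that normally
#follows is not shown. A minimal sketch of the standard pattern, assuming the `predictors`
#design matrix returned by dmatrices above:
vif = pd.DataFrame()
vif["VIF"] = [variance_inflation_factor(predictors.values, i) for i in range(predictors.shape[1])]
vif["feature"] = predictors.columns
print(vif.round(2))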
from sympy import *
import pandas as pd
from random import random
def random_optimization(xl, xu, n, function):
x = Symbol('x')
f = parse_expr(function)
iteration = 0
data = pd.DataFrame(columns=['iteration','xl','xu','x','f(x)','max_x','max_f(x)'])
max_f = -1E9
for i in range(n):
r = random()
x0 = xl + (xu - xl)*r
df = f.diff(x)
fx = f.subs(x, x0)
dfx = df.subs(x, x0)
if fx > max_f:
max_f = fx
max_x = x0
data = data.append(pd.DataFrame({'iteration':[iteration], 'xl':[xl], 'xu':[xu], 'x':[x0], 'f(x)':[fx], 'max_x':[max_x], 'max_f(x)':[max_f], 'error':[dfx]}), ignore_index = True)
iteration += 1
return data
def multivariable_random_optimization(xl, xu, yl, yu, n, function):
x = Symbol('x')
y = Symbol('y')
f = parse_expr(function)
iteration = 0
data = pd.DataFrame(columns=['iteration','xl','xu','x','yl','yu','y','f(x,y)','max_x','max_y','max_f(x,y)'])
max_f = -1E9
for i in range(n):
r = random()
x0 = xl + (xu - xl)*r
r = random()
y0 = yl + (yu - yl)*r
df = f.diff(x)
fx = f.subs(x, x0)
fxy = fx.subs(y, y0)
if fxy > max_f:
max_f = fxy
max_x = x0
max_y = y0
data = data.append( | pd.DataFrame({'iteration':[iteration], 'xl':[xl], 'xu':[xu], 'x':[x0], 'yl':[yl], 'yu':[yu], 'y':[y0], 'f(x,y)':[fxy], 'max_x':[max_x], 'max_y':[max_y], 'max_f(x,y)':[max_f]}) | pandas.DataFrame |
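#Note: DataFrame.append, used by both optimizers above, was deprecated in pandas 1.4 and
#removed in pandas 2.0. A hedged sketch of the usual replacement -- accumulate the
#per-iteration rows in a list and build the frame once (illustrative only, not part of
#the original snippet):
rows = [{"iteration": k, "x": 0.0, "f(x)": 0.0} for k in range(3)]
data = pd.DataFrame(rows)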
# <NAME>
# 5/12/20
import pandas as pd
def save_files(outputfolder, merged):
"""
:param outputfolder: The folder where all of the merged files will be saved
:param merged: The merged dictionaries
:return: None
"""
keys = list(merged.keys())
for i in range(len(keys)):
df = | pd.DataFrame(merged[keys[i]]) | pandas.DataFrame |
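#The row boundary cuts save_files off right after the DataFrame is built; its docstring says
#each merged frame is written to outputfolder. A hedged, self-contained sketch of that final
#step (the per-key CSV naming scheme is an assumption, not from the original):
import os
def save_merged_to_csv(outputfolder, merged):
    # write one CSV per merged dictionary, named after its key
    for key, value in merged.items():
        pd.DataFrame(value).to_csv(os.path.join(outputfolder, str(key) + ".csv"), index=False)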
# -*- coding: utf-8 -*-
"""
Tests that quoting specifications are properly handled
during parsing for all of the parsers defined in parsers.py
"""
import csv
import pytest
from pandas.compat import PY3, StringIO, u
from pandas.errors import ParserError
from pandas import DataFrame
import pandas.util.testing as tm
class QuotingTests(object):
def test_bad_quote_char(self):
data = '1,2,3'
# Python 2.x: "...must be an 1-character..."
# Python 3.x: "...must be a 1-character..."
msg = '"quotechar" must be a(n)? 1-character string'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar='foo')
msg = 'quotechar must be set if quoting enabled'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
msg = '"quotechar" must be string, not int'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar=2)
def test_bad_quoting(self):
data = '1,2,3'
msg = '"quoting" must be an integer'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quoting='foo')
# quoting must in the range [0, 3]
msg = 'bad "quoting" value'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quoting=5)
def test_quote_char_basic(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar='"')
tm.assert_frame_equal(result, expected)
def test_quote_char_various(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
quote_chars = ['~', '*', '%', '$', '@', 'P']
for quote_char in quote_chars:
new_data = data.replace('"', quote_char)
result = self.read_csv(StringIO(new_data), quotechar=quote_char)
tm.assert_frame_equal(result, expected)
def test_null_quote_char(self):
data = 'a,b,c\n1,2,3'
# sanity checks
msg = 'quotechar must be set if quoting enabled'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar='',
quoting=csv.QUOTE_MINIMAL)
# no errors should be raised if quoting is None
expected = DataFrame([[1, 2, 3]],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='',
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
def test_quoting_various(self):
data = '1,2,"foo"'
cols = ['a', 'b', 'c']
# QUOTE_MINIMAL and QUOTE_ALL apply only to
# the CSV writer, so they should have no
# special effect for the CSV reader
expected = DataFrame([[1, 2, 'foo']], columns=cols)
# test default (afterwards, arguments are all explicit)
result = self.read_csv(StringIO(data), names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_MINIMAL, names=cols)
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
]
)
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = Series(dti)
tds = Series(tdi)
res = dts._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 7795
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
exp = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
# GH#7795
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
df1 = DataFrame(0, index=ix1, columns=["A", "B"])
df2 = DataFrame(0, index=ix2, columns=["A", "B"])
exp_idx = pd.DatetimeIndex(
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
tz=tz,
)
exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
tm.assert_frame_equal(df1._append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 13660
# different tz coerces to object
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-02"),
],
dtype=object,
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01", tz="US/Pacific"),
pd.Timestamp("2012-01-02", tz="US/Pacific"),
],
dtype=object,
)
res = dti1.append(dti3)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts3 = Series(dti3)
res = dts1._append(dts3)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.Period("2012-02-01", freq="D"),
],
dtype=object,
)
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_mixed_dt_to_object(self):
# GH 13221
# different datetimelike
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
],
dtype=object,
)
res = pi1.append(tdi)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = ps1._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# inverse
exp = Index(
[
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
],
dtype=object,
)
res = tdi.append(pi1)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = tds._append(ps1)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([tds, ps1])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concat_categorical(self):
# GH 13524
# same categories -> category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2], dtype="category")
exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# partially different categories => not-category
s1 = Series([3, 2], dtype="category")
s2 = Series([2, 1], dtype="category")
exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# completely different categories (same dtype) => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series([np.nan, 1, 3, 2], dtype="category")
exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
def test_union_categorical_same_categories_different_order(self):
# https://github.com/pandas-dev/pandas/issues/19096
a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"]))
b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"]))
result = pd.concat([a, b], ignore_index=True)
expected = Series(
Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"])
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_coercion(self):
# GH 13524
# category + not-category => not-category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2])
exp = Series([1, 2, np.nan, 2, 1, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# result shouldn't be affected by 1st elem dtype
exp = Series([2, 1, 2, 1, 2, np.nan], dtype=np.float64)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# all values are not in category => not-category
s1 = Series([3, 2], dtype="category")
s2 = Series([2, 1])
exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series([2, 1, 3, 2])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# completely different categories => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series([1, 3, 2])
exp = Series([10, 11, np.nan, 1, 3, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series([1, 3, 2, 10, 11, np.nan], dtype=np.float64)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# different dtype => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series(["a", "b", "c"])
exp = Series([10, 11, np.nan, "a", "b", "c"])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series(["a", "b", "c", 10, 11, np.nan])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# if normal series only contains NaN-likes => not-category
s1 = Series([10, 11], dtype="category")
s2 = Series([np.nan, np.nan, np.nan])
exp = Series([10, 11, np.nan, np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series([np.nan, np.nan, np.nan, 10, 11])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
def test_concat_categorical_3elem_coercion(self):
# GH 13524
# mixed dtypes => not-category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2], dtype="category")
s3 = Series([1, 2, 1, 2, np.nan])
exp = Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="float")
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
exp = Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="float")
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
# values are all in either category => not-category
s1 = Series([4, 5, 6], dtype="category")
s2 = Series([1, 2, 3], dtype="category")
s3 = Series([1, 3, 4])
exp = Series([4, 5, 6, 1, 2, 3, 1, 3, 4])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
exp = Series([1, 3, 4, 4, 5, 6, 1, 2, 3])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
# values are all in either category => not-category
s1 = Series([4, 5, 6], dtype="category")
s2 = Series([1, 2, 3], dtype="category")
s3 = Series([10, 11, 12])
exp = Series([4, 5, 6, 1, 2, 3, 10, 11, 12])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
exp = Series([10, 11, 12, 4, 5, 6, 1, 2, 3])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
def test_concat_categorical_multi_coercion(self):
# GH 13524
s1 = Series([1, 3], dtype="category")
s2 = Series([3, 4], dtype="category")
s3 = Series([2, 3])
s4 = Series([2, 2], dtype="category")
s5 = Series([1, np.nan])
s6 = Series([1, 3, 2], dtype="category")
# mixed dtype, values are all in categories => not-category
exp = Series([1, 3, 3, 4, 2, 3, 2, 2, 1, np.nan, 1, 3, 2])
res = pd.concat([s1, s2, s3, s4, s5, s6], ignore_index=True)
tm.assert_series_equal(res, exp)
res = s1._append([s2, s3, s4, s5, s6], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series([1, 3, 2, 1, np.nan, 2, 2, 2, 3, 3, 4, 1, 3])
res = pd.concat([s6, s5, s4, s3, s2, s1], ignore_index=True)
tm.assert_series_equal(res, exp)
res = s6._append([s5, s4, s3, s2, s1], ignore_index=True)
tm.assert_series_equal(res, exp)
def test_concat_categorical_ordered(self):
# GH 13524
s1 = Series(Categorical([1, 2, np.nan], ordered=True))
s2 = Series(Categorical([2, 1, 2], ordered=True))
exp = Series(Categorical([1, 2, np.nan, 2, 1, 2], ordered=True))
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series(Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan], ordered=True))
tm.assert_series_equal(pd.concat([s1, s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s1._append([s2, s1], ignore_index=True), exp)
def test_concat_categorical_coercion_nan(self):
# GH 13524
# some edge cases
# category + not-category => not category
s1 = Series(np.array([np.nan, np.nan], dtype=np.float64), dtype="category")
s2 = Series([np.nan, 1])
exp = Series([np.nan, np.nan, np.nan, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
s1 = Series([1, np.nan], dtype="category")
s2 = Series([np.nan, np.nan])
exp = Series([1, np.nan, np.nan, np.nan], dtype="float")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# mixed dtype, all nan-likes => not-category
s1 = Series([np.nan, np.nan], dtype="category")
s2 = Series([np.nan, np.nan])
exp = Series([np.nan, np.nan, np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# all category nan-likes => category
s1 = Series([np.nan, np.nan], dtype="category")
s2 = | Series([np.nan, np.nan], dtype="category") | pandas.Series |