prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90)
---|---|---
__author__ = 'lucabasa'
__version__ = '1.1.0'
__status__ = 'development'
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, NuSVC
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import VarianceThreshold
import utility as ut
def train_all(df_train, df_test, n_folds, pca=False):
train = df_train.copy()
test = df_test.copy()
oof_svc = np.zeros(len(train))
oof_nusvc = np.zeros(len(train))
oof_logit = np.zeros(len(train))
oof_knn = np.zeros(len(train))
oof_qda = np.zeros(len(train))
preds_svc = np.zeros(len(test))
preds_nusvc = np.zeros(len(test))
preds_logit = np.zeros(len(test))
preds_knn = np.zeros(len(test))
preds_qda = np.zeros(len(test))
cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i].copy()
test2 = test[test['wheezy-copper-turtle-magic']==i].copy()
if len(train2) == 0:
continue
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
if pca:
data = pd.concat([ | pd.DataFrame(train2[cols]) | pandas.DataFrame |
import dash
import numpy as np
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import dash_table
from app import app
import plotly.graph_objs as go
import json, codecs
from scipy.integrate import simps
import pandas as pd
from functions import vpp_solve
@app.callback(Output('dimension-boa', 'children'), [Input('bwl-new', 'value')])
def dimensionshull(bwlnew):
boa = np.float(bwlnew)*1.3
return html.Div([
dbc.Label("Beam [m]"), html.Br(),
dbc.Input(type='text', id='boa', bs_size="sm", value=format(round(boa,2)), className='boxinput'),
])
@app.callback(Output('dimensions-sail', 'children'),
[Input('sailset', 'value'), Input('disp-new', 'value'), Input('lwl-new', 'value')])
def dimensionssail(sailset, disp, lwl):
lwl = np.float(lwl)
ar = 2.95
sa = 18*np.float(disp)**(2/3)
sm = sa*0.6
p = (2*sm*ar)**0.5
e=2*sm/p
if sailset == "1" or sailset == "3":
i=p*0.9
j=2*(sa-sm)/i
elif sailset == "2" or sailset == "4":
i=0
j=0
lpg = 1.5*j
spl = 1.1*p
mastheight = 1.2+np.float(p)+0.2
mastpos = lwl*0.6
return html.Div([
dbc.Label("Mainsail hoist - P [m]"), html.Br(),
dbc.Input(type='text', id='psail', bs_size="sm", value=format(round(p,2)), className='boxinput'), html.Br(),
dbc.Label("Mainsail foot - E [m]"), html.Br(),
dbc.Input(type='text', id='esail', bs_size="sm", value=format(round(e,2)), className='boxinput'), html.Br(),
dbc.Label("Jib height - I [m]"), html.Br(),
dbc.Input(type='text', id='isail', bs_size="sm", value=format(round(i,2)), className='boxinput'), html.Br(),
dbc.Label("Jib base - J [m]"), html.Br(),
dbc.Input(type='text', id='jsail', bs_size="sm", value=format(round(j,2)), className='boxinput'), html.Br(),
dbc.Label("Perpendicular of longest jib [m]"), html.Br(),
dbc.Input(type='text', id='lpg', bs_size="sm", value=format(round(lpg,2)), className='boxinput'), html.Br(),
dbc.Label("Spinnaker leech length [m]"), html.Br(),
dbc.Input(type='text', id='spl', bs_size="sm", value=format(round(spl,2)), className='boxinput'), html.Br(),
dbc.Label("Mast average diameter [m]"), html.Br(),
dbc.Input(type='text', id='mast-diameter', bs_size="sm", value=format(round(0.2,2)), className='boxinput'), html.Br(),
dbc.Label("Height of main boom above sheer [m]"), html.Br(),
dbc.Input(type='text', id='boom-height', bs_size="sm", value=format(round(1.2,2)), className='boxinput'), html.Br(),
dbc.Label("Mast height above sheerline [m]"), html.Br(),
dbc.Input(type='text', id='mast-height', bs_size="sm", value=format(round(mastheight,2)), className='boxinput'), html.Br(),
dbc.Label("Mast longitudinal position [m]"), html.Br(),
dbc.Input(type='text', id='mastpos', bs_size="sm", value=format(round(mastpos,2)), className='boxinput'), html.Br(),
])
@app.callback(Output('dimensions-rudder', 'children'),
[Input('lwl-new', 'value'), Input('disp-new', 'value')])
def dimensionsrudder(lwl, disp):
lwl = np.float(lwl)
t =lwl/(0.19*lwl+4.0533)
spanrudder = t*0.8
sa = 18*np.float(disp)**(2/3)
surfacerudder = 0.01*sa
tiprudder=0.7*surfacerudder/spanrudder
rootrudder=1.1*surfacerudder/spanrudder
return html.Div([
dbc.Label("Root Chord"), html.Br(),
dbc.Input(type='text', id='rootchord-rudder', bs_size="sm", value=format(round(rootrudder,2)), className='boxinput'), html.Br(),
dbc.Label("Tip chord"), html.Br(),
dbc.Input(type='text', id='tipchord-rudder', bs_size="sm", value=format(round(tiprudder,2)), className='boxinput'), html.Br(),
dbc.Label("Span"), html.Br(),
dbc.Input(type='text', id='span-rudder', bs_size="sm", value=format(round(spanrudder,2)), className='boxinput'), html.Br(),
dbc.Label("Sweep angle [degrees]"), html.Br(),
dbc.Input(type='text', id='sweep-rudder', bs_size="sm", value=format(round(15,2)), className='boxinput'), html.Br(),
dbc.Label("Height above or below waterline"), html.Br(),
dbc.Input(type='text', id='heightsurface-rudder', bs_size="sm", value=format(round(-0.05,2)), className='boxinput'), html.Br(),
dbc.Label("Root Centerline"), html.Br(),
dbc.Input(type='text', id='pos-rudder', bs_size="sm", value=format(round(1,2)), className='boxinput'), html.Br(),
dbc.Label("Root Chord Thickness"), html.Br(),
dbc.Input(type='text', id='rootchord-rudder-tcks', bs_size="sm", value=format(round(0.175,2)), className='boxinput'), html.Br(),
dbc.Label("Tip Chord Thickness"), html.Br(),
dbc.Input(type='text', id='tipchord-rudder-tcks', bs_size="sm", value=format(round(0.105,2)), className='boxinput'), html.Br(),
])
@app.callback(Output('dimensions-keel', 'children'),
[Input('lwl-new', 'value'), Input('tc-new', 'value'), Input('disp-new', 'value')])
def dimensionskeel(lwl, tc, disp):
lwl = np.float(lwl)
cekeel = lwl/2
lwl = np.float(lwl)
t =lwl/(0.19*lwl+4.0533)
spankeel = t-np.float(tc)
sa = 18*np.float(disp)**(2/3)
surfacekeel = 0.03*sa
tipkeel=0.8*surfacekeel/spankeel
rootkeel=1.2*surfacekeel/spankeel
return html.Div([
dbc.Label("Root Chord"), html.Br(),
dbc.Input(type='text', id='rootchord-keel', bs_size="sm", value=format(round(rootkeel,2)), className='boxinput'), html.Br(),
dbc.Label("Tip Chord"), html.Br(),
dbc.Input(type='text', id='tipchord-keel', bs_size="sm", value=format(round(tipkeel,2)), className='boxinput'), html.Br(),
dbc.Label("Span"), html.Br(),
dbc.Input(type='text', id='span-keel', bs_size="sm", value=format(round(spankeel,2)), className='boxinput'), html.Br(),
dbc.Label("Sweep angle [degrees]"), html.Br(),
dbc.Input(type='text', id='sweep-keel', bs_size="sm", value=format(round(35,2)), className='boxinput'), html.Br(),
dbc.Label("Root Centerline"), html.Br(),
dbc.Input(type='text', id='pos-keel', bs_size="sm", value=format(round(cekeel,2)), className='boxinput'), html.Br(),
dbc.Label("Root Chord Thickness"), html.Br(),
dbc.Input(type='text', id='rootchord-keel-tcks', bs_size="sm", value=format(round(0.175,2)), className='boxinput'), html.Br(),
dbc.Label("Tip Chord Thickness"), html.Br(),
dbc.Input(type='text', id='tipchord-keel-tcks', bs_size="sm", value=format(round(0.1,2)), className='boxinput'), html.Br(),
])
@app.callback(Output('dimensions-mizzen', 'children'),
[Input('mzn-check', 'value')])
def mzncheck(mzncheck):
if mzncheck == '0':
return html.Div([
dbc.Label("Mizzen Hoist"),
dbc.Input(type='text', id='pmz', bs_size="sm", value=0),
dbc.Label("Mizzen Foot"),
dbc.Input(type='text', id='emz', bs_size="sm", value=0),
dbc.Label("Boom height"),
dbc.Input(type='text', id='badmz', bs_size="sm", value=0)
])
if mzncheck == '1':
return html.Div([
dbc.Label("Mizzen Hoist"), html.Br(),
dbc.Input(type='text', id='pmz', bs_size="sm", value=format(round(4,2)), className='boxinput'), html.Br(),
dbc.Label("Mizzen Foot"), html.Br(),
dbc.Input(type='text', id='emz', bs_size="sm", value=format(round(2,2)), className='boxinput'), html.Br(),
dbc.Label("Boom height"), html.Br(),
dbc.Input(type='text', id='badmz', bs_size="sm", value=format(round(1,2)), className='boxinput'), html.Br(),
])
@app.callback(Output('dimensions-bulbo', 'children'),
[Input('bulbo-check', 'value')])
def bulbocheck(bulbocheck):
if bulbocheck == '0':
return html.Div([
dbc.Label("Keel bulbous length"),
dbc.Input(type='text', id='lbk', bs_size="sm", value=0),
dbc.Label("Keel bulbous lateral area"),
dbc.Input(type='text', id='abk', bs_size="sm", value=0),
dbc.Label("Keel bulbous wetted area"),
dbc.Input(type='text', id='sbk', bs_size="sm", value=0)
])
if bulbocheck == '1':
return html.Div([
dbc.Label("Keel bulbous length"), html.Br(),
dbc.Input(type='text', id='lbk', bs_size="sm", value=format(round(1,2)), className='boxinput'), html.Br(),
dbc.Label("Keel bulbous lateral area"), html.Br(),
dbc.Input(type='text', id='abk', bs_size="sm", value=format(round(1,2)), className='boxinput'), html.Br(),
dbc.Label("Keel bulbous wetted area"), html.Br(),
dbc.Input(type='text', id='sbk', bs_size="sm", value=format(round(1,2)), className='boxinput'), html.Br(),
])
@app.callback(Output('dimension-loa', 'children'), [Input('lwl-new', 'value'), Input('overhang', 'value'), Input('bowangle', 'value'), Input('freeboard', 'value'), Input('disp-new', 'value'), Input('ballast-ratio', 'value'), Input('tc-new', 'value'), Input('bwl-new', 'value')])
def dimensionloa(lwl, overhang, bowangle, freeboard, disp, br, tc, bwl):
loa = np.float(lwl)+np.float(overhang)+np.tan(np.radians(np.float(bowangle)))*np.float(freeboard)
loaft = loa/0.3048
boa = np.float(bwl)*1.1
dispmass = np.float(disp)*1025
br = np.float(br)/100
ssv = boa**2/(br*np.float(tc)*np.float(disp)**(1/3))
avs = 110+(400/(ssv-10))
cs = boa*3.28084/(dispmass*2.20462/64)**(1/3)
cr = np.float(disp)*1025*2.20462/((boa*3.28084)**(4/3)*0.65*(0.7*np.float(lwl)*3.28084+0.3*loa*3.28084))
data = {'Parameters' : ['Angle of Vanishing Stability', 'Capsize Screening Factor', 'Comfort Ratio'], 'Values' : [round(avs,2), round(cs,2), round(cr,2)], 'Recommendation' : ['> 110', '< 2', '> 30'], 'Unit' : ['degrees', '-', '-']}
df = pd.DataFrame(data)
return html.Div([
dbc.Row(
dbc.Col([
dbc.Label("The overall lenght of the vessel is {} feet, equivalent to {} meters.".format(np.round(loaft,0), np.round(loa,2))),
html.Br(), html.Br(),
dash_table.DataTable(
columns=[{"name": i, "id": i} for i in df.columns],
data=df.to_dict("rows"),
style_cell={'textAlign': 'center', 'minWidth': '0px', 'maxWidth': '150px', 'whiteSpace': 'normal', 'font_family': 'Source Sans Pro', 'font-size': '10pt',},
style_cell_conditional=[{'if': {'column_id': 'Parameters'}, 'textAlign': 'left'}],
style_as_list_view=True,
style_header={'fontWeight': 'bold'},
)
]),
),
]),
@app.callback(Output('dimension-area', 'children'), [Input('rootchord-rudder', 'value'), Input('tipchord-rudder', 'value'), Input('span-rudder', 'value'), Input('rootchord-keel', 'value'), Input('tipchord-keel', 'value'), Input('span-keel', 'value'), Input('psail', 'value'), Input('esail', 'value'), Input('isail', 'value'), Input('jsail', 'value')])
def dimensionloa(rootrudder, tiprudder, spanrudder, rootkeel, tipkeel, spankeel, psail, esail, isail, jsail):
arearudder = np.float(spanrudder)*(np.float(rootrudder)+np.float(tiprudder))/2
areakeel = np.float(spankeel)*(np.float(rootkeel)+np.float(tipkeel))/2
areasail = np.float(psail)*np.float(esail)/2+np.float(isail)*np.float(jsail)/2
data = {'Dimensions' : ['Rudder Area', 'Keel Area', 'Sail Area'], 'Values' : [round(arearudder,2), round(areakeel,2), round(areasail,2)], 'Unit' : ['m2', 'm2', 'm2']}
df = | pd.DataFrame(data) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
"""
for class_archivo in archivos:
f = open(os.path.abspath(os.path.join(path,class_archivo)),'r')
lineas = f.read().splitlines()
#print(lineas,"\n")
f.close()
"""
def leer_predicciones(archivos):
lista_archivos = []
for file_archivos in archivos:
data = pd.read_csv(os.path.abspath(os.path.join(path,file_archivos)), sep=" ", header=None)
data.columns = ["imagen", "prob", "xmin_pred", "ymin_pred", "xmax_pred", "ymax_pred"]
lista_archivos.append(data)
return lista_archivos
def read_data_cfg(datacfg):
options = dict()
options['gpus'] = '0,1,2,3'
options['num_workers'] = '10'
with open(datacfg, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.strip()
if line == '':
continue
key,value = line.split('=')
key = key.strip()
value = value.strip()
options[key] = value
return options
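# A minimal sketch (hypothetical file name and contents, not from this repo) of the
# key=value format parsed by read_data_cfg above:
#
#   classes = 1
#   train = data/train.txt
#   valid = data/valid.txt
#   names = data/obj.names
#
# read_data_cfg("obj.data") would return these pairs on top of the
# 'gpus'/'num_workers' defaults.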
def leer_target_cord(lineas_archivo_valid):
target = []
for linea in lineas_archivo_valid:
linea = linea.replace("imagenes","labels").replace(".jpg",".txt")
nombre = os.path.basename(linea).split('.')[0]
data_nombre = pd.DataFrame(columns=['imagen'])
data_nombre.loc[0] = [nombre]
data = pd.read_csv(linea, sep=" ", header=None)
data = pd.concat([data_nombre,data],axis=1, ignore_index=True)
data.columns = ["imagen","class", "xmin", "ymin", "xmax", "ymax"]
target.append(data)
return pd.concat(target)
def IOU(df):
copy = df.copy()
df_iou = pd.DataFrame(columns=['imagen','iou'])
idx = 0
for index, row in df.iterrows():
xmin_inter = max(row["xmin"], row["xmin_pred"])
ymin_inter = max(row["ymin"], row["ymin_pred"])
xmax_inter = min(row["xmax"], row["xmax_pred"])
ymax_inter = min(row["ymax"], row["ymax_pred"])
# Calculo de area de intersecion de rectangulos
inter_area = max(0, xmax_inter - xmin_inter + 1) * max(0, ymax_inter - ymin_inter + 1)
# Calculo de area objetivo y area de prediccion
actual_area = (row["xmax"] - row["xmin"] + 1) * (row["ymax"]- row["ymin"] + 1)
pred_area = (row["xmax_pred"] - row["xmin_pred"] + 1) * (row["ymax_pred"] - row["ymin_pred"] + 1)
# Calculo interseccion sobre union
iou = inter_area / float(actual_area + pred_area - inter_area)
df_iou.loc[idx] = [row["imagen"], iou]
idx+=1
merge = | pd.merge(df, df_iou, on='imagen') | pandas.merge |
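# Worked arithmetic check (hypothetical boxes, not from the data) of the IoU
# computation in IOU() above, using its inclusive "+1" pixel convention:
# target (0,0)-(9,9) and prediction (5,5)-(14,14) each cover 10*10 = 100 pixels,
# the overlap is (9-5+1)*(9-5+1) = 25, so IoU = 25 / (100 + 100 - 25) ~= 0.143.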
#!/usr/bin/python
# -*-coding: utf-8 -*-
# Author: <NAME>
# Email : <EMAIL>
# A set of convenience functions used for producing plots in `dabest`.
from .misc_tools import merge_two_dicts
def halfviolin(v, half='right', fill_color='k', alpha=1,
line_color='k', line_width=0):
import numpy as np
for b in v['bodies']:
V = b.get_paths()[0].vertices
mean_vertical = np.mean(V[:, 0])
mean_horizontal = np.mean(V[:, 1])
if half == 'right':
V[:, 0] = np.clip(V[:, 0], mean_vertical, np.inf)
elif half == 'left':
V[:, 0] = np.clip(V[:, 0], -np.inf, mean_vertical)
elif half == 'bottom':
V[:, 1] = np.clip(V[:, 1], -np.inf, mean_horizontal)
elif half == 'top':
V[:, 1] = np.clip(V[:, 1], mean_horizontal, np.inf)
b.set_color(fill_color)
b.set_alpha(alpha)
b.set_edgecolor(line_color)
b.set_linewidth(line_width)
# def align_yaxis(ax1, v1, ax2, v2):
# """adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
# # Taken from
# # http://stackoverflow.com/questions/7630778/
# # matplotlib-align-origin-of-right-axis-with-specific-left-axis-value
# _, y1 = ax1.transData.transform((0, v1))
# _, y2 = ax2.transData.transform((0, v2))
# inv = ax2.transData.inverted()
# _, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
# miny, maxy = ax2.get_ylim()
# ax2.set_ylim(miny+dy, maxy+dy)
#
#
#
# def rotate_ticks(axes, angle=45, alignment='right'):
# for tick in axes.get_xticklabels():
# tick.set_rotation(angle)
# tick.set_horizontalalignment(alignment)
def get_swarm_spans(coll):
"""
Given a matplotlib Collection, will obtain the x and y spans
for the collection. Will return None if this fails.
"""
import numpy as np
x, y = np.array(coll.get_offsets()).T
try:
return x.min(), x.max(), y.min(), y.max()
except ValueError:
return None
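# A minimal usage sketch (synthetic points, not from the source): get_swarm_spans
# works on any matplotlib PathCollection, e.g. the one returned by plt.scatter.
# import matplotlib.pyplot as plt
# coll = plt.scatter([0, 1, 2], [3, 4, 5])
# get_swarm_spans(coll)   # -> (0.0, 2.0, 3.0, 5.0)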
def gapped_lines(data, x, y, type='mean_sd', offset=0.2, ax=None,
line_color="black", gap_width_percent=1,
**kwargs):
'''
Convenience function to plot the standard deviations as vertical
errorbars. The mean is indicated by a gap of negative space in the line.
This style is inspired by <NAME>'s redesign of the boxplot.
See The Visual Display of Quantitative Information (1983), pp.128-130.
Keywords
--------
data: pandas DataFrame.
This DataFrame should be in 'long' format.
x, y: string.
x and y columns to be plotted.
type: ['mean_sd', 'median_quartiles'], default 'mean_sd'
Plots the summary statistics for each group. If 'mean_sd', then the
mean and standard deviation of each group is plotted as a gapped line.
If 'median_quartiles', then the median and 25th and 75th percentiles of
each group is plotted instead.
offset: float (default 0.2) or iterable.
Give a single float (that will be used as the x-offset of all
gapped lines), or an iterable containing the list of x-offsets.
line_color: string (matplotlib color, default "black") or iterable of
matplotlib colors.
The color of the vertical line indicating the standard deviations.
gap_width_percent: float, default 1
The width of the gap in the line (indicating the central measure),
expressed as a percentage of the y-span of the axes.
ax: matplotlib Axes object, default None
If a matplotlib Axes object is specified, the gapped lines will be
plotted in order on this axes. If None, the current axes (plt.gca())
is used.
kwargs: dict, default None
Dictionary with kwargs passed to matplotlib.lines.Line2D
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
if gap_width_percent < 0 or gap_width_percent > 100:
raise ValueError("`gap_width_percent` must be between 0 and 100.")
if ax is None:
ax = plt.gca()
ax_ylims = ax.get_ylim()
ax_yspan = np.abs(ax_ylims[1] - ax_ylims[0])
gap_width = ax_yspan * gap_width_percent/100
keys = kwargs.keys()
if 'clip_on' not in keys:
kwargs['clip_on'] = False
if 'zorder' not in keys:
kwargs['zorder'] = 5
if 'lw' not in keys:
kwargs['lw'] = 2.
# # Grab the order in which the groups appear.
# group_order = pd.unique(data[x])
# Grab the order in which the groups appear,
# depending on whether the x-column is categorical.
if isinstance(data[x].dtype, pd.CategoricalDtype):
group_order = pd.unique(data[x]).categories
else:
group_order = | pd.unique(data[x]) | pandas.unique |
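# A minimal usage sketch (hypothetical long-format data), assuming gapped_lines is
# completed as documented in its docstring above:
# demo_df = pd.DataFrame({"group": ["a"] * 20 + ["b"] * 20,
#                         "value": np.random.randn(40)})
# fig, ax = plt.subplots()
# gapped_lines(demo_df, x="group", y="value", type="mean_sd", ax=ax)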
import pandas as pd
from pathlib import Path
import numpy as np
def getTeams(teamColumn ,gameLogs):
teams = {}
for team in gameLogs[teamColumn].unique():
teams[team] = gameLogs[gameLogs[teamColumn]==team]
return teams
def getWinRatio(teamType, team, window=10):
if teamType=="Home":
return team.loc[:,'Home team win'].rolling(window).mean().shift(1)
else:
return (1-team.loc[:,'Home team win']).rolling(window).mean().shift(1)
def getScoreRatio(teamType, team, window=10):
if teamType=="Home":
return team.loc[:,'Home score'].rolling(window).mean().shift(1)
else:
return team.loc[:,'Visiting score'].rolling(window).mean().shift(1)
def getOddRatio(teamType, team, window=10):
if teamType=="Home":
return team.loc[:,'Home team odd'].rolling(window).mean().shift(1)
else:
return (1/team.loc[:,'Home team odd']).rolling(window).mean().shift(1)
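# Toy illustration (synthetic results, not the real game logs) of the rolling
# ratio pattern used above: each value aggregates only the *previous* `window` games.
_toy = pd.DataFrame({'Home team win': [1, 0, 1, 1, 0, 1]})
_toy['rolling_win_ratio'] = _toy['Home team win'].rolling(3).mean().shift(1)
# rows 0-2 are NaN (not enough history yet); row 3 equals the mean of rows 0-2 = 2/3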
path = Path
gameLogs = | pd.read_csv(path+r'\Filtered\_mlb_filtered_GameLogs.csv', index_col=False) | pandas.read_csv |
import os
os.chdir("D:/George/Projects/PaperTrends/src")
import tweepy
from tqdm import tqdm
import pandas as pd
import sys
disableTQDM = False
class TwitterParser():
def __init__(self, user='arxivtrends'):
print("> Twitter Parser initialized")
keys = self._readAPIKeys("env.json", user)
auth = tweepy.OAuthHandler(keys['consumer_key'], keys['consumer_secret'])
auth.set_access_token(keys['access_token'], keys['access_token_secret'])
self.api = tweepy.API(auth)
self.df = None
self.debug = None
def parse(self, keyword="arxiv.org/", regex="\d\d\d\d\.[0-9A-z]*", feed='popular', n=1000):
print(f"> Parsing [{keyword}] keyword from [{feed}] feed for [{n}] tweets.")
public_tweets = tweepy.Cursor(self.api.search, q=keyword, result_type=feed, tweet_mode="extended").items(n)
self.tweets = []
total = 0
pbar = tqdm(public_tweets, disable=disableTQDM)
try:
for tweet in pbar:
pbar.set_description(f"Parsing tweet [❤️ :{tweet.favorite_count} ↪️ :{tweet.retweet_count}]")
total += 1
if tweet.favorite_count > 0 and tweet.retweet_count > 0:
self.tweets.append(tweet)
if 'retweeted_status' in tweet.__dict__.keys():
self.tweets.append(tweet.retweeted_status)
except Exception as e:
print(f"{sys.exc_info()[0]} ~ {str(e)}")
print(f">> Total tweets: {total}. Filtered tweets (❤️ >0 or ↪️ >0): {len(self.tweets)}")
print(f"> Generating dataframe from raw tweets for regex scheme /{regex}/")
import re
import urllib
dfList = []
pbar = tqdm(self.tweets, disable=disableTQDM)
for t in pbar:
try:
if t.entities['urls'] != []:
url = urllib.request.urlopen(t.entities['urls'][0]['url']).geturl()
else:
url = urllib.request.urlopen(t.retweeted_status.entities['urls'][0]['url']).geturl()
key = re.findall(re.compile(regex), url)
if key != []:
key = key[0]
else:
continue
pbar.set_description(f"Key found: [{key}]")
except urllib.error.HTTPError as e:
print(f"https://twitter.com/{t.user.screen_name}/status/{t.id} {sys.exc_info()[0]} ~ {str(e)}")
continue
except Exception as e:
print(str(e))
print(f"https://twitter.com/{t.user.screen_name}/status/{t.id} {sys.exc_info()[0]}")
self.debug = t
pbar.set_description(f"Key was not found. https://twitter.com/{t.user.screen_name}/status/{t.id}")
continue
key = key.split('v')[0]
dfList.append({
'key': 'A:'+key,
'id':t.id,
'user': t.user.screen_name,
'favorited': t.favorite_count,
'retweeted': t.retweet_count,
'created_at': t.created_at,
'time_delta': pd.datetime.now()-pd.to_datetime(t.created_at),
'url': url,
'text': self._cleanText(t.full_text),
})
if self.df is not None:
oldlen = len(self.df)
newDF = | pd.DataFrame(dfList, columns=['key', 'id', 'user', 'favorited', 'retweeted', 'created_at', 'url', 'text']) | pandas.DataFrame |
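# Standalone check (hypothetical tweet URL) of the arXiv-id regex used in parse() above:
# re.findall(r"\d\d\d\d\.[0-9A-z]*", "https://arxiv.org/abs/2106.01345v2")
# -> ['2106.01345v2'];  '2106.01345v2'.split('v')[0] -> '2106.01345'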
import numpy as np
import pandas as pd
import collections
from datetime import datetime
from datetime import timedelta
import os
''' THIS CLASS HAS MULTIPLE FUNCTIONS FOR DATA LOADING AND STORING '''
class DataHandler(object):
''' This function splits the data in train/test/dev sets and slices it into "game collections" '''
def get_data_7d_3split(self, include_weekends, n_episodes, start_year, start_month, start_day):
n_episodes = n_episodes
total_game_count = 0
train_count = 0
dev_count = 0
test_count = 0
c = 0
start_date = datetime(start_year, start_month, start_day, 12, 0, 0)
end_date = start_date + timedelta(days=8) + timedelta(minutes=-1)
# Collection to save sliced data for each game
train_collection = {}
dev_collection = {}
test_collection = {}
full_collection = {}
''' Data is loaded from multiple csv files, since unfortunately there are some issues with certain columns in certain files '''
df = | pd.read_csv('data/data_prices_daycat_2.csv', sep=None, decimal='.', engine='python') | pandas.read_csv |
from src.config.logger import AppLogger
from scipy import stats
import pandas as pd
import numpy as np
class OutlierRemoval(AppLogger):
def __init__(self):
super(OutlierRemoval, self).__init__()
self.cur_file_path = self.get_working_file_location()(__file__)
def log_transformation(self, data: pd.Series()):
self.log(f"{self.cur_file_path}\t\tInfo: log_transformation method invoked!")
return np.log(data)
def reverse_log_transformation(self, data: | pd.Series() | pandas.Series |
import xarray as xr
import pandas as pd
import numpy as np
import cdsapi
import xarray as xr
from pathlib import Path
from typing import List
import logging
def find_nearest_datapoint(lat, lon, ds):
"""Find the point in the dataset closest to the given latitude and longitude"""
datapoint_lats = ds.coords.indexes["latitude"]
datapoint_lons = ds.coords.indexes["longitude"]
lat_nearest = min(datapoint_lats, key=lambda x: abs(x - lat))
lon_nearest = min(datapoint_lons, key=lambda x: abs(x - lon))
return lat_nearest, lon_nearest
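# Minimal sketch (synthetic 0.25-degree grid, hypothetical coordinates) of how
# find_nearest_datapoint snaps a requested location onto the ERA5 grid:
# _demo_grid = xr.Dataset(coords={"latitude": np.arange(60.0, 58.9, -0.25),
#                                 "longitude": np.arange(4.0, 5.1, 0.25)})
# find_nearest_datapoint(59.12, 4.61, _demo_grid)  # -> (59.0, 4.5)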
def _download_era5_data_single_location(cds, era5_variables, lat, lon, year, file_path):
"""Download data for a specific lat/lon location for a range of years"""
# It does not seem to work to pick a single lat/lon. Need an area large enough
# that we pick up two points (or more). Since resolution is 0.25 deg, we use that
epsilon = 0.25
params = {
"product_type": "reanalysis",
"format": "grib",
"variable": era5_variables,
"date": f"{year}-01-01/{year}-12-31",
"time": [f"{x:02d}:00" for x in range(24)], # hours 00-23
"area": [lat + epsilon, lon, lat, lon + epsilon], # [north, west, south, east]
}
cds.retrieve("reanalysis-era5-single-levels", params, file_path)
def _download_era5_atm_grid(cds, file_path):
"""Download data for a single hour to get lat/lon coordinates"""
params = {
"product_type": "reanalysis",
"format": "grib",
"variable": "100m_u_component_of_wind", # arbitrary variable
"date": "2022-01-01", # arbitrary date
"time": "00:00", # arbitrary time
}
cds.retrieve("reanalysis-era5-single-levels", params, file_path)
def get_default_variable_list():
variables = [
"100m_u_component_of_wind", # u100
"100m_v_component_of_wind", # v100
"10m_u_component_of_wind", # "nice to have" for wind scaling?
"10m_v_component_of_wind",
"surface_pressure",
"surface_solar_radiation_downwards", # ssrd
"total_sky_direct_solar_radiation_at_surface", # fdir (diffuse = ssrd - fdir)
"2m_temperature",
"total_precipitation",
]
return variables
# TODO: Remove this - inefficient way of downloading data.
def INEFFICIENT_download_era5_data(
locations: pd.DataFrame, years: List, data_path: Path, replace_existing=False, era5_variables=None
):
"""Download selected ERA5 data to grib files (one for each location)"""
if era5_variables is None:
era5_variables = get_default_variable_list()
cds = cdsapi.Client()
# make path if it does not exist:
data_path.mkdir(parents=True, exist_ok=True)
# Get grid - download entire area for a single hour
era5_atm_grid_file_name = "era5_atm_grid.grib"
file_grid = data_path / era5_atm_grid_file_name
if (not Path(file_grid).is_file()) or replace_existing:
logging.info(f"Grid file: {file_grid}")
_download_era5_atm_grid(cds, file_grid)
ds_grid = xr.open_dataset(file_grid, engine="cfgrib")
# TODO - check what is more efficient
# Request single location, or area containing all locations in single request?
# Request all years, single year or single month per request?
# 2 locations (x4 poins) and 2 years -> Copernicus running time ca 40 min (+queing) (but because server is busy?)
for i in locations.index:
lat_req = locations.loc[i, "lat"]
lon_req = locations.loc[i, "lon"]
lat_data, lon_data = find_nearest_datapoint(lat_req, lon_req, ds_grid)
filename_part1 = f"era5data_lat={lat_data}_lon={lon_data}"
locations.loc[i, "datafile"] = filename_part1 # excluding the last part (eg. "_year=2021.grib")
for year in years:
filename = f"{filename_part1}_year={year}.grib"
file_data = data_path / filename
logging.info(f"{i}: Data file: {file_data}")
if (not Path(file_data).is_file()) or replace_existing:
logging.info(f"Downloading data to: {file_data}")
_download_era5_data_single_location(cds, era5_variables, lat_data, lon_data, year, file_data)
return locations
def _download_era5_data_single_month(cds, era5_variables, area, year, month, file_path):
params = {
"product_type": "reanalysis",
"format": "grib",
"variable": era5_variables,
"year": year,
"month": month,
"day": [x + 1 for x in range(31)], # days 1-31
"time": [f"{x:02d}:00" for x in range(24)], # hours 00-23
"area": area, # [north, west, south, east]
}
cds.retrieve("reanalysis-era5-single-levels", params, file_path)
def download_era5_data_area(
area: List, years: List, months, data_path: Path, replace_existing=False, era5_variables=None
):
"""Download selected ERA5 data to grib files (one for each location)"""
if era5_variables is None:
era5_variables = get_default_variable_list()
cds = cdsapi.Client()
# make path if it does not exist:
data_path.mkdir(parents=True, exist_ok=True)
for year in years:
for month in months:
file_data = data_path / f"era5data_month={year}-{month:02d}.grib"
logging.info(file_data)
if (not Path(file_data).is_file()) or replace_existing:
_download_era5_data_single_month(cds, era5_variables, area, year, month, file_data)
return
def dataarray_to_dataframe(da):
"""Change forecast time and steps to a single hourly time index"""
if da.step.size == 1:
df = da.to_pandas()
if da.step.size > 1:
df = pd.DataFrame(da.values, index=da.time.values, columns=da.step.values).stack()
ind2 = df.index.get_level_values(0) + df.index.get_level_values(1)
df.index = ind2
return df
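# Hypothetical sketch of dataarray_to_dataframe: a forecast-style DataArray with
# 2 init times x 3 hourly steps collapses into one Series on 6 hourly timestamps.
# _da = xr.DataArray(np.arange(6.0).reshape(2, 3),
#                    coords={"time": pd.date_range("2022-01-01", periods=2, freq="3H"),
#                            "step": pd.to_timedelta([0, 1, 2], unit="H")},
#                    dims=["time", "step"])
# dataarray_to_dataframe(_da).index  # 00:00, 01:00, 02:00, 03:00, 04:00, 05:00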
def extract_solar_radiation_at_locations(file_pattern, locations):
"""Extract radiation data from era5 grib files for selected locations
Nearest datapoint is used
file_pattern : str
which files to read (include * to read many files)
locations : pandas.DataFrame
columns "lat" and "lon" give locations of panel
Returns : dictionary of pandas.DataFrames. The keys are the indices in the locations input dataframe
"""
ds_ssrd = xr.open_mfdataset(file_pattern, engine="cfgrib", backend_kwargs={"filter_by_keys": {"shortName": "ssrd"}})
ds_fdir = xr.open_mfdataset(file_pattern, engine="cfgrib", backend_kwargs={"filter_by_keys": {"shortName": "fdir"}})
data_dict = {}
for i, row in locations.iterrows():
logging.info(i)
lat = row["lat"]
lon = row["lon"]
data_lat, data_lon = find_nearest_datapoint(lat, lon, ds_ssrd)
da_ssrd = ds_ssrd.sel(latitude=data_lat, longitude=data_lon).ssrd
da_fdir = ds_fdir.sel(latitude=data_lat, longitude=data_lon).fdir
df_rad = pd.DataFrame()
df_rad["fdir"] = dataarray_to_dataframe(da_fdir)
df_rad["ssdr"] = dataarray_to_dataframe(da_ssrd)
df_rad["diffuse"] = df_rad["ssdr"] - df_rad["fdir"]
data_dict[i] = df_rad
return data_dict
def extract_wind_speed_at_locations(file_pattern, locations):
"""Extract wind speed data from era5 grib files for selected locations
Nearest datapoint is used
file_pattern : pathlib.Path
which files to read
locations : pandas.DataFrame
columns "lat" and "lon" give locations of wind power plant
Returns : dictionary of pandas.DataFrames. The keys are the indices in the locations input dataframe
"""
ds_u_wind = xr.open_mfdataset(
file_pattern, engine="cfgrib", backend_kwargs={"filter_by_keys": {"shortName": "100u"}}
)
ds_v_wind = xr.open_mfdataset(
file_pattern, engine="cfgrib", backend_kwargs={"filter_by_keys": {"shortName": "100v"}}
)
data_dict = {}
for i, row in locations.iterrows():
logging.info(i)
lat = row["lat"]
lon = row["lon"]
data_lat, data_lon = find_nearest_datapoint(lat, lon, ds_u_wind)
da_u100 = ds_u_wind.sel(latitude=data_lat, longitude=data_lon).u100
da_v100 = ds_v_wind.sel(latitude=data_lat, longitude=data_lon).v100
df = | pd.DataFrame() | pandas.DataFrame |
import functools
import unittest
from typing import Sequence, Optional
import numpy as np
import pandas as pd
from parameterized import parameterized
import torch
from scipy.special import expit
from torch_hlm.mixed_effects_model import MixedEffectsModel
from torch_hlm.simulate import simulate_raneffects
SEED = 2021 - 8 - 1
class TestTraining(unittest.TestCase):
@parameterized.expand([
('gaussian', [0, 0]),
('binary', [0, 0])
], skip_on_empty=True)
def test_training_multiple_gf(self,
response_type: str,
num_res: Sequence[int],
intercept: float = -1.,
noise: float = 1.0):
print("\n`test_training_multiple_gf()` with config `{}`".
format({k: v for k, v in locals().items() if k != 'self'}))
torch.manual_seed(SEED)
np.random.seed(SEED)
# SIMULATE DATA -----
df_train = []
df_raneff_true = []
for i, num_res_g in enumerate(num_res):
df_train_g, df_raneff_true_g = simulate_raneffects(
num_groups=40,
obs_per_group=1,
num_raneffects=num_res_g + 1
)
df_train.append(df_train_g.rename(columns={'y': f"g{i + 1}_y", 'group': f'g{i + 1}'}))
df_raneff_true.append(df_raneff_true_g.assign(gf=f"g{i + 1}"))
#
df_train = functools.reduce(lambda x, y: x.merge(y, how='cross'), df_train)
df_train['y'] = intercept
for i in range(len(num_res)):
df_train['y'] += df_train.pop(f"g{i + 1}_y")
#
df_raneff_true = pd.concat(df_raneff_true).reset_index(drop=True)
if response_type == 'binary':
df_train['y'] = np.random.binomial(p=expit(df_train['y'].values), n=1)
elif response_type == 'binomial':
raise NotImplementedError("TODO")
else:
df_train['y'] += noise * np.random.randn(df_train.shape[0])
# FIT MODEL -----
covariance = {}
for gf, df_raneff_true_g in df_raneff_true.groupby('gf'):
covariance[gf] = torch.as_tensor(df_raneff_true_g.drop(columns=['group', 'gf']).cov().values)
raneff_design = {f"g{i + 1}": [] for i in range(len(num_res))}
for gf in list(raneff_design):
raneff_design[gf] = df_train.columns[df_train.columns.str.startswith(gf + '_x')].tolist()
model = MixedEffectsModel(
fixeff_cols=[],
response_type='binomial' if response_type.startswith('bin') else 'gaussian',
raneff_design=raneff_design,
response_col='y',
covariance=covariance,
loss_type='iid'
)
model.fit(df_train)
# COMPARE TRUE vs. EST -----
with torch.no_grad():
res_per_g = model.module_.get_res(*model.build_model_mats(df_train))
df_raneff_est = []
for gf, re_mat in res_per_g.items():
df_raneff_est.append(
pd.DataFrame(re_mat.numpy(), columns=[f'x{i}' for i in range(num_res[i] + 1)]).assign(gf=gf)
)
df_raneff_est[-1]['group'] = np.unique(df_train[gf])
df_raneff_est = | pd.concat(df_raneff_est) | pandas.concat |
import pickle
import os
import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm
from gazenet.utils.registrar import *
from gazenet.utils.helpers import extract_width_height_thumbnail_from_image
from gazenet.utils.sample_processors import SampleReader, SampleProcessor, ImageCapture
# TODO (fabawi): support annotation reading
@ReaderRegistrar.register
class DataSampleReader(SampleReader):
def __init__(self, video_dir="datasets/processed/Grouped_frames",
annotations_dir=None,
extract_thumbnails=True,
thumbnail_image_file="captured_1.jpg",
pickle_file="temp/processed.pkl", mode=None, **kwargs):
self.short_name = "processed"
self.video_dir = video_dir
self.annotations_dir = annotations_dir
self.extract_thumbnails = extract_thumbnails
self.thumbnail_image_file = thumbnail_image_file
super().__init__(pickle_file=pickle_file, mode=mode, **kwargs)
def read_raw(self):
video_groups = [video_group for video_group in sorted(os.listdir(self.video_dir))]
video_names = [os.path.join(video_group, video_name) for video_group in video_groups
for video_name in sorted(os.listdir(os.path.join(self.video_dir, video_group)))]
for video_name in tqdm(video_names, desc="Samples Read"):
id = video_name
try:
len_frames = len([name for name in os.listdir(os.path.join(self.video_dir, video_name))
if os.path.isdir(os.path.join(self.video_dir, video_name, name))])
width, height, thumbnail = extract_width_height_thumbnail_from_image(
os.path.join(self.video_dir, video_name, "1", self.thumbnail_image_file))
self.samples.append({"id": id,
"audio_name": '',
"video_name": os.path.join(self.video_dir, video_name),
"video_fps": 25, # 30
"video_width": width,
"video_height":height,
"video_thumbnail": thumbnail,
"len_frames": len_frames,
"has_audio": False,
"annotation_name": os.path.join('videogaze', id),
"annotations": {}
})
self.video_id_to_sample_idx[id] = len(self.samples) - 1
self.len_frames += self.samples[-1]["len_frames"]
except:
print("Error: Access non-existent annotation " + id)
@staticmethod
def dataset_info():
return {"summary": "TODO",
"name": "Processed Dataset",
"link": "TODO"}
@SampleRegistrar.register
class DataSample(SampleProcessor):
def __init__(self, reader, index=-1, frame_index=0, width=640, height=480, **kwargs):
assert isinstance(reader, DataSampleReader)
self.short_name = reader.short_name
self.reader = reader
self.index = index
if frame_index > 0:
self.goto_frame(frame_index)
kwargs.update(enable_audio=False)
super().__init__(width=width, height=height,
video_reader=(ImageCapture, {"extension": "jpg",
"sub_directories": True,
"image_file": "captured_1"}), **kwargs)
next(self)
def __next__(self):
with self.read_lock:
self.index += 1
self.index %= len(self.reader.samples)
curr_metadata = self.reader.samples[self.index]
self.load(curr_metadata)
return curr_metadata
def __len__(self):
return len(self.reader)
def next(self):
return next(self)
def goto(self, name, by_index=True):
if by_index:
index = name
else:
index = self.reader.video_id_to_sample_idx[name]
with self.read_lock:
self.index = index
curr_metadata = self.reader.samples[self.index]
self.load(curr_metadata)
return curr_metadata
def frames_per_sec(self):
if self.video_cap is not None:
return self.reader.samples[self.index]["video_fps"]
else:
return 0
def annotate_frame(self, input_data, plotter,
show_gaze=False, show_gaze_label=False, img_names_list=None,
**kwargs):
grabbed_video, grouped_video_frames, grabbed_audio, audio_frames, info, _ = input_data
properties = {}
info = {**info, "frame_annotations": {}}
# info["frame_info"]["dataset_name"] = self.reader.short_name
# info["frame_info"]["video_id"] = self.reader.samples[self.index]["id"]
# info["frame_info"]["frame_height"] = self.reader.samples[self.index]["video_height"]
# info["frame_info"]["frame_width"] = self.reader.samples[self.index]["video_width"]
grouped_video_frames = {**grouped_video_frames,
"PLOT": [["captured"]]
}
try:
frame_index = self.frame_index()
frame_name = self.video_cap.frames[frame_index-1]
frame_dir = os.path.join(self.video_cap.directory, os.path.dirname(frame_name))
if grabbed_video and img_names_list is not None:
for img_name in img_names_list:
try:
img = cv2.imread(os.path.join(frame_dir, img_name + "_1.jpg"))
except cv2.Error:
img = np.zeros_like(grouped_video_frames["captured"])
grouped_video_frames[img_name] = img
except:
pass
return grabbed_video, grouped_video_frames, grabbed_audio, audio_frames, info, properties
def get_participant_frame_range(self,participant_id):
raise NotImplementedError
class DataSplitter(object):
"""
Reads and writes the split (Train, validation, and Test) sets and stores the groups for training and
evaluation. The file names are stored in csv files and are not split automatically. This provides an interface
for manually adding videos to the assigned lists
"""
def __init__(self, train_csv_file="datasets/processed/train.csv",
val_csv_file="datasets/processed/validation.csv",
test_csv_file="datasets/processed/test.csv",
mode="d", **kwargs):
if (train_csv_file is None and val_csv_file is None and test_csv_file is None) or mode is None:
raise AttributeError("Specify atleast 1 csv file and/or choose a supported mode (r,w,x,d)")
self.train_csv_file = train_csv_file
self.val_csv_file = val_csv_file
self.test_csv_file = test_csv_file
self.mode = mode
self.columns = ["video_id", "fps", "scene_type", "dataset"]
self.samples = pd.DataFrame(columns=self.columns + ["split"])
self.open()
def read(self, csv_file, split):
if csv_file is not None:
if self.mode == "r": # read or append
samples = | pd.read_csv(csv_file, names=self.columns, header=0) | pandas.read_csv |
import numpy as np
import pandas as pd
import pytest
from scipy import sparse
import sklearn.datasets
import sklearn.model_selection
from autoPyTorch.data.tabular_validator import TabularInputValidator
@pytest.mark.parametrize('openmlid', [2, 40975, 40984])
@pytest.mark.parametrize('as_frame', [True, False])
def test_data_validation_for_classification(openmlid, as_frame):
x, y = sklearn.datasets.fetch_openml(data_id=openmlid, return_X_y=True, as_frame=as_frame)
validator = TabularInputValidator(is_classification=True)
if as_frame:
# NaN is not supported in categories, so
# drop columns with them.
nan_cols = [i for i in x.columns if x[i].isnull().any()]
cat_cols = [i for i in x.columns if x[i].dtype.name in ['category', 'bool']]
unsupported_columns = list(set(nan_cols) & set(cat_cols))
if len(unsupported_columns) > 0:
x.drop(unsupported_columns, axis=1, inplace=True)
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
x, y, test_size=0.33, random_state=0)
validator.fit(X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test)
X_train_t, y_train_t = validator.transform(X_train, y_train)
assert np.shape(X_train) == np.shape(X_train_t)
# Leave columns that are complete NaN
# The sklearn pipeline will handle that
if as_frame and np.any(pd.isnull(X_train).values.all(axis=0)):
assert np.any( | pd.isnull(X_train_t) | pandas.isnull |
import pandas as pd
import numpy as np
import sys, os, pytest
path = "../pytrendseries/"
sys.path.append(path)
path2 = "../pytrendseries/tests/resource"
sys.path.append(path2)
import detecttrend
import maxtrend
import vizplot
class TestClass():
def __init__(self):
self.year = 2020
self.trend = "downtrend"
self.window = 30
prices = pd.read_csv(os.path.join(path2, "stock_prices.csv"), index_col=0)
prices = prices[["period",'close']]
prices = prices.rename(columns={"period":"date", "close":"close_price"})
prices["date"] = pd.to_datetime(prices["date"])
self.df_prices = prices
self.price = self.df_prices.values
self.stock = 'close_price'
self.test_detecttrend()
self.test_max_trend()
self.test_raises()
self.plots()
def test_detecttrend(self):
output1, _ = detecttrend.detecttrend(self.df_prices, trend=self.trend, window=self.window, year=self.year)
self.output1 = output1
assert (output1["from"] < output1["to"]).unique().shape[0] == 1
assert (output1["from"] < output1["to"]).unique()[0] == True
assert (output1["price0"] > output1["price1"]).unique().shape[0] == 1
assert (output1["price0"] > output1["price1"]).unique()[0] == True
output1_1, _ = detecttrend.detecttrend(self.df_prices, trend="uptrend", window=self.window)
assert (output1_1["from"] < output1_1["to"]).unique().shape[0] == 1
assert (output1_1["from"] < output1_1["to"]).unique()[0] == True
assert (output1_1["price0"] < output1_1["price1"]).unique().shape[0] == 1
assert (output1_1["price0"] < output1_1["price1"]).unique()[0] == True
def test_max_trend(self):
output2 = maxtrend.getmaxtrend(self.df_prices, self.stock, self.trend, self.year)
self.output2 = output2
assert round(output2['maxdrawdown'].values[0], 5) == 0.63356
assert round(maxtrend.getmaxtrend(self.df_prices, self.stock, 'uptrend', self.year)['maxrunup'].values[0], 5) == 1.75642
assert output2.shape[0] == 1
assert (output2["peak_date"] < output2["valley_date"]).unique()[0] == True
assert (output2["peak_price"] > output2["valley_price"]).unique()[0] == True
def plots(self):
vizplot.plot_trend(self.df_prices, self.output1, self.stock, trend=self.trend, year=self.year)
vizplot.plot_maxdrawdown(self.df_prices, self.output2, self.stock, trend=self.trend, year=self.year, style="shadow")
vizplot.plot_maxdrawdown(self.df_prices, self.output2, self.stock, trend=self.trend, year=self.year, style="area")
vizplot.plot_maxdrawdown(self.df_prices, self.output2, self.stock, trend=self.trend, year=self.year, style="plotly")
def test_raises(self):
with pytest.raises(Exception) as error1:
detecttrend.detecttrend(self.df_prices, trend=self.trend, window=self.window, year=self.year, limit='11')
with pytest.raises(Exception) as error2:
detecttrend.detecttrend(self.df_prices, trend=self.trend, window=self.window, year=self.year, limit=11.2)
with pytest.raises(Exception) as error3:
detecttrend.detecttrend(self.df_prices, trend=self.trend, window=self.window, year=self.year, quantile=2)
with pytest.raises(Exception) as error4:
detecttrend.detecttrend(self.df_prices, trend=self.trend, window=self.window, year=self.year, quantile=2.3)
with pytest.raises(Exception) as error5:
detecttrend.detecttrend(self.df_prices, trend=self.trend, window=self.window, year=self.year, quantile='0.5')
with pytest.raises(Exception) as error6:
detecttrend.detecttrend(self.df_prices, trend=self.trend, window=0, year=self.year)
with pytest.raises(Exception) as error7:
detecttrend.detecttrend(self.df_prices, trend=self.trend, window=4, year=self.year)
with pytest.raises(Exception) as error8:
detecttrend.detecttrend(self.df_prices, trend=self.trend, window=30.2, year=self.year)
with pytest.raises(Exception) as error9:
detecttrend.detecttrend(self.df_prices, trend=self.trend, window='30', year=self.year)
with pytest.raises(Exception) as error10:
detecttrend.detecttrend(self.df_prices, trend=self.trend, window=self.window, year=-1)
with pytest.raises(Exception) as error11:
detecttrend.detecttrend(self.df_prices, trend=self.trend, window=self.window, year=2020.0)
with pytest.raises(Exception) as error12:
detecttrend.detecttrend(self.df_prices, trend=self.trend, window=self.window, year='2020')
with pytest.raises(Exception) as error13:
detecttrend.detecttrend(self.df_prices, trend='test', window=self.window, year=self.year)
with pytest.raises(Exception) as error14:
detecttrend.detecttrend(self.df_prices, trend=1, window=self.window, year=self.year)
with pytest.raises(Exception) as error15:
detecttrend.detecttrend([1,2,3,4], trend=self.trend, window=self.window, year=self.year)
with pytest.raises(Exception) as error16:
detecttrend.detecttrend(pd.DataFrame([1,2,3]), trend=self.trend, window=self.window, year=self.year)
with pytest.raises(Exception) as error17:
detecttrend.detecttrend( | pd.DataFrame([1,2,3],columns=["date"]) | pandas.DataFrame |
"""Runs experiments on CICIDS-2017 dataset."""
import itertools
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFE
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.naive_bayes import BernoulliNB
from sklearn import tree
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
import sklearn
import tqdm
from tqdm import tqdm
from tqdm import tqdm_notebook
#import xgboost as xgb
from incremental_trees.models.classification.streaming_rfc import StreamingRFC
import time
import tensorflow as tf
import sys
import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import os # accessing directory structure
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from keras.models import Sequential
from keras.layers import Dense
import pickle as pkl
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
nRowsRead = None
# Some hardcoded parameters:
tf.compat.v1.flags.DEFINE_integer('sample', 10000, '')
tf.compat.v1.flags.DEFINE_boolean('notebook', False, '')
tf.compat.v1.flags.DEFINE_integer('num_steps', 1, 'number of training step per new batch in online learning.')
tf.compat.v1.flags.DEFINE_integer('n_batch_to_retrain', 1, 'number of old batch to retrain in online learning.')
tf.compat.v1.flags.DEFINE_integer('batch_size', 256, '')
tf.compat.v1.flags.DEFINE_string('run', '8,9,10,11', '')
FLAGS = tf.compat.v1.flags.FLAGS
progress_bar = tqdm
df_cache = None
# A little hack
print_sys = print
def print(s):
print_sys(s)
with open('log.txt', 'a') as f:
f.write(s + '\n')
def load_data(sampled_instances=10000):
"""Returns sampled cicids data as pd.df."""
global df_cache
if df_cache is not None:
return df_cache
df1 = pd.read_csv("Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv")
df2 = pd.read_csv("Friday-WorkingHours-Afternoon-PortScan.pcap_ISCX.csv")
df3 = | pd.read_csv("Friday-WorkingHours-Morning.pcap_ISCX.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
import os
'''First, read grid2loc.csv to determine where each base station's data is placed in the 1-D vector'''
coordi = pd.read_csv('grid2loc.csv',index_col=0)
#print(coordi.shape)
gridlocdict = {} # dict: base-station grid coordinate -> position in the 1-D vector
for i in range(len(coordi)):
grid = coordi.index[i]
# coordinate after flattening 2-D to 1-D; x_cor is the column index, y_cor the row index
loc1d = coordi.at[grid, 'x_cor'] + coordi.at[grid, 'y_cor'] * 53
gridlocdict[grid] = loc1d
zerocoordi = [i for i in range(53 * 54) if i not in gridlocdict.values()] # collect the 1-D coordinates that have no base station
zerocoordi_pd = | pd.Series(zerocoordi,name='zerocoordinates') | pandas.Series |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import object
from past.utils import old_div
import os
import numpy as np
import pandas as pd
from threeML.io.rich_display import display
from threeML.io.file_utils import sanitize_filename
from ..serialize import Serialization
from .from_root_file import from_root_file
from .from_hdf5_file import from_hdf5_file
import astropy.units as u
def map_tree_factory(map_tree_file, roi):
# Sanitize files in input (expand variables and so on)
map_tree_file = sanitize_filename(map_tree_file)
if os.path.splitext(map_tree_file)[-1] == '.root':
return MapTree.from_root_file(map_tree_file, roi)
else:
return MapTree.from_hdf5(map_tree_file, roi)
class MapTree(object):
def __init__(self, analysis_bins, roi):
self._analysis_bins = analysis_bins
self._roi = roi
@classmethod
def from_hdf5(cls, map_tree_file, roi):
data_analysis_bins = from_hdf5_file(map_tree_file, roi)
return cls(data_analysis_bins, roi)
@classmethod
def from_root_file(cls, map_tree_file, roi):
"""
Create a MapTree object from a ROOT file and a ROI. Do not use this directly, use map_tree_factory instead.
:param map_tree_file:
:param roi:
:return:
"""
data_analysis_bins = from_root_file(map_tree_file, roi)
return cls(data_analysis_bins, roi)
def __iter__(self):
"""
This allows to loop over the analysis bins as in:
for analysis_bin in maptree:
... do something ...
:return: analysis bin_name iterator
"""
for analysis_bin in self._analysis_bins:
yield analysis_bin
def __getitem__(self, item):
"""
This allows to access the analysis bins by name:
first_analysis_bin = maptree["bin_name 0"]
:param item: string for access by name
:return: the analysis bin_name
"""
try:
return self._analysis_bins[item]
except IndexError:
raise IndexError("Analysis bin_name with index %i does not exist" % (item))
def __len__(self):
return len(self._analysis_bins)
@property
def analysis_bins_labels(self):
return list(self._analysis_bins.keys())
def display(self):
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
train = | pd.read_csv('../data/train_mapped.tsv', sep='\t', header=0) | pandas.read_csv |
#!/usr/bin/env python3
import pandas as pd
import sys
import Bio
from Bio import SeqIO
import tagmatch
import os
from collections import defaultdict
import sqlite3
import gzip
enzymes =os.environ['mn_enzymes'].split(';')
specificity =os.environ['mn_specificity']
max_mc = int(os.environ['mn_max_missed_cleavages'])
f = open(sys.argv[1])
lines = f.readlines()
f.close()
db = sqlite3.connect(sys.argv[2], timeout=50000)
cursor = db.cursor()
try:
query = 'delete from proteins where File=?'
cursor.execute(query, (sys.argv[1],))
db.commit()
except:
pass
seqs = {}
f = sys.argv[1].split('.csv')[0] + '.gz'
with gzip.open(f, "rt") as handle:
for i in SeqIO.parse(handle, "fasta"):
try:
acc = i.id.split('|')[1]
except:
acc = i.description
seqs[acc] = i
passed_tags = set()
matched_tags = defaultdict(set)
matched_peptides = defaultdict(set)
for line in lines:
l = line.split(',')
tag = ','.join(l[:-3])
peptide = l[-3]
acc = l[-2]
seq = str(seqs[acc].seq)
pos = int(l[-1])
if (pos == 2) and (seq[0] == 'M'):
seq = seq[1:]
pos = pos - 1
start = pos-1
end = start + len(peptide)
if start > 0:
amino_acid_before = seq[start-1]
else:
amino_acid_before = ""
first_amino_acid = seq[start]
try:
last_amino_acid = seq[end-1]
except:
continue
try:
amino_acid_after = seq[end]
except:
amino_acid_after = ""
valid = tagmatch.valid_cleavage(amino_acid_before, first_amino_acid, last_amino_acid, amino_acid_after, enzymes, specificity)
mc = tagmatch.missed_cleavages(peptide, enzymes)
if (valid == True) and (mc <= max_mc):
passed_tags.add(tag)
matched_tags[acc].add(tag)
matched_peptides[acc].add(peptide)
passed_tags = list(passed_tags)
ids = defaultdict(set)
from itertools import zip_longest
def grouper(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
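# e.g. list(grouper([1, 2, 3, 4, 5], 2)) -> [(1, 2), (3, 4), (5, None)]; the None
# padding of the final short group is filtered out in the loop below.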
for group in grouper(passed_tags, 500):
t = [i for i in group if i is not None ]
cursor.execute('SELECT DISTINCT scanid, tag FROM tags WHERE tag in ({0})'.format(', '.join('?' for _ in t)), t)
scanids = cursor.fetchall()
for _ in scanids:
ids[_[1]].add(_[0])
del scanids
count = 0
def get_organism(val):
try:
val = val.split('OS=')[1]
if '=' in val:
val = val.split('=')[0]
val = ' '.join(val.split()[:-1]).strip().rstrip()
return val
except:
return "Unspecified"
_replicates = {}
_accession=[]
_record=[]
_protein_id=[]
_peptides=[]
_peptide_count=[]
_scans=[]
_scan_count=[]
_organism=[]
_seq_length=[]
def create_replicates(df):
_ = _replicates[df['Accession']]
for sample in _:
count = len(_[sample])
df['Sample {} (msms)'.format(sample)] = count
df['SAF {}'.format(sample)] = df['Sample {} (msms)'.format(sample)] / df['Length']
return df
for protein in matched_tags:
rec = seqs[protein]
_record.append(rec.format('fasta'))
organism = get_organism(rec.description)
tags = matched_tags[protein]
peptide_set = matched_peptides[protein]
reps = defaultdict(set)
scan_set = set()
for tag in tags:
scans = ids[tag]
scan_set.update(scans)
for scan in scans:
replicate = scan.split('.mgf')[0]
reps[replicate].add(scan)
peptide_count = len(peptide_set)
peptides = '\n'.join(peptide_set)
scans='\n'.join(list(scan_set))
scan_count = len(scan_set)
accession = protein
protein_id = rec.id
seq = str(rec.seq)
seq_length = len(seq)
_replicates[protein]=reps
_accession.append(accession)
_protein_id.append(protein_id)
_peptides.append(peptides)
_peptide_count.append(peptide_count)
_scans.append(scans)
_scan_count.append(scan_count)
_organism.append(organism)
_seq_length.append(seq_length)
count += 1
summary = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os,sys
# In[2]:
sys.path.insert(0,"./../") #so we can import our modules properly
# In[3]:
get_ipython().run_line_magic('matplotlib', 'notebook')
#auto reload changed modules
from IPython import get_ipython
ipython = get_ipython()
ipython.magic("pylab")
ipython.magic("load_ext autoreload")
ipython.magic("autoreload 2")
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
import pandas as pd
from src.const import * # defines many of the variables used below
from src.db import *
from src.utils import *
from src.preprocessing import *
from pathlib import Path
from mysql.connector import MySQLConnection, Error
# In[4]:
if PUBLIC:
cursor = None
else:
config = read_db_config('./../program_config.ini', section='mysql_nonshiftable')
conn = {}
cursor = {}
conn = MySQLConnection(**config)
cursor = conn.cursor()
# ## House A
# In[5]:
filteredSensorListDB = ['_additional_power', '_dishwasher_power', '_exp_power', '_hp_power', '_imp_power', '_sauna_power', '_stove_power', '_washing_machine_power']
dfsA = []
filteredSensorListA = []
capPeriodsA = []
getRawData(dfsA, filteredSensorListA, capPeriodsA,rawDataBaseDir, startDate=startDateA, endDate=endDateA,cursor=cursor, key=keyA, cip=cipA, filteredSensorListDB=filteredSensorListDB)
filteredSensorListA_OG = filteredSensorListA.copy()
# further preprocessing (indexing, rounding, interpolating, all in one df)
# In[6]:
alreadyProcessed = []
#capPeriodsA = [getCapturePeriodForSensorName(cursor, name) for name in filteredSensorListA_OG]
roundDuplicateEliminateInterpolateResample(dfsA,filteredSensorListA, alreadyProcessed,capPeriodsA)
# In[7]:
dfA_300s = combineDfs(dfsA,filteredSensorListA,startDateA,endDateA,"300s",300,capPeriodsA)
# rename 'A_imp_power' to 'A_total_cons_power' for consistency
# In[8]:
dfA_300s.rename(columns={'A_imp_power' : 'A_total_cons_power'}, inplace=True)
# In[9]:
pathA5min = "datasets/dfA_300s.hdf"
if os.path.exists(pathA5min):
dfA_300s = pd.read_hdf(pathA5min, 'data')
else:
if not os.path.exists('datasets'):
os.mkdir('datasets')
dfA_300s.to_hdf(pathA5min, key='data')
# missing data analysis
# In[10]:
if not os.path.exists('missingData'):
os.mkdir('missingData')
reportMissingData(dfA_300s,300)
# generate heatmap
# In[11]:
generateHeatmap(dfA_300s,(20,7),.0,1.,"3D","A300s")
# ## House B
# In[12]:
filteredSensorListDB = ['_batt_state', '_boilertemp_bottom', '_boilertemp_top', '_boiler_heater_1_on', '_boiler_heater_2_on', '_boiler_heater_3_on', '_boiler_on_thermostat', '_boiler_power', '_direct_cons_energy', '_from_batt_energy', '_from_net_energy', '_pv_prod_energy', '_total_cons_energy', '_to_batt_energy', '_to_net_energy']
dfsB = []
filteredSensorListB = []
capPeriodsB = []
getRawData(dfsB, filteredSensorListB, capPeriodsB,rawDataBaseDir, startDate=startDateB, endDate=endDateB,cursor=cursor, key=keyB, cip=cipB, filteredSensorListDB=filteredSensorListDB)
filteredSensorListB_OG = filteredSensorListB.copy()
convToPowerB=['B_direct_cons_energy', 'B_from_batt_energy', 'B_from_net_energy', 'B_pv_prod_energy', 'B_total_cons_energy', 'B_to_batt_energy', 'B_to_net_energy']
# Convert from energy to power
# In[13]:
convertWhToW(dfsB, filteredSensorListB, convToPowerB, capPeriodsB)
# Calculate integral for average power consumption for boiler
# In[14]:
boilerDfIdx = filteredSensorListB.index('B_boiler_power')
dfsB[boilerDfIdx] = preprocessBoilerPower(dfsB[boilerDfIdx],'B_boiler_power')
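# preprocessBoilerPower is assumed to integrate the irregularly sampled boiler power readings
# and return an average power per interval, as described in the cell comment above.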
# further preprocessing (indexing, rounding, interpolating, all in one df)
# In[15]:
alreadyProcessed = ['B_boiler_power']
#capPeriodsB = [getCapturePeriodForSensorName(cursor, name) for name in filteredSensorListB_OG]
roundDuplicateEliminateInterpolateResample(dfsB,filteredSensorListB, alreadyProcessed,capPeriodsB)
# In[16]:
dfB_300s = combineDfs(dfsB,filteredSensorListB,startDateB,endDateB,"300s",300,capPeriodsB)
# In[17]:
pathB5min = "datasets/dfB_300s.hdf"
if os.path.exists(pathB5min):
dfB_300s = pd.read_hdf(pathB5min, 'data')
else:
if not os.path.exists('datasets'):
os.mkdir('datasets')
dfB_300s.to_hdf(pathB5min, key='data')
# missing data analysis
# In[18]:
if not os.path.exists('missingData'):
os.mkdir('missingData')
reportMissingData(dfB_300s,300)
# generate heatmap
# In[19]:
generateHeatmap(dfB_300s,(30,7),.0,1.,"3D","B300s")
# ## House C
#
# conversion to power:
# `['C_direct_cons_energy','C_from_batt_energy', 'C_from_net_energy','C_pv_prod_energy,'C_total_cons_energy', 'C_to_batt_energy', 'C_to_net_energy']`
#
# merge: `C_hh_power` and `C_total_cons_energy` to `C_total_cons_power`
#
# special preprocessing:
# - `'C_boiler_power'`
# - `'C_hp_on_utility'`
# In[20]:
if not os.path.exists(rawDataBaseDir):
os.mkdir(rawDataBaseDir)
# `C_hp_on_utility` was provided by the utility. Do separate preprocessing...
# In[ ]:
path1 = 'WP-RippleCtrl-houseC_Jan16_bis_Nov16.xlsx'
path2 = 'WP-RippleCtrl-houseC_Dez16_bis_Aug19.xlsx'
name = 'C_hp_on_utility'
path_2_hdf = os.path.join(os.getcwd(), rawDataBaseDir, cipC)
os.makedirs(path_2_hdf, exist_ok=True)
path_2_hdf = os.path.join(path_2_hdf, name)+".hdf"
if os.path.exists(path1) and os.path.exists(path2):
# Read files from the utility
df_ripc_1 = pd.read_excel(path1, usecols=['Datum', 'Uhrzeit', 'Text'], parse_dates={'timestamp': ['Datum', 'Uhrzeit']})
df_ripc_2 = pd.read_excel(path2, usecols=['Datum', 'Uhrzeit', 'Text'], parse_dates={'timestamp': ['Datum', 'Uhrzeit']})
df_ripc = pd.concat([df_ripc_1, df_ripc_2])
df_ripc.set_index('timestamp', inplace=True)
# extract 'on' commands
on_pattern = 'EIN'
df_ripc[name] = df_ripc['Text'].str.contains(on_pattern)
df_ripc[name] = df_ripc['C_hp_on_utility'].apply( lambda x: 1 if x else 0)
df_ripc = df_ripc.drop(columns=['Text'])
df_ripc.to_hdf(path_2_hdf, key="data")
# In[ ]:
df_ripc = pd.read_hdf(path_2_hdf)
# In[ ]:
# preprocess
capPerStr = '300s'
df_ripc.index= df_ripc.index.round(capPerStr)#round timestamps to capturePeriod
df_ripc = df_ripc[~df_ripc.index.duplicated(keep='first')]#remove possible duplicates
df_ripc = df_ripc.asfreq(capPerStr)
df_ripc.fillna(method='ffill', inplace=True)
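# The ripple-control log only records on/off commands, so after resampling to 300 s
# the last known state is carried forward with a forward-fill.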
# In[ ]:
filteredSensorListDB = ['_batt_state', '_boilertemp_bottom', '_boilertemp_top', '_boiler_heater_1_on', '_boiler_heater_2_on', '_boiler_heater_3_on', '_boiler_on_relay', '_boiler_on_thermostat', '_boiler_on_utility', '_boiler_power', '_direct_cons_energy', '_from_batt_energy', '_from_net_energy', '_hh_power', '_hp_power', '_pv_prod_energy', '_solarlog_radiation', '_temperature_out', '_total_cons_energy', '_to_batt_energy', '_to_net_energy', '_weather_humidity_in', '_weather_humidity_out', '_weather_pressure', '_weather_temperature_in', '_weather_temperature_out']
dfsC = []
filteredSensorListC = []
capPeriodsC = []
getRawData(dfsC, filteredSensorListC, capPeriodsC,rawDataBaseDir, startDate=startDateC, endDate=endDateC,cursor=cursor, key=keyC, cip=cipC, filteredSensorListDB=filteredSensorListDB)
filteredSensorListC_OG = filteredSensorListC.copy()
convToPowerC = ['C_direct_cons_energy','C_from_batt_energy', 'C_from_net_energy','C_pv_prod_energy','C_total_cons_energy', 'C_to_batt_energy', 'C_to_net_energy']
# Convert from energy to power
# In[ ]:
convertWhToW(dfsC, filteredSensorListC, convToPowerC, capPeriodsC)
# Calculate integral for average power consumption for boiler
# In[ ]:
boilerDfIdx = filteredSensorListC.index('C_boiler_power')
dfsC[boilerDfIdx] = preprocessBoilerPower(dfsC[boilerDfIdx],'C_boiler_power')
# Merge total consumption:
# merging of `'C_hh_energy'` & `'C_total_cons_energy'` (renamed to `'C_hh_power'` & `'C_total_cons_power'`), additionally `'C_hh_power'` needs resampling
# In[ ]:
mergeTotalConsumptionC(dfsC,filteredSensorListC,filteredSensorListC_OG,capPeriodsC)
# further preprocessing (indexing, rounding, interpolating, all in one df)
# In[ ]:
alreadyProcessed = ['C_total_cons_power','C_boiler_power', 'C_hp_on_utility']
#capPeriodsC = [getCapturePeriodForSensorName(cursor, name) for name in filteredSensorListC_OG]
roundDuplicateEliminateInterpolateResample(dfsC,filteredSensorListC, alreadyProcessed,capPeriodsC)
# In[ ]:
# add C_hp_on_utility to other dataframes
dfsC.append(df_ripc)
filteredSensorListC.append('C_hp_on_utility')
capPeriodsC.append(300)
# In[ ]:
dfC_300s = combineDfs(dfsC,filteredSensorListC,startDateC,endDateC,"300s",300,capPeriodsC)
dfC_3600s = combineDfs(dfsC,filteredSensorListC,startDateC,endDateC,"3600s",3600,capPeriodsC)
# In[ ]:
pathC5min = "datasets/dfC_300s.hdf"
pathC1h = "datasets/dfC_3600s.hdf"
if os.path.exists(pathC5min) and os.path.exists(pathC1h):
dfC_300s = pd.read_hdf(pathC5min, 'data')
dfC_3600s = pd.read_hdf(pathC1h, 'data')
else:
if not os.path.exists('datasets'):
os.mkdir('datasets')
dfC_300s.to_hdf(pathC5min, key='data')
dfC_3600s.to_hdf(pathC1h, key='data')
# missing data analysis
# In[ ]:
if not os.path.exists('missingData'):
os.mkdir('missingData')
reportMissingData(dfC_300s,300)
reportMissingData(dfC_3600s,3600)
# generate heatmap
# In[ ]:
generateHeatmap(dfC_300s,(40,7),.0,.1,"3D","C300s")
generateHeatmap(dfC_3600s,(40,7),.0,.1,"3D","C3600s")
# ## House D
# In[ ]:
filteredSensorListDB = ['_audio_wlan_og_power', '_dishwasher_power','_exp_power', '_hp_power', '_imp_power', '_rainwater_power', '_tumble_dryer_power', '_washing_machine_power']
dfsD = []
filteredSensorListD = []
capPeriodsD = []
getRawData(dfsD, filteredSensorListD, capPeriodsD,rawDataBaseDir, startDate=startDateD, endDate=endDateD,cursor=cursor, key=keyD, cip=cipD, filteredSensorListDB=filteredSensorListDB)
filteredSensorListD_OG = filteredSensorListD.copy()
# start washing machine later
# In[ ]:
startDateWM = '2016-10-03 13:00:00'
washingMachineIdx = filteredSensorListD.index("D_washing_machine_power")
dfWM = dfsD[washingMachineIdx]
mask = (dfWM['DateTime'] >= pd.to_datetime(startDateWM))
dfsD[washingMachineIdx] = dfWM.loc[mask]
# further preprocessing (indexing, rounding, interpolating, all in one df)
# In[ ]:
alreadyProcessed = []
#capPeriodsD = [getCapturePeriodForSensorName(cursor, name) for name in filteredSensorListD_OG]
roundDuplicateEliminateInterpolateResample(dfsD,filteredSensorListD, alreadyProcessed,capPeriodsD)
# delete tumble dryer and rainwater for some intervals since there's so much missing data
# In[ ]:
#rainwater
s = '2016-06-15 16:35'
e = '2016-09-21 09:35'
rainwaterIdx = filteredSensorListD.index('D_rainwater_power')
dfRainWater = dfsD[rainwaterIdx]
dfRainWater[s:e] = np.nan
#tumbledryer
s = '2016-06-15 17:30'
e = '2016-09-21 09:20'
tumbleDryerIdx = filteredSensorListD.index('D_tumble_dryer_power')
dftumble = dfsD[tumbleDryerIdx]
dftumble[s:e] = np.nan
# In[ ]:
dfD_300s = combineDfs(dfsD,filteredSensorListD,startDateD,endDateD,"300s",300,capPeriodsD)
# rename 'D_imp_power' to 'D_total_cons_power' for consistency
# In[ ]:
dfD_300s.rename(columns={'D_imp_power' : 'D_total_cons_power'}, inplace=True)
# In[ ]:
pathD5min = "datasets/dfD_300s.hdf"
if os.path.exists(pathD5min):
dfD_300s = | pd.read_hdf(pathD5min, 'data') | pandas.read_hdf |
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import geocat.viz.util as gvutil
path = r'H:\Python project 2021\climate_data_analysis_with_python\data\sst.mnmean.nc'
ds= xr.open_dataset(path)
# time slicing
sst = ds.sst.sel(time=slice('1920-01-01','2020-12-01'))
# anomaly with respect to 1971-2000 period
clm = ds.sst.sel(time=slice('1971-01-01','2000-12-01')).groupby('time.month').mean(dim='time')
anm = (sst.groupby('time.month') - clm)
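# Anomaly: each month's SST minus the 1971-2000 climatology of the same calendar month.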
time = anm.time
itime=np.arange(time.size)
def wgt_areaave(indat, latS, latN, lonW, lonE):
lat=indat.lat
lon=indat.lon
if ( ((lonW < 0) or (lonE < 0 )) and (lon.values.min() > -1) ):
anm=indat.assign_coords(lon=( (lon + 180) % 360 - 180) )
lon=( (lon + 180) % 360 - 180)
else:
anm=indat
iplat = lat.where( (lat >= latS ) & (lat <= latN), drop=True)
iplon = lon.where( (lon >= lonW ) & (lon <= lonE), drop=True)
# print(iplat)
# print(iplon)
wgt = np.cos(np.deg2rad(lat))
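    # cos(latitude) weights approximate the grid-cell area, so high latitudes
    # do not dominate the spatial mean.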
odat=anm.sel(lat=iplat,lon=iplon).weighted(wgt).mean(("lon", "lat"), skipna=True)
return(odat)
# bob sst
bob_anm = wgt_areaave(anm,5,25,80,100)
bob_ranm = bob_anm.rolling(time=7, center=True).mean('time')
##
# Create a list of colors based on the color bar values
colors = ['C1' if (value > 0) else 'C0' for value in bob_anm]
fig = plt.figure(figsize=[8,5])
ax1 = fig.add_subplot(111)
# Plot bar chart
ax1.bar(itime, bob_anm, align='edge', edgecolor="none", color=colors, width=1.0)
ax1.plot(itime, bob_ranm, color="black", linewidth=1.5)
ax1.legend(['7-month running mean'],fontsize=12)
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax1,
x_minor_per_major=4,
y_minor_per_major=5,
labelsize=12)
# Use geocat.viz.util convenience function to set axes parameters
ystr = 1920
yend = 2020
dyr = 20
ist, = np.where(time == pd.Timestamp(year=ystr, month=1, day=1) )
iet, = np.where(time == pd.Timestamp(year=yend, month=1, day=1) )
gvutil.set_axes_limits_and_ticks(ax1,
ylim=(-1.5, 1),
yticks=np.linspace(-1.5, 1, 6),
yticklabels=np.linspace(-1.5, 1, 6),
xlim=(itime[0], itime[-1]),
xticks=itime[ist[0]:iet[0]+1:12*dyr],
xticklabels=np.arange(ystr, yend+1, dyr))
# Use geocat.viz.util convenience function to set titles and labels
gvutil.set_titles_and_labels(ax1,
maintitle="SSTA in BoB (ERSST)",
ylabel='Anomalies',
xlabel= 'Year',
maintitlefontsize=18,
labelfontsize=15)
plt.tight_layout()
plt.savefig("bob_anomalies.png",dpi = 300)
########## BoB SST with respect to ENSO and IOD (ERSST)
#nino 3.4 and dipole mode index plot together
nino = wgt_areaave(anm,-5,5,-170,-120)
nino = nino.rolling(time=7, center=True).mean('time')
#IOD west: 50 ° E to 70 ° E and 10 ° S to 10 ° N.
iod_west = wgt_areaave(anm,-10,10,50,70)
# IOD east: 90 ° E to 110 ° E and 10 ° S to 0 ° S.
iod_east = wgt_areaave(anm,-10,0,90,110)
dmi = iod_west - iod_east
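# Dipole Mode Index: SSTA difference between the western (50-70E, 10S-10N) and
# south-eastern (90-110E, 10S-0) boxes, following Saji et al. (1999).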
dmi = dmi.rolling(time=7, center=True).mean('time')
### Figure Plot
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
ax1.set_title('BoB anomaly with respect to ENSO')
ax1.plot(time, bob_ranm, '-', linewidth=1)
ax1.plot(time, nino, '-', linewidth=1)
ax1.tick_params(length = 7,right=True,labelsize=12)
ax1.legend(['BoB anomaly','Nino3.4 Index'],fontsize=12,frameon=False)
ax1.set_ylabel('SSTA (°C)',fontsize=12)
ax2.set_title('BoB anomaly with respect to IOD')
ax2.plot(time, bob_ranm, '-', linewidth=1)
ax2.plot(time, dmi, '-', linewidth=1)
ax2.tick_params(length = 7,right=True,labelsize=12)
ax2.legend(['BoB anomaly','Dipole Mode Index'],fontsize=12,frameon=False)
ax2.set_ylabel('SSTA (°C)',fontsize=12)
# Show the plot
plt.draw()
plt.tight_layout()
plt.savefig("nino-bob-dmi.png",dpi = 300)
####################### (Plotting Nino 3.4 Index)
nino = wgt_areaave(anm,-5,5,-170,-120)
rnino = nino.rolling(time=7, center=True).mean('time')
#nino standard
ninoSD=nino/nino.std(dim='time')
rninoSD=ninoSD.rolling(time=7, center=True).mean('time')
# -- -- -- -- -- -- -- -- - -- - -- --- -- - -- - -- - - -- - -
# -- figure plot
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
# Create a list of colors based on the color bar values
colors = ['C1' if (value > 0) else 'C0' for value in ninoSD]
# Plot bar chart
ax1.bar(itime, nino, align='edge', edgecolor="none", color=colors, width=1.0)
ax1.plot(itime, rnino, color="black", linewidth=1.5)
ax1.legend(['7-month running mean'],fontsize=12,frameon=False)
ax2.bar(itime, ninoSD, align='edge', edgecolor="none", color=colors, width=1.0)
ax2.plot(itime, rninoSD, color="black", linewidth=1.5)
# Use geocat.viz.util convenience function to set axes parameters
ystr = 1920
yend = 2020
dyr = 20
ist, = np.where(time == pd.Timestamp(year=ystr, month=1, day=1) )
iet, = np.where(time == pd.Timestamp(year=yend, month=1, day=1) )
gvutil.set_axes_limits_and_ticks(ax1,
ylim=(-3, 3.5),
yticks=np.linspace(-3, 3, 7),
yticklabels=np.linspace(-3, 3, 7),
xlim=(itime[0], itime[-1]),
xticks=itime[ist[0]:iet[0]+1:12*dyr],
xticklabels=np.arange(ystr, yend+1, dyr) )
gvutil.set_axes_limits_and_ticks(ax2,
ylim=(-3, 3.5),
yticks=np.linspace(-3, 3, 7),
yticklabels=np.linspace(-3, 3, 7),
xlim=(itime[0], itime[-1]),
xticks=itime[ist[0]:iet[0]+1:12*dyr],
xticklabels=np.arange(ystr, yend+1, dyr) )
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax1,
x_minor_per_major=4,
y_minor_per_major=5,
labelsize=12)
gvutil.add_major_minor_ticks(ax2,
x_minor_per_major=4,
y_minor_per_major=5,
labelsize=12)
# Use geocat.viz.util convenience function to set titles and labels
gvutil.set_titles_and_labels(ax1,
maintitle="SSTA in Nino3.4 region",
ylabel='Anomalies',
maintitlefontsize=18,
labelfontsize=15)
gvutil.set_titles_and_labels(ax2,
maintitle="Nino3.4 Index",
ylabel='Standardized',
xlabel='Year',
maintitlefontsize=18,
labelfontsize=15)
plt.draw()
plt.tight_layout()
plt.savefig("nino3.4_ERSST.png",dpi=300)
############### (Plotting DMI Index)
iod_west = wgt_areaave(anm,-10,10,50,70)
# IOD east: 90 ° E to 110 ° E and 10 ° S to 0 ° S.
iod_east = wgt_areaave(anm,-10,0,90,110)
dmi = iod_west - iod_east
rdmi = dmi.rolling(time=7, center=True).mean('time')
colors = ['C1' if (value > 0) else 'C0' for value in dmi]
fig = plt.figure(figsize=[8,5])
ax1 = fig.add_subplot(111)
# Plot bar chart
ax1.bar(itime, dmi, align='edge', edgecolor="none", color=colors, width=1.0)
ax1.plot(itime, rdmi, color="black", linewidth=1.5)
ax1.legend(['7-month running mean'],fontsize=12,frameon=False)
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax1,
x_minor_per_major=4,
y_minor_per_major=5,
labelsize=12)
# Use geocat.viz.util convenience function to set axes parameters
ystr = 1920
yend = 2020
dyr = 20
ist, = np.where(time == pd.Timestamp(year=ystr, month=1, day=1) )
iet, = np.where(time == | pd.Timestamp(year=yend, month=1, day=1) | pandas.Timestamp |
#!/usr/bin/env python
###
# File Created: Wednesday, February 6th 2019, 8:23:06 pm
# Author: <NAME> <EMAIL>
# Modified By: <NAME>
# Last Modified: Friday, February 8th 2019, 3:37:43 pm
###
import sys
import os
import csv
from os.path import isfile, join, split, exists
import glob
import ast
import pandas as pd
import pprint
import rospkg
import rospy
import moveit_commander
import moveit_msgs.msg
from utils.ros import check_rosparams
from utils.files import unique
from modules import Session
from modules import PlannerConfig
from modules import Scene
class Result(Session):
def __init__(self, planner_select, mode, planner):
# Initialise planner_select to avoid key error with PlannerConfig init
if rospy.get_param('~planner_select') == 'all':
rospy.set_param('~planner_select', 'Cano_etal')
super(Result, self).__init__()
self.PLANNER_SELECT = planner_select
self.MODE = mode
self.PLANNER = planner
self.PLANNER_ID = planner+'kConfigDefault'
rospy.loginfo(
'Initialising result session for %s %s in %s mode \n', self.PLANNER_SELECT, self.PLANNER, self.MODE)
# Overwriting the results path
self.OUTPUT_DIR = self.ROS_PKG_PATH+'/results/results_analysis/param_tests/'
if not exists(self.OUTPUT_DIR):
os.makedirs(self.OUTPUT_DIR)
self.RESULTS_PATH = self.OUTPUT_DIR+self.PLANNER_SELECT + \
'_'+self.MODE+'_'+self.PLANNER+'.csv'
def _set_best_params(self):
# Return list of all fps in result dir of selected planner select, planner and mode in run folders
fps = [y for x in os.walk(self.RESULTS_DIR)
for y in glob.glob(join(x[0], '*.csv'))
if (self.PLANNER_SELECT in y) and
(self.PLANNER in y) and
(self.MODE in y) ]
# Returns a dict of the best run out of all runs with the min loss
best_run = {'run': 0, 'min_loss': float('inf')}
for fp in fps:
df = pd.read_csv(fp, index_col=False)
            df_best_run = df.iloc[df['loss'].idxmin()]
if df_best_run['loss'] < best_run['min_loss']:
best_run['min_loss'] = df_best_run['loss']
best_run['min_run'] = df_best_run.to_dict()
best_run['run'] = int(fp.split(os.sep)[-2].split('_')[-1])
# Extracts the params set str as dict and sets planner params
params_set = ast.literal_eval(best_run['min_run']['params'])
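        # 'params' is stored in the results CSV as a stringified dict, hence literal_eval.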
rospy.loginfo("Setting to best params for %s %s %s in run_%d",
self.PLANNER_SELECT, self.PLANNER, self.MODE, best_run['run'])
self.planner_config_obj.set_planner_params(self.PLANNER_ID, params_set)
return best_run
def run_param_test(self, save=False):
best_run = self._set_best_params()
best_run_stats = best_run['min_run']
rospy.loginfo('BEST RUN STATS: t_avg_run_time: %.4f t_avg_plan_time: %.4f t_avg_dist: %.4f t_avg_path_length: %.4f t_avg_success: %.4f\n',
best_run_stats['t_avg_run_time'], best_run_stats['t_avg_plan_time'], best_run_stats[
't_avg_dist'], best_run_stats['t_avg_path_length'],
best_run_stats['t_avg_success'])
results_log, test_stats = self._run_problem_set(self.PLANNER_ID)
rospy.loginfo('TEST STATS: t_avg_run_time: %.4f t_avg_plan_time: %.4f t_avg_dist: %.4f t_avg_path_length: %.4f t_avg_success: %.4f\n',
test_stats['t_avg_run_time'], test_stats['t_avg_plan_time'], test_stats['t_avg_dist'], test_stats['t_avg_path_length'],
test_stats['t_avg_success'])
if save == True:
default_fps = [y for x in os.walk(self.RESULTS_DIR)
for y in glob.glob(join(x[0], '*.csv'))
if ('default' in y) and
(self.PLANNER_SELECT in y)]
default_df = pd.DataFrame()
for f in default_fps:
df = pd.read_csv(f, index_col=False, sep=',').dropna(axis=1)
df = df[df['planner'].str.contains(self.PLANNER)]
default_df = pd.concat([default_df, df])
avg_default = {'run': 'avg_default', 'avg_runs': default_df['avg_runs'].mean(),
't_avg_run_time': default_df['t_avg_run_time'].mean(),
't_avg_plan_time': default_df['t_avg_plan_time'].mean(),
't_avg_dist': default_df['t_avg_dist'].mean(),
't_avg_path_length': default_df['t_avg_path_length'].mean(),
't_avg_success': default_df['t_avg_success'].mean(),
'params': default_df.iloc[0]['params'] }
cols = ['avg_runs', 't_avg_run_time', 't_avg_plan_time',
't_avg_dist', 't_avg_path_length', 't_avg_success', 'params']
best_run_stats = {k: best_run_stats[k] for k in cols}
best_run_stats['run'] = 'best_run'
test_stats['run'] = 'test_run'
test_stats['avg_runs'] = self.AVG_RUNS
test_stats['params'] = best_run_stats['params']
cols = ['run'] + cols
df_avg_default = | pd.DataFrame(avg_default, index=[0]) | pandas.DataFrame |
import pandas as pd
import statsmodels.formula.api as api
from sklearn.preprocessing import scale, StandardScaler
from sklearn.linear_model import RidgeCV
from plotnine import *
import torch
import numpy as np
def sumcode(col):
return (col * 2 - 1).astype(int)
def massage(dat, scaleall=False):
dat['durationsum'] = dat['duration1'] + dat['duration2']
keep = ['samespeaker', 'sameepisode', 'sametype', 'semsim',
'durationdiff', 'durationsum', 'sim_1', 'sim_2']
data = dat[keep].dropna().query("semsim != 0.0").assign(
samespeaker = lambda x: scale(x.samespeaker) if scaleall else sumcode(x.samespeaker),
sameepisode = lambda x: scale(x.sameepisode) if scaleall else sumcode(x.sameepisode),
sametype = lambda x: scale(x.sametype) if scaleall else sumcode(x.sametype),
semsim = lambda x: scale(x.semsim),
durationdiff = lambda x: scale(x.durationdiff),
durationsum = lambda x: scale(x.durationsum),
sim_1 = lambda x: scale(x.sim_1),
sim_2 = lambda x: scale(x.sim_2))
return data
def standardize(data):
keep = ['samespeaker', 'sameepisode', 'sametype', 'semsim',
'distance', 'durationdiff', 'durationsum', 'sim_1', 'sim_2']
scaler = StandardScaler()
data = data[keep].astype(float)
return pd.DataFrame(scaler.fit_transform(data.values), columns=data.columns, index=data.index)
def rer(red, full):
return (red - full) / red
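# partial_r2 below reports, for the full model and for each predictor dropped in turn,
# the relative reduction in residual MSE (rer above), i.e. a partial-R2-style measure.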
def partial_r2(model, data):
r2 = []
mse_full = model.fit().mse_resid
predictors = [ name for name in model.exog_names if name != 'Intercept' ]
# drop intercept
mse_red = model.from_formula(f"{model.endog_names} ~ {' + '.join(predictors)}",
drop_cols=['Intercept'],
data=data).fit().mse_resid
r2.append(rer(mse_red, mse_full))
for predictor in predictors:
exog = ' + '.join([ name for name in predictors if name != predictor ])
formula = f"{model.endog_names} ~ {exog}"
mse_red = model.from_formula(formula, data).fit().mse_resid
r2.append(rer(mse_red, mse_full))
return pd.DataFrame(index=['Intercept']+predictors, data=dict(partial_r2=r2))
def plot_coef(table, fragment_type, multiword):
data = table.query(f"multiword == {multiword} & fragment_type == '{fragment_type}'")
data['version'] = data['version'].map(str)
g = ggplot(data, aes('Variable', 'Coefficient')) + \
geom_hline(yintercept=0, color='gray', linetype='dashed') + \
geom_errorbar(aes(color='version', ymin='Lower', ymax='Upper', lwd=1, width=0.25)) + \
geom_point(aes(color='version')) + \
coord_flip()
ggsave(g, f"results/grsa_{fragment_type}_{'multi' if multiword else ''}word_coef.pdf")
def frameit(matrix, prefix="dim"):
return pd.DataFrame(matrix, columns=[f"{prefix}{i}" for i in range(matrix.shape[1])])
def backprobes(version):
for fragment_type in ['dialog', 'narration']:
data = torch.load(f"data/out/words_{version}_{fragment_type}.pt")
backprobe(data['words']).to_csv(f"results/backprobe_{version}_{fragment_type}.csv",
index=False,
header=True)
def backprobe(words):
rows = []
embedding_2 = frameit(scale(torch.stack([word.embedding_2 for word in words]).cpu().numpy()),
prefix="emb_2")
embedding_1 = frameit(scale(torch.stack([word.embedding_1 for word in words]).cpu().numpy()),
prefix="emb_1")
embedding_0 = frameit(scale(torch.stack([word.embedding_0 for word in words]).cpu().numpy()),
prefix="emb_0")
semsim = frameit(torch.stack([word.semsim for word in words]).cpu().numpy(),
prefix="semsim")
speaker = pd.get_dummies([word.speaker for word in words], prefix="speaker")
episode = pd.get_dummies([word.episode for word in words], prefix="episode")
duration = pd.DataFrame(dict(duration=[word.duration for word in words]))
train_ix = np.random.choice(embedding_2.index, int(len(embedding_2.index)/2), replace=False)
val_ix = embedding_2.index[~embedding_2.index.isin(train_ix)]
predictors = dict(semsim=semsim, speaker=speaker, episode=episode, duration=duration)
for outname, y in [('embedding_2', embedding_2), ('embedding_1', embedding_1), ('embedding_0', embedding_0)]:
X = pd.concat(list(predictors.values()), axis=1)
full = ridge(X.loc[train_ix], y.loc[train_ix], X.loc[val_ix], y.loc[val_ix])
rows.append(dict(var='NONE', outcome=outname, **full, rer=rer(full['mse'], full['mse'])))
for name, X in ablate(predictors):
red = ridge(X.loc[train_ix], y.loc[train_ix], X.loc[val_ix], y.loc[val_ix])
rows.append(dict(var=name,
outcome=outname,
**red,
rer=rer(red['mse'], full['mse'])))
return pd.DataFrame.from_records(rows)
def ridge_cv(X, y):
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_squared_error, r2_score
model = make_pipeline(StandardScaler(),
RidgeCV(alphas=[ 10**n for n in range(-3, 11) ],
fit_intercept=True, cv=None, scoring='neg_mean_squared_error',
alpha_per_target=False
))
model.fit(X, y)
return dict(mse= -model.steps[-1][1].best_score_,
alpha=model.steps[-1][1].alpha_)
def ridge(X, y, X_val, y_val):
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_squared_error, r2_score
model = make_pipeline(StandardScaler(),
RidgeCV(alphas=[ 10**n for n in range(-3, 11) ],
fit_intercept=True, cv=None, scoring='neg_mean_squared_error',
alpha_per_target=False
))
model.fit(X, y)
pred = model.predict(X_val)
return dict(mse=mean_squared_error(y_val, pred),
alpha=model.steps[-1][1].alpha_,
best_cv=-model.steps[-1][1].best_score_)
def ablate(variables):
"""Yield dataframe concatenating all variables, except for one each time."""
for this in variables:
yield this, pd.concat([ var for name, var in variables.items() if name != this ], axis=1)
def unpairwise_ols(rawdata):
data = standardize(rawdata)
m1 = api.ols(formula = f"sim_1 ~ semsim + distance + durationdiff + durationsum + samespeaker + sameepisode", data=data)
m2 = api.ols(formula = f"sim_2 ~ semsim + distance + durationdiff + durationsum + samespeaker + sameepisode", data=data)
result1 = m1.fit().summary2().tables[1].reset_index().rename(columns={'index':'Variable', 'Coef.': 'Value'})
result2 = m2.fit().summary2().tables[1].reset_index().rename(columns={'index':'Variable', 'Coef.': 'Value'})
result1['Dependent Var.'] = 'sim_1'
result2['Dependent Var.'] = 'sim_2'
return pd.concat([result1, result2])
def main():
# Load and process data
rawdata = | pd.read_csv("data/out/pairwise_similarities.csv") | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: olivergiesecke
1) Collect the data on the speakers and text for each alternative.
2) Do the regular pre-processing for each text entry.
3) Apply standard LDA
4) Provide summary statics how the probability mass lines up with the different alternatives.
5) Check alignment with the voting record.
"""
from nltk.corpus import stopwords
import pandas as pd
import numpy as np
from gensim.utils import simple_preprocess
import itertools
import os
import gensim
from gensim import corpora, models
from nltk.stem.porter import PorterStemmer
from sklearn.decomposition import TruncatedSVD
import matplotlib
import matplotlib.pyplot as plt
import re
import seaborn as sns
import create_lda_data
import provide_helperfunctions
from nltk.util import ngrams
from collections import Counter
from pprint import pprint
from gensim.models.coherencemodel import CoherenceModel
from mpl_toolkits.mplot3d import Axes3D
pd.set_option('mode.chained_assignment', None)
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 20)
# Set random state for entire file
rnd_state=5
###############################################################################
### Import data ###
data = create_lda_data.main()
data.rename(columns={"start_date":"date"},inplace=True)
data.to_csv("../output/lda_dataset.csv",index=False)
### Data Selection ###
data['new']=1
df_balt=data[data['d_alt']==1].pivot_table(index="date",values='new',aggfunc=np.sum).reset_index()
df_summary = data.pivot_table(index="date",values='new',columns=['d_alt','votingmember'],aggfunc=np.sum)
# Keep only dates for which alternatives are available and speakers who are votingmembers
data_speakers=data[data['votingmember']==1].merge(df_balt,on='date',how='inner')
data_alternatives=data[data['d_alt']==1]
data_alternatives = data_alternatives[data_alternatives['content']!='[]'].copy()
#data_alternatives.to_csv("~/Desktop/alternativetext.csv")
### Check the coverage of the speaker data ###
alt_dates = pd.DataFrame(data[data['d_alt']==1].date.unique()).rename(columns={0:"date"})
alt_dates['alt']=1
date_speakers = pd.DataFrame(data[data['votingmember']==1].date.unique()).rename(columns={0:"date"})
date_speakers['speaker']=1
merge_df = pd.merge(alt_dates,date_speakers,on="date",how="outer")
print("Number of alternative dates: %d" % len(data_alternatives['date'].unique()))
print(f"Earliest meeting with alternatives: {data_alternatives['date'].min()}" )
print(f"Latest meeting with alternatives: {data_alternatives['date'].max()}" )
print("Number of speaker dates: %d" % len(data_speakers['date'].unique()))
print("Earliest date of speaker: %s" % data_speakers['date'].min())
print("Latest date of speaker: %s" % data_speakers['date'].max())
print("Number of words for the speakers is: {:.3f} million".format(len(" ".join(data_speakers['content'].tolist())) / 1e6))
print("Number of words for the alternatives is: {:.3f} million".format(len(" ".join(data_alternatives['content'].tolist())) / 1e6 ))
### Summary Statistics ###
with open("../output/file_basic_sumstats.tex","w") as file:
file.write("DOCUMENTS COLLECTED:\\\\\\\\")
file.write(f"Number of alternative dates: \t \t {len(data_alternatives['date'].unique())}\\\\")
file.write(f"Earliest meeting with alternatives:\t \t {data_alternatives['date'].min()} \\\\")
file.write(f"Latest meeting with alternatives:\t \t {data_alternatives['date'].max()} \\\\ \\\\" )
file.write(f"Number of speaker dates: {len(data_speakers['date'].unique())}\\\\")
file.write(f"Earliest date of speaker: {data_speakers['date'].min()}\\\\")
file.write(f"Latest date of speaker: {data_speakers['date'].max()}\\\\\\\\")
file.write("Number of words for the speakers is: {:.3f} million \\\\".format(len(" ".join(data_speakers['content'].tolist())) / 1e6))
file.write("Number of words for the alternatives is: {:.3f} million \\".format(len(" ".join(data_alternatives['content'].tolist())) / 1e6 ))
# =============================================================================
# # Subsample the speakers -- only to learn the model
# data_speakers_subsample = data_speakers.sample(frac =.1 ,random_state=5)
# print("Number of words for the subsample of speakers is: %s" % (len(" ".join(data_speakers_subsample ['content'].tolist())) / 1e6))
# data_sel = pd.concat([data_speakers_subsample,data_alternatives],axis=0, join='inner')
# data_sel = data_sel.reset_index()
# =============================================================================
### Learn the model based only on basis of the alternatives ###
print("\n### MODEL ESTIMATION - ALTERNATIVES ONLY ###\n")
data_sel = data_alternatives.reset_index()
# Do simple preprocessing
data_sel['parsed']=data_sel['content'].apply(provide_helperfunctions.extract_token)
data_sel['parsed'].loc[1]
### Remove stopwords and do stemming ###
stopwordsnltk = stopwords.words('english')
stopwordsnltk.extend(["mr","chairman","yes",'restrict', 'control','class','page',
'chart','strictli',"presid", "governor", "would","think",
"altern","could","committe","may",
"ty","yt","πt","bt","yt","na","na","gt","row","qiv","rtc","tip","dec","jul",
"confid","interv","ut","seven","confidenti","jun",
"jan","feb","mar","apr","aug","sep","oct","nov",'march','septemb','fr','june','april','nan'])
data_sel['parsed_cleaned']=data_sel['parsed'].apply(lambda x:
provide_helperfunctions.remove_stopwords(
provide_helperfunctions.do_stemming(
provide_helperfunctions.remove_stopwords(x,stopwordsnltk)),stopwordsnltk))
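# Stopwords are removed again after stemming, presumably because some entries of the
# custom stopword list (e.g. 'altern', 'committe') only match once the words are stemmed.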
### Build corpus ###
texts=[]
for row_index,row in data_sel.iterrows():
item=row['parsed_cleaned']
texts.append(item)
### Extract tokens ###
tokens =[]
for text in texts:
for word in text:
tokens.append(word)
### Extract the top 100 common tokens ###
counter = Counter(tokens)
n_topwords=100
provide_helperfunctions.plot_wordlist(counter.most_common(n_topwords),n_topwords,n_percolumns=34,filename="../output/tab_tf_alternatives.tex")
### Extract the top 100 bigrams tokens ###
bi_grams = list(ngrams(tokens, 2))
counter = Counter(bi_grams)
n_topwords=100
provide_helperfunctions.plot_wordlist(counter.most_common(n_topwords),n_topwords,n_percolumns=34,filename="../output/tab_tf_bigrams.tex")
### Add bi-grams ###
n_bigrams = 100
bi_gram_mostcommon = ["_".join(ele[0]) for ele in counter.most_common(n_bigrams)]
texts = provide_helperfunctions.add_bigrams(texts,bi_gram_mostcommon)
### Extract the top 100 trigrams tokens ###
tri_grams = list(ngrams(tokens, 3))
counter = Counter(tri_grams)
n_topwords = 68
provide_helperfunctions.plot_wordlist(counter.most_common(n_topwords),n_topwords,n_percolumns=34,filename="../output/tab_tf_trigrams.tex")
### Add tri-grams ###
n_tri_grams = 50
tri_gram_mostcommon = ["_".join(ele[0]) for ele in counter.most_common(n_tri_grams)]
texts = provide_helperfunctions.add_trigrams(texts,tri_gram_mostcommon)
### Plot TF-IDF figure to decide on the terms ###
tokens =[]
for text in texts:
for word in text:
tokens.append(word)
# Unique words
unique_tokens =sorted(list(set(tokens)))
tf_idf = provide_helperfunctions.get_tdidf(tokens,unique_tokens,texts)
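# tf-idf scores are used only to rank terms; the vocabulary is trimmed to the
# top `totaln_words` terms further below via trim_texts.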
tf_idf_sort =np.sort(tf_idf)
tf_idf_invsort = tf_idf_sort[::-1]
plt.figure(figsize=(12,7))
plt.plot(np.arange(len(unique_tokens)),tf_idf_invsort)
plt.ylabel('Tf-idf weight')
plt.xlabel('Rank of terms ordered by tf-idf')
plt.savefig('../output/fig_alt_tfidf.pdf')
# print terms with the largest ranking
def merge(list1, list2):
merged_list = [(list1[i], list2[i]) for i in range(0, len(list1))]
return merged_list
n_topwords = 68
indices = tf_idf.argsort()[-n_topwords:][::-1]
tfidf_top = tf_idf[indices]
word_arr = np.asarray(unique_tokens)
word_top= word_arr[indices]
counter = merge(list(word_top),list(tfidf_top))
provide_helperfunctions.plot_wordlist(counter,n_topwords,n_percolumns=34,filename="../output/tab_tfidf_list.tex",columnnames=['#','term','tf-idf score'])
### Keep top x words ###
totaln_words=2200
texts = provide_helperfunctions.trim_texts(tf_idf,unique_tokens,texts,totaln_words)
### Build dictionary ###
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
###############################################################################
### Model Selection ###
run_modelsel = False
run_numtopic = False
if run_modelsel == True:
### Explore multiple dimension of the parameter space - TAKES A LONG TIME ###
alpha_v = np.array([0.001,0.01,0.1,0.5,1])
eta_v = np.array([0.001,0.01,0.1,0.5,1])
topic_v =np.array([5,10,15,20])
models_df = provide_helperfunctions.explore_parameterspace(totaln_words,corpus,dictionary,rnd_state,texts,alpha_v,eta_v,topic_v)
models_df=models_df.sort_values(by='coherence score (PMI)',ascending=False).reset_index().drop(columns="index")
models_df['model']=models_df.index
models_df['model']=models_df['model'].apply(lambda x:x+1)
models_df.to_latex("../output/tab_models.tex",index=False,float_format="%.3f")
# plot the parameter space
#provide_helperfunctions.plot_parameterspace(models_df)
# extract the parameter values for the highest coherence score
row = models_df.loc[models_df['coherence score (PMI)'].idxmax()]
row.to_pickle("../output/opt_parameter_coh")
row = models_df.loc[models_df['perplexity'].idxmax()]
row.to_pickle("../output/opt_parameter_perplexity")
row = pd.read_pickle("../output/opt_parameter_coh")
num_topics = int(row['# topics'])
eta_p = row['eta']
alpha_p = row['alpha']
provide_helperfunctions.output_number(num_topics,filename="../output/par_bs_numtopoics.tex",dec=0)
provide_helperfunctions.output_number(eta_p,filename="../output/par_bs_eta.tex",dec=3)
provide_helperfunctions.output_number(alpha_p,filename="../output/par_bs_alpha.tex",dec=3)
if run_numtopic == True:
### Number of topic evaluations ###
eta = eta_p
alpha = alpha_p
provide_helperfunctions.explore_numberoftopics(totaln_words,corpus,dictionary,texts,rnd_state, eta , alpha )
provide_helperfunctions.output_number(eta,filename="../output/par_topic_eta.tex",dec=3)
provide_helperfunctions.output_number(alpha,filename="../output/par_topic_alpha.tex",dec=3)
###############################################################################
### Model Estimation ###
### Do LDA ###
print(f"# Model parameter: Number of topics = {num_topics}, eta = {eta_p}, alpha = {alpha_p} random state = {rnd_state}\n")
ldamodel = models.ldamodel.LdaModel(corpus, num_topics, id2word = dictionary, passes=30 ,eta=eta_p ,alpha = alpha_p, random_state=rnd_state)
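# In gensim's LdaModel, alpha is the Dirichlet prior on per-document topic weights and
# eta the prior on per-topic word weights; passes=30 means 30 full sweeps over the corpus.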
# Perplexity
logperplexity = ldamodel.log_perplexity(corpus, total_docs=None)
provide_helperfunctions.output_number(logperplexity,filename="../output/par_logperplexity.tex",dec=3)
# Coherence measure
cm = CoherenceModel(model=ldamodel, corpus=corpus, texts = texts, coherence='c_uci') # this is the pointwise mutual info measure.
coherence = cm.get_coherence() # get coherence value
provide_helperfunctions.output_number(coherence,filename="../output/par_coherence.tex",dec=3)
### Inspect the topics ###
n_words=10
x=ldamodel.show_topics(num_topics, num_words=n_words,formatted=False)
topics_words = [(tp[0], [wd[0] for wd in tp[1]]) for tp in x]
print("# These are the topic distributions for the estimated model:\n")
for topic,words in topics_words:
print(str(topic)+ "::"+ str(words))
### Visualize as a heatmap ###
provide_helperfunctions.draw_heatmap(x,n_words,params=(num_topics,eta_p,alpha_p), pmin = 0, pmax = 0.05)
###############################################################################
data = pd.concat([data_speakers,data_alternatives],axis=0, join='inner')
data = data.reset_index()
data_light = data[['d_alt','date', 'speaker', 'speaker_id', 'votingmember', 'ambdiss','tighterdiss', 'easierdiss']]
# Do simple preprocessing
data['parsed']=data['content'].apply(provide_helperfunctions.extract_token)
# Revome stopwords and do stemming
data['parsed_cleaned']=data['parsed'].apply(lambda x:
provide_helperfunctions.remove_stopwords(
provide_helperfunctions.do_stemming(
provide_helperfunctions.remove_stopwords(x,stopwordsnltk)),stopwordsnltk))
### Build corpus ###
texts=[]
for row_index,row in data.iterrows():
item=row['parsed_cleaned']
texts.append(item)
### Add bigrams and trigrams ###
texts = provide_helperfunctions.add_bigrams(texts,bi_gram_mostcommon)
texts = provide_helperfunctions.add_trigrams(texts,tri_gram_mostcommon)
### Build the dictionary ###
corpus = [dictionary.doc2bow(text) for text in texts]
### Extract topic vectors ###
sent_topics_df = provide_helperfunctions.extract_vectors(ldamodel,int(num_topics),corpus)
data_lda = pd.concat([data,sent_topics_df],axis=1, join='inner')
### Apply SVD for dimensionality reduction ##
print("\n### DIMENSIONALITY REDUCTION FOR VISUAL OUTPUT ###\n")
col_topics = [ col for col in data_lda.columns if re.match("^topic",col)]
dfvalues=data_lda[col_topics].values
twodim = provide_helperfunctions.reduce_to_k_dim(dfvalues)
df_pca=pd.DataFrame(twodim)
df_pca.rename(columns={0:'PCI1',1:'PCI2'},inplace=True)
data_lda_pca = pd.concat([data_lda,df_pca],axis=1, join='inner')
data_lda_pca.sort_values(by="date",inplace=True)
### Compute the preferred alternative for each speaker and contrast it with the voting outcome
# Individual date for all speakers
date = '1990-10-02'
print(F"\n### CONFUSION MATRIX FOR {date} - HELLINGER DISTANCE ###\n")
pref_distance = provide_helperfunctions.create_distance( date , data_lda_pca , col_topics )
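# create_distance presumably returns, for each voting member at this meeting, the Hellinger
# distance between their topic vector and each alternative's topic vector:
# H(p, q) = (1/sqrt(2)) * sqrt(sum_i (sqrt(p_i) - sqrt(q_i))^2).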
col_alts = [ col for col in pref_distance.columns if re.match("^alt",col)]
pref_distance["pred_vote"] = pref_distance[col_alts].idxmin(axis=1)
pref_distance = pref_distance.merge(data_lda_pca[['date','speaker','act_vote']],on=['speaker','date'])
confusion_matrix = pd.crosstab(pref_distance["act_vote"], pref_distance["pred_vote"], rownames=['Actual'], colnames=['Predicted'])
dataexample = data_lda_pca[(data_lda_pca['d_alt']==1) | (data_lda_pca['votingmember']==1)][data_lda_pca['date']==date]
pref_distance.drop(columns="date",inplace=True)
print(pref_distance)
provide_helperfunctions.output_plot(date,dataexample)
# Latex output
data_lda_pca.rename(columns=dict(zip(col_topics,[f"t_{i+1}" for i in range(num_topics)]))).loc[data_lda_pca['date']==date,['speaker']+[f"t_{i+1}" for i in range(num_topics)]].sort_values(by="speaker").to_latex(f"../output/tab_topicdist_{date}.tex",index=False,float_format="%.2f")
pref_distance.rename(columns=dict(zip(col_alts,[f"hd_{col}" for col in col_alts]))).to_latex(f"../output/tab_pref_matrix_{date}.tex",index=False,float_format="%.2f")
confusion_matrix.reset_index().rename(columns={"Actual":"Actual \ Predicted"}).to_latex(f"../output/tab_conf_matrix_{date}.tex",index=False,float_format="%.2f")
# All dates and all speakers
print("\n### CONFUSION MATRIX ALL DATA - HELLINGER DISTANCE ###\n")
pref_distance = | pd.DataFrame() | pandas.DataFrame |
import pystan
import os
import pickle as pkl
import numpy as np
import pandas as pd
from .utils import do_ols
__dir__ = os.path.abspath(os.path.dirname(__file__))
class HierarchicalModel(object):
def __init__(self, X, subject_ids, subjectwise_errors=False, cauchy_priors=False):
self.X = pd.DataFrame(X)
self.subject_ids = np.array(subject_ids).squeeze()
self.subjectwise_errors = subjectwise_errors
self.cauchy_priors = cauchy_priors
if(self.subject_ids.shape[0] != self.X.shape[0]):
raise Exception("Number of subjects indices should"
"correspond to number of rows in the"
"design matrices.")
self._get_subj_idx()
def sample(self, signal, chains, *args, **kwargs):
measure = signal.squeeze()
if(len(measure) != self.X.shape[0]):
raise Exception("Signal should have same number of elements"
"as rows in the design matrix.")
def _get_subj_idx(self):
self.unique_subject_ids = np.sort(np.unique(self.subject_ids))
self.n_subjects = len(self.unique_subject_ids)
self.subj_idx = np.searchsorted(
self.unique_subject_ids, self.subject_ids)
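        # searchsorted against the sorted unique ids maps every observation's
        # subject id to a 0-based index into unique_subject_ids.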
def get_ols_estimates(self, signal):
print("Estimating parameters using OLS...")
signal = pd.DataFrame(signal, index=self.X.index)
matrix = pd.concat((signal, self.X), 1)
self.ols_betas = matrix.groupby(self.subject_ids).apply(do_ols)
index = [(e,) + t for e, t in zip(self.ols_betas.index.get_level_values(0),
self.ols_betas.index.get_level_values(1))]
self.ols_betas.index = pd.MultiIndex.from_tuples(index,
names=['subject_id',
'event_type',
'covariate',
'regressor'])
self.ols_betas_group = self.ols_betas.groupby(
level=[1, 2, 3], sort=False).mean()
self.ols_sd_group = self.ols_betas.groupby(
level=[1, 2, 3], sort=False).std()
if len(self.unique_subject_ids) == 1:
self.ols_sd_group.iloc[:] = 1
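            # With a single subject the group-level SD is NaN, so fall back to 1
            # to keep a usable scale.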
class HierarchicalStanModel(HierarchicalModel):
def __init__(self, X, subject_ids, subjectwise_errors=False, cauchy_priors=False, recompile=False, model_code=None):
super(HierarchicalStanModel, self).__init__(
X, subject_ids, subjectwise_errors)
if model_code is not None:
fn_string = model_code
else:
if subjectwise_errors:
fn_string = 'subjectwise_errors'
else:
fn_string = 'groupwise_errors'
if cauchy_priors:
fn_string += '_cauchy'
else:
fn_string += '_normal'
stan_model_fn_pkl = os.path.join(
__dir__, 'stan_models', '%s.pkl' % fn_string)
stan_model_fn_stan = os.path.join(
__dir__, 'stan_models', '%s.stan' % fn_string)
if not os.path.exists(stan_model_fn_pkl) or recompile:
self.model = pystan.StanModel(file=stan_model_fn_stan)
with open(stan_model_fn_pkl, 'wb') as f:
pkl.dump(self.model, f)
else:
with open(stan_model_fn_pkl, 'rb') as f:
self.model = pkl.load(f)
def sample(self, signal, chains=1, iter=1000, init_ols=False, *args, **kwargs):
super(HierarchicalStanModel, self).sample(
signal, chains, *args, **kwargs)
data = {'measure': signal,
'subj_idx': self.subj_idx + 1,
'n': self.X.shape[0],
'j': self.n_subjects,
'm': self.X.shape[1],
'X': self.X.values}
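        # subj_idx is shifted by +1 above because Stan arrays are 1-indexed.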
if init_ols:
init_dict = [self.get_init_dict(signal)] * chains
else:
init_dict = 'random'
self.results = self.model.sampling(data=data,
chains=chains,
iter=iter,
init=init_dict,
*args,
**kwargs)
def get_subject_traces(self, melt=False):
if not hasattr(self, 'results'):
raise Exception('Model has not been sampled yet!')
traces = self.results['beta_subject'].reshape((self.results['beta_subject'].shape[0],
np.prod(self.results['beta_subject'].shape[1:])))
columns = [(c,) if type(
c) is not tuple else c for c in self.X.columns.values]
columns = [
(sid,) + column for sid in self.unique_subject_ids for column in columns]
columns = pd.MultiIndex.from_tuples(columns,
names=['subject_id'] + self.X.columns.names)
traces = | pd.DataFrame(traces, columns=columns) | pandas.DataFrame |
"""Volume Technical Analysis"""
__docformat__ = "numpy"
import pandas as pd
import pandas_ta as ta
def ad(df_stock: pd.DataFrame, use_open: bool) -> pd.DataFrame:
"""Calculate AD technical indicator
Parameters
----------
df_stock : pd.DataFrame
Dataframe of prices
use_open : bool
Whether to use open prices
Returns
-------
pd.DataFrame
Dataframe with technical indicator
"""
if use_open:
df_ta = ta.ad(
high=df_stock["High"],
low=df_stock["Low"],
close=df_stock["Close"],
volume=df_stock["Volume"],
open_=df_stock["Open"],
).dropna()
# Do not use open stock values
else:
df_ta = ta.ad(
high=df_stock["High"],
low=df_stock["Low"],
close=df_stock["Close"],
volume=df_stock["Volume"],
).dropna()
return pd.DataFrame(df_ta)
def adosc(df_stock: pd.DataFrame, use_open: bool, fast: int, slow: int) -> pd.DataFrame:
"""Calculate AD oscillator technical indicator
Parameters
----------
df_stock : pd.DataFrame
Dataframe of prices
    use_open : bool
        Whether to use open prices
    fast : int
        Fast period for the oscillator
    slow : int
        Slow period for the oscillator
Returns
-------
pd.DataFrame
Dataframe with technical indicator
"""
if use_open:
df_ta = ta.adosc(
high=df_stock["High"],
low=df_stock["Low"],
close=df_stock["Close"],
volume=df_stock["Volume"],
open_=df_stock["Open"],
fast=fast,
slow=slow,
).dropna()
else:
df_ta = ta.adosc(
high=df_stock["High"],
low=df_stock["Low"],
close=df_stock["Close"],
volume=df_stock["Volume"],
fast=fast,
slow=slow,
).dropna()
return | pd.DataFrame(df_ta) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
assert result['D'].isna()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
assert pd.isna(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
assert len(result) == 50
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
assert len(result) == 50
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
assert got == expected
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
'IKEA:s 1700-tals serie')
tm.assert_index_equal(result.columns,
Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
tm.assert_series_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
assert type(df.a[0]) is np.float64
assert df.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(self):
warning_type = False
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if self.engine == 'c' and self.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = self.read_csv(StringIO(data))
assert df.a.dtype == np.object
def test_integer_overflow_bug(self):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
assert result[0].dtype == np.float64
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
assert result[0].dtype == np.float64
def test_catch_too_many_names(self):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# see gh-10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
assert len(result) == 2
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
if self.engine == 'c':
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# see gh-10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# see gh-10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
# see gh-9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
# see gh-12215
df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
('y', ['42e']), ('z', ['632E'])])
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
StringIO(data), float_precision=prec)
tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = self.read_csv(StringIO(data))
assert result['ID'].dtype == object
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
for conv in (np.int64, np.uint64):
pytest.raises(OverflowError, self.read_csv,
StringIO(data), converters={'ID': conv})
# These numbers fall right inside the int64-uint64 range,
# so they should be parsed as string.
ui_max = np.iinfo(np.uint64).max
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min, ui_max]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([x])
tm.assert_frame_equal(result, expected)
# These numbers fall just outside the int64-uint64 range,
# so they should be parsed as string.
too_big = ui_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
# No numerical dtype can hold both negative and uint64 values,
# so they should be cast as string.
data = '-1\n' + str(2**63)
expected = DataFrame([str(-1), str(2**63)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
data = str(2**63) + '\n-1'
expected = DataFrame([str(2**63), str(-1)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# see gh-9535
expected = DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(self.read_csv(
StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO('foo,bar\n'),
nrows=10, as_recarray=True)
result = DataFrame(result[2], columns=result[1],
index=result[0])
tm.assert_frame_equal(DataFrame.from_records(
result), expected, check_index_type=False)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = next(iter(self.read_csv(StringIO('foo,bar\n'),
chunksize=10, as_recarray=True)))
result = DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(DataFrame.from_records(result), expected,
check_index_type=False)
def test_eof_states(self):
# see gh-10728, gh-10548
# With skip_blank_lines = True
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# gh-10728: WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# gh-10548: EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
# See gh-12203
csv = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""
# make sure that an error is still thrown
# when the 'usecols' parameter is not provided
msg = r"Expected \d+ fields in line \d+, saw \d+"
with tm.assert_raises_regex(ValueError, msg):
df = self.read_csv(StringIO(csv))
expected = DataFrame({
'a': [0, 3, 8],
'b': [1, 4, 9]
})
usecols = [0, 1]
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b']
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
# See gh-12493
names = ['Dummy', 'X', 'Dummy_2']
usecols = names[1:2] # ['X']
# first, check to see that the response of
# parser when faced with no provided columns
# throws the correct error, with or without usecols
errmsg = "No columns to parse from file"
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''))
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''), usecols=usecols)
expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
expected = DataFrame(columns=usecols)
df = self.read_csv(StringIO(''), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
expected = DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# gh-8661, gh-8679: this should ignore six lines including
# lines with trailing whitespace and blank lines
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# gh-8983: test skipping set of rows after a row with trailing spaces
expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_raise_on_sep_with_delim_whitespace(self):
# see gh-6607
data = 'a b c\n1 2 3'
with tm.assert_raises_regex(ValueError,
'you can only specify one'):
self.read_table(StringIO(data), sep=r'\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# see gh-9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = np.array([[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep=r'\s+')
tm.assert_numpy_array_equal(df.values, expected)
expected = np.array([[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_numpy_array_equal(df.values, expected)
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = np.array([[1, 2., 4.],
[5., np.nan, 10.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
def test_regex_separator(self):
# see gh-6607
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep=r'\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
assert expected.index.name is None
tm.assert_frame_equal(df, expected)
data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
@tm.capture_stdout
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
self.read_csv(StringIO(text), verbose=True)
output = sys.stdout.getvalue()
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 3 NA values in column a\n'
# Reset the stdout buffer.
sys.stdout = StringIO()
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
self.read_csv(StringIO(text), verbose=True, index_col=0)
output = sys.stdout.getvalue()
# Engines are verbose in different ways.
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 1 NA values in column a\n'
def test_iteration_open_handle(self):
if PY3:
pytest.skip(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
if self.engine == 'c':
pytest.raises(Exception, self.read_table,
f, squeeze=True, header=None)
else:
result = self.read_table(f, squeeze=True, header=None)
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
assert expected.A.dtype == 'int64'
assert expected.B.dtype == 'float'
assert expected.C.dtype == 'float'
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
df2 = self.read_csv(StringIO(data), sep=';', decimal=',')
assert df2['Number1'].dtype == float
assert df2['Number2'].dtype == float
assert df2['Number3'].dtype == float
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = self.read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = self.read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_raise_on_no_columns(self):
# single newline
data = "\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
# test with more than a single newline
data = "\n\n\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
def test_compact_ints_use_unsigned(self):
# see gh-13323
data = 'a,b,c\n1,9,258'
# sanity check
expected = DataFrame({
'a': np.array([1], dtype=np.int64),
'b': np.array([9], dtype=np.int64),
'c': np.array([258], dtype=np.int64),
})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
expected = DataFrame({
'a': np.array([1], dtype=np.int8),
'b': np.array([9], dtype=np.int8),
'c': np.array([258], dtype=np.int16),
})
# default behaviour for 'use_unsigned'
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True)
tm.assert_frame_equal(out, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True,
use_unsigned=False)
tm.assert_frame_equal(out, expected)
expected = DataFrame({
'a': np.array([1], dtype=np.uint8),
'b': np.array([9], dtype=np.uint8),
'c': np.array([258], dtype=np.uint16),
})
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True,
use_unsigned=True)
tm.assert_frame_equal(out, expected)
def test_compact_ints_as_recarray(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
assert result.dtype == ex_dtype
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
assert result.dtype == ex_dtype
def test_as_recarray(self):
# basic test
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# index_col ignored
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True, index_col=0)
tm.assert_numpy_array_equal(out, expected)
# respects names
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = '1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# header order is respected even though it conflicts
# with the natural ordering of the column names
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'b,a\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('b', '=i8'), ('a', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# overrides the squeeze parameter
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a\n1'
expected = np.array([(1,)], dtype=[('a', '=i8')])
out = self.read_csv(StringIO(data), as_recarray=True, squeeze=True)
tm.assert_numpy_array_equal(out, expected)
# does data conversions before doing recarray conversion
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
conv = lambda x: int(x) + 1
expected = np.array([(2, 'a'), (3, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True,
converters={'a': conv})
tm.assert_numpy_array_equal(out, expected)
# filters by usecols before doing recarray conversion
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1,), (2,)], dtype=[('a', '=i8')])
out = self.read_csv(StringIO(data), as_recarray=True,
usecols=['a'])
tm.assert_numpy_array_equal(out, expected)
def test_memory_map(self):
mmap_file = os.path.join(self.dirpath, 'test_mmap.csv')
expected = DataFrame({
'a': [1, 2, 3],
'b': ['one', 'two', 'three'],
'c': ['I', 'II', 'III']
})
out = self.read_csv(mmap_file, memory_map=True)
tm.assert_frame_equal(out, expected)
def test_null_byte_char(self):
# see gh-2741
data = '\x00,foo'
cols = ['a', 'b']
expected = DataFrame([[np.nan, 'foo']],
columns=cols)
if self.engine == 'c':
out = self.read_csv(StringIO(data), names=cols)
tm.assert_frame_equal(out, expected)
else:
msg = "NULL byte detected"
with tm.assert_raises_regex(ParserError, msg):
self.read_csv(StringIO(data), names=cols)
def test_utf8_bom(self):
# see gh-4793
bom = u('\ufeff')
utf8 = 'utf-8'
def _encode_data_with_bom(_data):
bom_data = (bom + _data).encode(utf8)
return BytesIO(bom_data)
# basic test
data = 'a\n1'
expected = DataFrame({'a': [1]})
out = self.read_csv(_encode_data_with_bom(data),
encoding=utf8)
tm.assert_frame_equal(out, expected)
# test with "regular" quoting
data = '"a"\n1'
expected = DataFrame({'a': [1]})
out = self.read_csv(_encode_data_with_bom(data),
encoding=utf8, quotechar='"')
tm.assert_frame_equal(out, expected)
# test in a data row instead of header
data = 'b\n1'
expected = DataFrame({'a': ['b', '1']})
out = self.read_csv(_encode_data_with_bom(data),
encoding=utf8, names=['a'])
tm.assert_frame_equal(out, expected)
# test in empty data row with skipping
data = '\n1'
expected = DataFrame({'a': [1]})
out = self.read_csv(_encode_data_with_bom(data),
encoding=utf8, names=['a'],
skip_blank_lines=True)
| tm.assert_frame_equal(out, expected) | pandas.util.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2022, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import pandas as pd
from pandas._testing import assert_series_equal
from wetterdienst.core.scalar.values import ScalarValuesCore
def test_coerce_strings():
series = ScalarValuesCore._coerce_strings(pd.Series(["foobar"]))
series_expected = pd.Series(["foobar"], dtype= | pd.StringDtype() | pandas.StringDtype |
"""Get data into JVM for prediction and out again as Spark Dataframe"""
import logging
logger = logging.getLogger('nlu')
import pyspark
from pyspark.sql.functions import monotonically_increasing_id
import numpy as np
import pandas as pd
from pyspark.sql.types import StringType, StructType, StructField
class DataConversionUtils():
# Modin as well, but optional, so we don't import the type yet
supported_types = [pyspark.sql.DataFrame, pd.DataFrame, pd.Series, np.ndarray]
@staticmethod
def except_text_col_not_found(cols):
print(
f'Could not find a column named "text" in the input Pandas DataFrame. Please ensure such a column exists. Columns in the DF are: {cols}')
@staticmethod
def sdf_to_sdf(data, spark_sess, raw_text_column='text'):
"""No casting, Spark to Spark. Just add index col"""
logger.info(f"Casting Spark DF to Spark DF")
output_datatype = 'spark'
data = data.withColumn('origin_index', monotonically_increasing_id().alias('origin_index'))
stranger_features = []
if raw_text_column in data.columns:
# store all stranger features
if len(data.columns) > 1:
stranger_features = list(set(data.columns) - {raw_text_column})
else:
DataConversionUtils.except_text_col_not_found(data.columns)
return data, stranger_features, output_datatype
@staticmethod
def pdf_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting pandas to spark and add index col"""
logger.info(f"Casting Pandas DF to Spark DF")
output_datatype = 'pandas'
stranger_features = []
sdf = None
# set first col as text column if there is none
if raw_text_column not in data.columns: data.rename(columns={data.columns[0]: 'text'}, inplace=True)
data['origin_index'] = data.index
if raw_text_column in data.columns:
if len(data.columns) > 1:
# make Nans to None, or spark will crash
data = data.where(pd.notnull(data), None)
data = data.dropna(axis=1, how='all')
stranger_features = list(set(data.columns) - {raw_text_column})
sdf = spark_sess.createDataFrame(data)
else:
DataConversionUtils.except_text_col_not_found(data.columns)
return sdf, stranger_features, output_datatype
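# Illustrative usage sketch (an assumption, not part of the original module): given an
# active SparkSession named `spark`, a pandas frame with a 'text' column converts via
#   pdf = pd.DataFrame({'text': ['hello world', 'nlu is neat'], 'label': [0, 1]})
#   sdf, stranger_features, dtype = DataConversionUtils.pdf_to_sdf(pdf, spark)
# where sdf is a Spark DataFrame carrying an extra 'origin_index' column and dtype == 'pandas'.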
@staticmethod
def pds_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting pandas series to spark and add index col. # for df['text'] colum/series passing casting follows pseries->pdf->spark->pd """
logger.info(f"Casting Pandas Series to Spark DF")
output_datatype = 'pandas_series'
sdf = None
schema = StructType([StructField(raw_text_column, StringType(), True)])
data = pd.DataFrame(data).dropna(axis=1, how='all')
# If series from a column is passed, its column name will be reused.
if raw_text_column not in data.columns and len(data.columns) == 1:
data[raw_text_column] = data[data.columns[0]]
else:
logger.info(
f'INFO: NLU will assume {data.columns[0]} as the text column since the default text column could not be found')
data[raw_text_column] = data[data.columns[0]]
data['origin_index'] = data.index
if raw_text_column in data.columns:
sdf = spark_sess.createDataFrame(pd.DataFrame(data[raw_text_column]), schema=schema)
else:
DataConversionUtils.except_text_col_not_found(data.columns)
if 'origin_index' not in sdf.columns:
sdf = sdf.withColumn('origin_index', monotonically_increasing_id().alias('origin_index'))
return sdf, [], output_datatype
@staticmethod
def np_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting numpy array to spark and add index col. This is a bit inefficient. Casting follow np->pd->spark->pd. We could cut out the first pd step """
logger.info(f"Casting Numpy Array to Spark DF")
output_datatype = 'numpy_array'
if len(data.shape) != 1: raise ValueError(
    f"Input numpy array must be 1-dimensional for prediction. Input data shape is {data.shape}")
sdf = spark_sess.createDataFrame(pd.DataFrame({raw_text_column: data, 'origin_index': list(range(len(data)))}))
return sdf, [], output_datatype
@staticmethod
def str_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting str to spark and add index col. This is a bit inefficient. Casting follow # inefficient, str->pd->spark->pd , we can could first pd"""
logger.info(f"Casting String to Spark DF")
output_datatype = 'string'
sdf = spark_sess.createDataFrame(pd.DataFrame({raw_text_column: data, 'origin_index': [0]}, index=[0]))
return sdf, [], output_datatype
@staticmethod
def str_list_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting str list to spark and add index col. This is a bit inefficient. Casting follow # # inefficient, list->pd->spark->pd , we can could first pd"""
logger.info(f"Casting String List to Spark DF")
output_datatype = 'string_list'
if all(type(elem) == str for elem in data):
sdf = spark_sess.createDataFrame(
pd.DataFrame({raw_text_column: | pd.Series(data) | pandas.Series |
import argparse
from collections import namedtuple
from datetime import datetime
import logging
import re
import struct
import time
import json
import pandas as pd
import numpy as np
import requests
# Datafeed functions
from . import iex
from . import portcalc
logger = logging.getLogger(__name__)
# pylint: disable=broad-except,too-many-instance-attributes,too-many-arguments
Holding = ['exchange', 'ticker', 'volume']
Tick = ['time', 'exchange', 'ticker', 'bid', 'ask', 'bidsize', 'asksize']
Book = ['exchange', 'ticker', 'bid', 'ask', 'bidsize', 'asksize', 'time']
class Agent:
"""Base class for Pedlar trading agent."""
def __init__(self, maxsteps=20, universe=None, ondatafunc=None, ondataparams=None,
username="algosoc", agentname='random', pedlarurl='https://pedlardev.herokuapp.com/'):
self.maxlookup = 1000
self.tradesession = 0
self.endpoint = pedlarurl
self.username = username
self.agentname = agentname
self.maxsteps = maxsteps
self.history = pd.DataFrame(columns=Tick).set_index(['time', 'exchange', 'ticker'])
self.orderbook = pd.DataFrame(columns=Book).set_index(['exchange', 'ticker'])
# List of holding history to be merge at the end of trading session
self.holdingshistory = []
self.pnlhistory = []
# caplim is the max amount of capital allocated
# shorting uses up caplim but gives cash
# check caplim at each portfolio rebalance and scale down the target holding if that exceeds caplim
self.startcash = 50000
self.portfoval = 50000
self.caplim = self.portfoval * 2
self.pnl = 0
self.cash = self.startcash
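# Worked example of the capital limit above (added commentary, values are illustrative):
# with portfoval = 50000 the caplim is 100000; a rebalance targeting 60000 long and
# 50000 short commits 110000 of caplim, so the target holdings would be scaled by
# roughly 100000 / 110000 ~= 0.91 to stay within the limit.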
# User defined functions
self.universe = universe
self.ondata = ondatafunc
self.ondatauserparms = ondataparams
@classmethod
def from_args(cls, parents=None):
"""Create agent instance from command line arguments."""
parser = argparse.ArgumentParser(description="Pedlar trading agent.",
fromfile_prefix_chars='@',
parents=parents or list())
parser.add_argument("-u", "--username", default="nobody", help="Pedlar Web username.")
parser.add_argument("-s", "--pedlarurl", default="", help="Algosoc Server")
return cls(**vars(parser.parse_args()))
def start_agent(self, verbose=True):
# create user profile in MongoDB if not exist
if self.connection:
payload = {'user':self.username,'agent':self.agentname}
r = requests.post(self.endpoint+"/user", json=payload)
data = r.json()
self.tradesession = data['tradesession']
if verbose:
print('Tradesession: {}'.format(self.tradesession))
print('User: {} Agent: {}'.format(self.username,self.agentname))
print()
# connect to other datasource
# set up trading universe
self.step = 0
self.create_portfolio(self.universe,verbose)
return None
def create_portfolio(self, tickerlist=None, verbose=False):
if tickerlist is None:
tickerlist = [('IEX','SPY'), ('IEX','QQQ')]
self.portfolio = pd.DataFrame(columns=['volume'], index=pd.MultiIndex.from_tuples(tickerlist, names=('exchange', 'ticker')))
self.portfolio['volume'] = 0
iextickers = [x[1] for x in tickerlist if x[0]=='IEX']
self.iextickernames = ','.join(iextickers)
if self.iextickernames == '':
self.iextickernames = 'SPY,QQQ'
if verbose:
print('Portfolio')
print(self.portfolio)
self.tickers = tickerlist
self.n_assets = len(self.tickers)
return None
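# Illustrative note (added commentary): with the default tickerlist the portfolio is a
# two-row frame indexed by (exchange, ticker), i.e. rows ('IEX', 'SPY') and ('IEX', 'QQQ'),
# each starting with volume 0.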
def download_tick(self):
iexdata = iex.get_TOPS(self.iextickernames)
return iexdata
def extract_tick(self):
iexdata = pd.DataFrame()
return iexdata
def update_history(self, live=True, verbose=False):
if not live:
iex = self.extract_tick()
else:
iex = self.download_tick()
self.historysize = iex.shape[0]
# build order book
self.orderbook = | pd.DataFrame(columns=Book) | pandas.DataFrame |
from pyexpat import model
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from tqdm import tqdm
from fastinference.models import Ensemble, Tree
def create_mini_batches(inputs, targets, batch_size, shuffle=False):
""" Create an mini-batch like iterator for the given inputs / target / data. Shamelessly copied from https://stackoverflow.com/questions/38157972/how-to-implement-mini-batch-gradient-descent-in-python
Parameters
----------
inputs : array-like vector or matrix
The inputs to be iterated in mini batches
targets : array-like vector or matrix
The targets to be iterated in mini batches
batch_size : int
The mini batch size
shuffle : bool, default False
If True shuffle the batches
"""
assert inputs.shape[0] == targets.shape[0]
indices = np.arange(inputs.shape[0])
if shuffle:
np.random.shuffle(indices)
start_idx = 0
while start_idx < len(indices):
if start_idx + batch_size > len(indices) - 1:
excerpt = indices[start_idx:]
else:
excerpt = indices[start_idx:start_idx + batch_size]
start_idx += batch_size
yield inputs[excerpt], targets[excerpt]
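# Hypothetical usage sketch (added commentary, not part of the original module): with 100
# samples and batch_size=32 the generator yields batches of 32, 32, 32 and a final batch of 4.
#   X_demo = np.random.rand(100, 5)
#   y_demo = np.random.randint(0, 2, size=100)
#   for xb, yb in create_mini_batches(X_demo, y_demo, batch_size=32, shuffle=True):
#       assert xb.shape[0] == yb.shape[0]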
def refine(weights, estimators, X, Y, epochs, lr, batch_size, optimizer, verbose):
"""Performs SGD using the MSE loss over the leaf nodes of the given trees on the given data. The weights of each tree are respected during optimization but not optimized.
Args:
weights (np.array): The weights of the trees.
trees (list of Tree): The trees.
X (2d np.array): The data.
Y (np.array): The targe.
epochs (int): The number of epochs SGD is performed.
lr (float): The learning rate of SGD.
batch_size (int): The batch size of SGD
optimizer (str): The optimizer used for optimization. Can be {{"sgd", "adam"}}.
verbose (bool): If True outputs the loss during optimization.
Returns:
list of trees: The refined trees.
"""
n_classes = estimators[0].n_classes
if batch_size > X.shape[0]:
if verbose:
print("WARNING: The batch size for SGD is larger than the dataset supplied: batch_size = {} > X.shape[0] = {}. Using batch_size = X.shape[0]".format(batch_size, X.shape[0]))
batch_size = X.shape[0]
if optimizer == "adam":
m = np.zeros_like(weights)
v = np.zeros_like(weights)
t = 1
for epoch in range(epochs):
mini_batches = create_mini_batches(X, Y, batch_size, True)
batch_cnt = 0
loss_sum = 0
accuracy_sum = 0
with tqdm(total=X.shape[0], ncols=150, disable = not verbose) as pbar:
for x,y in mini_batches:
# Prepare the target and apply all trees
target_one_hot = np.array( [ [1.0 if yi == i else 0.0 for i in range(n_classes)] for yi in y] )
proba = []
for e, w in zip(estimators,weights):
proba.append(w * e.predict_proba(x))
proba = np.array(proba)
fbar = proba.sum(axis=0)
deriv = 2 * (fbar - target_one_hot) * 1.0 / x.shape[0] * 1.0 / n_classes
grad = np.mean(proba*deriv,axis=(1,2))
if optimizer == "sgd":
# sgd
weights -= lr*grad
else:
# adam
beta1 = 0.9
beta2 = 0.999
m = beta1 * m + (1-beta1) * grad
v = beta2 * v + (1-beta2) * (grad ** 2)
m_corrected = m / (1-beta1**t)
v_corrected = v / (1-beta2**t)
weights -= lr * m_corrected / (np.sqrt(v_corrected) + 1e-8)
t += 1
# compute some statistics
loss_sum += ((fbar - target_one_hot)**2).mean()
accuracy_sum += (fbar.argmax(axis=1) == y).mean() * 100.0
batch_cnt += 1
pbar.update(x.shape[0])
desc = '[{}/{}] loss {:2.4f} accuracy {:2.4f}'.format(
epoch,
epochs-1,
loss_sum / batch_cnt,
accuracy_sum / batch_cnt,
)
pbar.set_description(desc)
return weights
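# Explanatory note on the update in refine() above (added commentary, not executed code):
# with fbar(x) = sum_k w_k * h_k(x) and the scaled residual
#   deriv = 2 * (fbar - y_onehot) / (batch_size * n_classes),
# the exact MSE gradient w.r.t. w_k would be the sum over the batch and classes of
# h_k(x) * deriv; the code instead averages proba[k] * deriv with proba[k] = w_k * h_k(x),
# i.e. a rescaled variant of that gradient, before applying the SGD or Adam step.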
def optimize(model, X = None, Y = None, file = "", epochs = 5, lr = 1e-2, batch_size = 32, optimizer = "adam", verbose = False, **kwargs):
"""Performs weight refinement of the given ensemble. Weight-refinement refines the weights of the estimators in the ensemble by optimizing the a joint loss function using SGD. The main purpose is to offer a refinement method that does not have any other dependencies and can be used "as is" with any kind of ensemble. This implementation only supports the MSE loss. If you are interested in other loss functions and/or other optimizers besides vanilla SGD and ADAM please have a look at TODO
For refinement either :code:`X` / :code:`Y` must be provided or :code:`file` must point to a CSV file which has a "y" column. All remaining columns are interpreted as features. If both are provided then :code:`X` / :code:`Y` is used before the file. If none are provided an error is thrown.
You can activate this optimization by simply passing :code:`"leaf-refinement"` to the optimizer, e.g.
.. code-block::
loaded_model = fastinference.Loader.model_from_file("/my/nice/tree.json")
loaded_model.optimize("leaf-refinement", {"X": some_data, "Y" : some_targets})
Args:
model (Ensemble of Trees or Tree): The Tree or Ensemble of Trees that should be refined
X (2d np.array, optional): A (N,d) data matrix used for refinement. Defaults to None.
Y (np.array, optional): A (N,) target vector used for refinement. Defaults to None.
file (str, optional): Path to a CSV file from which X/Y is loaded if these are not provided. If set, the CSV must contain a "y" column to properly load Y. All remaining columns are interpreted as features. Defaults to "".
epochs (int, optional): Number of epochs used for SGD/ADAM. Defaults to 5.
lr (float, optional): Learning rate used for SGD/ADAM. Defaults to 1e-1.
batch_size (int, optional): Batch size used for SGD/ADAM. Defaults to 32.
optimizer (str, optional): Optimizer for optimization. Can be {{"sgd", "adam"}}. Defaults to "adam".
verbose (bool, optional): If True outputs the loss during optimization. Defaults to False.
Returns:
Ensemble of Trees or Tree: The refined ensemble / tree
"""
assert (X is not None and Y is not None) or file.endswith(".csv"), "You can either supply (X,y) directly or use `file' to supply a csv file that contains the data. You did not provide either. Please do so."
assert isinstance(model, Ensemble.Ensemble), "Weight refinement only works for Ensembles, but you provided {}".format(model.__class__.__name__)
assert lr >= 0, "Learning rate must be positive, but you gave {}".format(lr)
assert epochs >= 1, "Number of epochs must be >= 1, but you gave {}".format(epochs)
assert optimizer in ["sgd", "adam"], "The optimizer must be from {{adam, sgd}}, but you gave {}".format(optimizer)
if X is None or Y is None:
df = pd.read_csv(file)
df = df.dropna()
Y = df.pop("y")
df = | pd.get_dummies(df) | pandas.get_dummies |
import pandas as pd
import matplotlib.pyplot as pyplot
import os
from fctest.__PolCurve__ import PolCurve
class ScribPolCurve(PolCurve):
# mea_active_area = 0.21
def __init__(self, path, mea_active_area):
path = os.path.normpath(path)
raw_data = pd.read_csv(path, sep='\t', skiprows=41) # data hard to read without skiprows
data_part = raw_data.iloc[1:, [0, 1, 2, 5, 12, 13, 14, 17, 18]]
data_part.columns = ['time', 'current', 'current_density', 'voltage', \
'temp_cell', 'temp_anode', 'temp_cathode', 'rh_anode', 'rh_cathode']
#current_density = pd.to_numeric(data_part.iloc[:, 2].values)
current = pd.to_numeric(data_part.iloc[:, 1].values)
current_density = current / mea_active_area
voltage = | pd.to_numeric(data_part.iloc[:, 3].values) | pandas.to_numeric |
from airflow import DAG
from airflow.operators.python import PythonOperator, ShortCircuitOperator
from KafkaClient import KafkaClient
from AWSClient import AWSClient
from logger_creator import CreateLogger
from datetime import datetime
import pandas as pd
from io import StringIO
# Configuration Variables
csv_file_name = 'audio_descriptions.csv'
bucket_name = 'unprocessed-stt-audio'
kafka_servers = [
'localhost:9092',
'localhost:9093',
'localhost:9094'
]
# Creating Logger
logger = CreateLogger('Airflow-Audio-Input-storer', handlers=1)
logger = logger.get_default_logger()
# Instantating a KafkaClient Object
kf_client = KafkaClient(
'audio-data-description-storer-DAG',
kafka_servers
)
# Creating a Consumer using for the KafkaClient
kf_client.create_consumer(
topics='Text-Audio-input',
offset='earliest',
auto_commit=True,
group_id='airflow-text-audio-input-reader',
value_deserializer=kf_client.get_json_deserializer(),
timeout=1000
)
# Creating a AWSClient for uploading(storing) the Data
aws_client = AWSClient()
# DECLARING Airflow DAG CONFIGURATION
DAG_CONFIG = {
'depends_on_past': False,
'start_date': datetime(2020, 1, 1),
'email': ['<EMAIL>'],
'email_on_failure': True,
'schedule_interval': '0 0 0/1 ? * * *',
}
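# Hypothetical wiring sketch (an assumption; the DAG and task objects are not shown here):
#   with DAG('audio_description_storer', default_args=DAG_CONFIG) as dag:
#       read = ShortCircuitOperator(task_id='reading_kafka_data',
#                                   python_callable=_consume_kafka_data)
#       store = PythonOperator(task_id='store_descriptions_csv',
#                              python_callable=_create_and_save_csv_file)
#       read >> store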
# Declaring DAG used functions
# Kafka Data Reader
def _consume_kafka_data(ti):
try:
data = kf_client.get_data()
if(len(data) > 0):
ti.xcom_push(key='kafka_data', value=data)
logger.info(
f'SUCCESSFULLY LOADED {len(data)} DATA VALUES FROM KAFKA\'s "Text-Audio-input" Topic')
return True
return False
except Exception as e:
logger.exception('FAILED TO READ DATA FROM KAFKA SERVERS')
return False
# Dataframe constructor
def _create_and_save_csv_file(ti):
try:
fetched_data = ti.xcom_pull(
key='kafka_data', task_ids=['reading_kafka_data'])[0]
df = | pd.DataFrame(fetched_data) | pandas.DataFrame |
from dash import html, dcc
import pandas as pd
from adasher.elements import number, number_with_diff, CardHeaderStyles
from adasher.cards import card, container, stats_from_df
from adasher.templates import pie_plot, bar_plot, scatter_plot
from adasher import templates
from adasher.advanced import auto_analytics, association
from . import data
_hs = CardHeaderStyles.BLACK_FONT_GRAY_BG
def get_stats_content():
content = [
[(number_1(), 2), (number_2(), 2), (number_3(), 2), (number_4(), 2)],
[(number_with_diff_1(), 2), (number_with_diff_2(), 2), (number_with_diff_3(), 2), (number_with_diff_4(), 2)],
[(stats_using_df_1(), 4), (stats_using_df_2(), 4)]
]
result = list()
result.append(container(content))
return result
def get_py_markdown(code_text):
return dcc.Markdown('''
```python
{}
```
'''.format(code_text))
def number_1():
return card("Basic Number", [number(12)], _hs)
def number_2():
return card("Number with style", [number(12, number_style={'font-size': '30px', 'margin': '15px', 'color': 'red'})], _hs)
def number_3():
return card("Number with header", [number(-12, number_style={'font-size': '30px', 'margin': '15px', 'color': 'green'}, header_text='Number')], _hs)
def number_4():
return card("Number with header/style/info", [number(-12, number_style={"color": 'coral'}, info='All', header_text='Number')], _hs)
def number_with_diff_1():
return card("Number Diff 1", [number_with_diff(10, 12, '+ impact fall')], _hs)
def number_with_diff_2():
return card("Number Diff 2", [number_with_diff(12, 10, '+ impact raise')], _hs)
def number_with_diff_3():
return card("Number Diff 3", [number_with_diff(10, 12, '- impact fall', is_positive_impact=False)], _hs)
def number_with_diff_4():
return card("Number Diff 4", [number_with_diff(12, 10, '- impact raise 1', is_positive_impact=False, header='Num 1'),
number_with_diff(15, 10, '- impact raise 1', is_positive_impact=False, header='Num 2')],
_hs)
def stats_using_df_1():
df = pd.DataFrame({'Metric': ['A', 'B', 'C'], 'T1': [3, 4, 8], 'T2': [4, 5, 7]})
return stats_from_df(df, 'T1', 'T2', 'Metric', header='Metric Header', header_style=_hs)
def stats_using_df_2():
df = pd.DataFrame({'Metric': ['A', 'B', 'C'], 'T1': [3, 4, 8], 'T2': [4, 5, 7]})
return stats_from_df(df, 'T1', 'T2', 'Metric', is_positive_impact=False, header='Metric Header', header_style=_hs)
def get_stats_with_plots_content():
content = [
[(stats_plot_1(), 6), (stats_plot_2(), 6)],
[(stats_plot_3(), 6), (stats_plot_4(), 6)],
]
result = list()
result.append(container(content, 'Number with plot'))
return result
def stats_plot_1():
df = pd.DataFrame({'name': ['A', 'B'], 'value': [3, 4]})
return card('Stats pie plot', [number_with_diff(3, 4, 'info A', header='A'),
number_with_diff(12, 14, 'info B', header='B'),
pie_plot(df, label='name', value='value')], _hs)
def stats_plot_2():
df = pd.DataFrame({'name': ['A', 'B'], 'value': [3, 4]})
return card('Stats bar plot', [number_with_diff(3, 4, 'info A', header='A'),
number_with_diff(12, 14, 'info B', header='B'),
bar_plot(df, x='name', y='value')], _hs)
def stats_plot_3():
df = pd.DataFrame({'name': ['A', 'B'], 'value': [3, 4]})
return card('Stats scatter plot', [number_with_diff(3, 4, 'info A', header='A'),
number_with_diff(12, 14, 'info B', header='B'),
scatter_plot(df, x='name', y='value')], _hs)
def stats_plot_4():
df = | pd.DataFrame({'name': ['A', 'B', 'A', 'B'], 'value': [3, 4, 5, 6], 'group': ['X', 'X', 'Y', 'Y']}) | pandas.DataFrame |
""" Fred Model """
__docformat__ = "numpy"
import logging
from typing import Dict, List, Tuple
import fred
import pandas as pd
import requests
from fredapi import Fred
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def check_series_id(series_id: str) -> Tuple[bool, Dict]:
"""Checks if series ID exists in fred
Parameters
----------
series_id: str
Series ID to check
Returns
-------
bool:
Boolean if series ID exists
dict:
Dictionary of series information
"""
url = f"https://api.stlouisfed.org/fred/series?series_id={series_id}&api_key={cfg.API_FRED_KEY}&file_type=json"
r = requests.get(url, headers={"User-Agent": get_user_agent()})
# The above returns 200 if series is found
# There seems to be an occasional bug giving a 503 response where the json decoding fails
if r.status_code >= 500:
return False, {}
return r.status_code == 200, r.json()
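# Illustrative usage sketch (assumes a valid API_FRED_KEY is configured; added commentary):
#   exists, payload = check_series_id("GDP")
# exists is True when FRED answers with HTTP 200 for the series and payload holds the
# parsed JSON; a 5xx response yields (False, {}).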
@log_start_end(log=logger)
def get_series_notes(series_term: str) -> pd.DataFrame:
"""Get Series notes. [Source: FRED]
Parameters
----------
series_term : str
Search for this series term
Returns
----------
pd.DataFrame
DataFrame of matched series
"""
fred.key(cfg.API_FRED_KEY)
d_series = fred.search(series_term)
if "seriess" not in d_series:
return pd.DataFrame()
if not d_series["seriess"]:
return pd.DataFrame()
df_fred = | pd.DataFrame(d_series["seriess"]) | pandas.DataFrame |
import pandas as pd
import os
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
root = '/Users/Gabe/Downloads/thesis spreadies'
# sg_1k_1k = pd.read_csv(os.path.join(root,'we_depletions_sg_SWHC1000_INIDEP1000_timeseries.csv'), parse_dates=True)
# sg_600_600 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC600_INIDEP600_timeseries.csv'), parse_dates=True)
# sg_600_300 = pd.read_csv(os.path.join(root,'we_depletions_sg_SWHC600_INIDEP300_timeseries.csv'), parse_dates=True)
# sg_600_150 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC600_INIDEP150_timeseries.csv'), parse_dates=True)
#
# sg_300_300 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC300_INIDEP300_timeseries.csv'), parse_dates=True)
# sg_300_150 = pd.read_csv(os.path.join(root,'we_depletions_sg_SWHC300_INIDEP150_timeseries.csv'), parse_dates=True)
# sg_300_0 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC300_INIDEP0_timeseries.csv'), parse_dates=True)
#
# sg_150_150 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC150_INIDEP150_timeseries.csv'), parse_dates=True)
# sg_150_75 = pd.read_csv(os.path.join(root,'we_depletions_sg_SWHC150_INIDEP75_timeseries.csv'), parse_dates=True)
# sg_150_0 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC150_INIDEP0_timeseries.csv'), parse_dates=True)
#
# print sg_1k_1k.head()
#
# vcm_600_600 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP600_timeseries.csv'), parse_dates=True)
# vcm_600_300 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP300_timeseries.csv'), parse_dates=True)
# vcm_600_150 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP150_timeseries.csv'), parse_dates=True)
# vcm_300_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP300.csv'), parse_dates=True)
# vcm_300_150 = pd.read_csv(os.path.join(root,'ext_we_depletions_vcm_SWHC300_INIDEP150.csv'), parse_dates=True)
# vcm_300_0 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP0.csv'), parse_dates=True)
# plt.plot([1,2,3], [3, 5,7])
# plt.show()
vcm_600_600 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC600_INIDEP600.csv'), parse_dates=True)
vcm_600_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC600_INIDEP300.csv'), parse_dates=True)
vcm_600_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC600_INIDEP0.csv'), parse_dates=True)
vcm_300_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP300.csv'), parse_dates=True)
vcm_300_150 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP150.csv'), parse_dates=True)
vcm_300_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP0.csv'), parse_dates=True)
sg_600_600 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC600_INIDEP600.csv'), parse_dates=True)
sg_600_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC600_INIDEP300.csv'), parse_dates=True)
sg_600_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC600_INIDEP0.csv'), parse_dates=True)
sg_300_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC300_INIDEP300.csv'), parse_dates=True)
sg_300_150 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC300_INIDEP150.csv'), parse_dates=True)
sg_300_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC300_INIDEP0.csv'), parse_dates=True)
sg_150_150 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC150_INIDEP150.csv'), parse_dates=True)
sg_150_075 = pd.read_csv(os.path.join(root,'ext_we_depletions_sg_SWHC150_INIDEP75.csv'), parse_dates=True)
sg_150_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC150_INIDEP0.csv'), parse_dates=True)
sg_50_050 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC50_INIDEP50.csv'), parse_dates=True)
sg_50_025 = pd.read_csv(os.path.join(root,'ext_we_depletions_sg_SWHC50_INIDEP25.csv'), parse_dates=True)
sg_50_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC50_INIDEP0.csv'), parse_dates=True)
vcm_150_150 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC150_INIDEP150.csv'), parse_dates=True)
vcm_150_075 = pd.read_csv(os.path.join(root,'ext_we_depletions_sg_SWHC150_INIDEP75.csv'), parse_dates=True)
vcm_150_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC150_INIDEP0.csv'), parse_dates=True)
# # plt.plot([1,2,3], [3, 5,7])
# # plt.show()
#
# vcm_600_600 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP600_timeseries.csv'), parse_dates=True)
# vcm_600_300 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP300_timeseries.csv'), parse_dates=True)
# vcm_600_150 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP150_timeseries.csv'), parse_dates=True)
#
# vcm_300_300 = pd.read_csv(os.path.join(root, 'we_depletions_vcm_SWHC300_INIDEP300_timeseries.csv'), parse_dates=True)
# vcm_300_150 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC300_INIDEP150_timeseries.csv'), parse_dates=True)
# vcm_300_0 = pd.read_csv(os.path.join(root, 'we_depletions_vcm_SWHC300_INIDEP0_timeseries.csv'), parse_dates=True)
# print(sg_600_600['date'])
#
# plt.plot(sg_600_150['date'], sg_600_150['depletion'], label='sg')
# # plt.grid()
# plt.legend()
# plt.show()
# # plt.savefig(os.path.join(root, 'testfig.png'))
years = mdates.YearLocator()
months = mdates.MonthLocator()
years_fmt = mdates.DateFormatter('%Y')
### ===== SG 50 ======
fig, (ax1, ax2) = plt.subplots(nrows=2, sharey=False, sharex=True)
ax1.plot(pd.to_datetime(sg_50_000['date']), sg_50_000['depletion'], color='r', label='swhc_50_inidep_000', linewidth=5)
ax1.plot(pd.to_datetime(sg_50_025['date']), sg_50_025['depletion'], color='b', label='swhc_50_inidep_025', linewidth=3)
ax1.plot(pd.to_datetime(sg_50_050['date']), sg_50_050['depletion'], color='g', label='swhc_50_inidep_050', linewidth=1)
ax1.set_xlabel('Date')
ax1.set_ylabel('Depletion (mm)')
ax1.set_title('Depletion with Given SWHC and Initial Depletion - Sevilleta')
ax1.legend()
ax1.grid()
ax2.plot(pd.to_datetime(sg_50_000['date']), sg_50_000['recharge_ro'], color='r', label='swhc_50_inidep_000', linewidth=3)
ax2.plot(pd.to_datetime(sg_50_025['date']), sg_50_025['recharge_ro'], color='b', label='swhc_50_inidep_025', linewidth=2)
ax2.plot(pd.to_datetime(sg_50_050['date']), sg_50_050['recharge_ro'], color='g', label='swhc_50_inidep_050', linewidth=1)
ax2.set_xlabel('Date')
ax2.set_ylabel('Recharge (mm)')
ax2.legend()
ax2.grid()
ax2.set_title('Recharge with Given SWHC and Initial Depletion - Sevilleta')
plt.subplots_adjust(hspace=1)
plt.show()
### ===== vcm 150 ======
fig, (ax1, ax2) = plt.subplots(nrows=2, sharey=False, sharex=True)
ax1.plot(pd.to_datetime(vcm_150_000['date']), vcm_150_000['depletion'], color='r', label='swhc_150_inidep_000', linewidth=5)
ax1.plot(pd.to_datetime(vcm_150_075['date']), vcm_150_075['depletion'], color='b', label='swhc_150_inidep_075', linewidth=3)
ax1.plot(pd.to_datetime(vcm_150_150['date']), vcm_150_150['depletion'], color='g', label='swhc_150_inidep_150', linewidth=1)
ax1.set_title('Depletion with Given SWHC and Initial Depletion - <NAME>')
ax1.grid()
ax1.legend()
ax2.plot(pd.to_datetime(vcm_150_000['date']), vcm_150_000['recharge_ro'], color='r', label='swhc_150_inidep_000', linewidth=5)
ax2.plot(pd.to_datetime(vcm_150_075['date']), vcm_150_075['recharge_ro'], color='b', label='swhc_150_inidep_075', linewidth=3)
ax2.plot(pd.to_datetime(vcm_150_150['date']), vcm_150_150['recharge_ro'], color='g', label='swhc_150_inidep_150', linewidth=1)
ax2.set_title('Recharge with Given SWHC and Initial Depletion - <NAME>')
ax2.grid()
ax2.legend()
plt.subplots_adjust(hspace=1)
plt.show()
### ===== SG 600 ======
fig, (ax1, ax2) = plt.subplots(nrows=2, sharey=False, sharex=True)
ax1.plot(pd.to_datetime(sg_600_000['date']), sg_600_000['depletion'], color='r', label='swhc_600_inidep_000', linewidth=5)
ax1.plot(pd.to_datetime(sg_600_300['date']), sg_600_300['depletion'], color='b', label='swhc_600_inidep_300', linewidth=3)
ax1.plot(pd.to_datetime(sg_600_600['date']), sg_600_600['depletion'], color='g', label='swhc_600_inidep_600', linewidth=1)
ax1.set_xlabel('Date')
ax1.set_ylabel('Depletion (mm)')
ax1.set_title('Depletion with Given SWHC and Initial Depletion - Sevilleta')
ax1.legend()
ax1.grid()
ax2.plot(pd.to_datetime(sg_600_000['date']), sg_600_000['recharge_ro'], color='r', label='swhc_600_inidep_000', linewidth=3)
ax2.plot(pd.to_datetime(sg_600_300['date']), sg_600_300['recharge_ro'], color='b', label='swhc_600_inidep_300', linewidth=2)
ax2.plot( | pd.to_datetime(sg_600_600['date']) | pandas.to_datetime |
import pandas as pd
import numpy as np
import pycountry_convert as pc
import pycountry
import os
from iso3166 import countries
PATH_AS_RELATIONSHIPS = '../Datasets/AS-relationships/20210701.as-rel2.txt'
NODE2VEC_EMBEDDINGS = '../Check_for_improvements/Embeddings/Node2Vec_embeddings.emb'
DEEPWALK_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/DeepWalk_128.csv'
DIFF2VEC_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/Diff2Vec_128.csv'
NETMF_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/NetMF_128.csv'
NODESKETCH_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/NodeSketch_128.csv'
WALKLETS_EMBEDDINGS_256 = '../Check_for_improvements/Embeddings/Walklets_256.csv'
NODE2VEC_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Node2Vec_embeddings.emb'
NODE2VEC_LOCAL_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Node2Vec_p2_64.csv'
NODE2VEC_GLOBAL_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Node2Vec_q2_64.csv'
DIFF2VEC_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Diff2Vec_64.csv'
NETMF_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/NetMF_64.csv'
NODESKETCH_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/NodeSketch_64.csv'
NODE2VEC_WL5_E3_LOCAL = '../Check_for_improvements/Embeddings/Node2Vec_64_wl5_ws2_ep3_local.csv'
NODE2VEC_WL5_E3_GLOBAL = '../Check_for_improvements/Embeddings/Node2Vec_64_wl5_ws2_ep3_global.csv'
NODE2VEC_64_WL5_E1_GLOBAL = '../Check_for_improvements/Embeddings/Node2Vec_64_wl5_ws2_global.csv'
BGP2VEC_64 = '../Check_for_improvements/Embeddings/Node2Vec_bgp2Vec.csv'
BGP2VEC_32 = '../Check_for_improvements/Embeddings/BGP2VEC_32'
WALKLETS_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/Walklets_128.csv'
STORE_CSV_TO_FOLDER = '../Embeddings_Visualization/StorePreprocessedEmb'
def country_flag(data):
"""
    :param data: A dataframe row whose 'AS_rank_iso' field contains a 2-letter country abbreviation
    :return: The full country name, or 'Unknown Code' if the abbreviation cannot be matched
"""
list_alpha_2 = [i.alpha2 for i in list(countries)]
if data['AS_rank_iso'] in list_alpha_2:
return pycountry.countries.get(alpha_2=data['AS_rank_iso']).name
else:
return 'Unknown Code'
def country_to_continent(country_name):
"""
    This function takes a country name as input and returns the continent that the given country belongs to.
:param country_name: Contains the name of a country
:return: The continent
"""
try:
country_alpha2 = pc.country_name_to_country_alpha2(country_name)
country_continent_code = pc.country_alpha2_to_continent_code(country_alpha2)
country_continent_name = pc.convert_continent_code_to_continent_name(country_continent_code)
return country_continent_name
except:
return np.nan
def convert_country_to_continent(data):
"""
    Converts ISO alpha-2 codes (e.g. 'US') to full country names and then maps each country to its continent. Requires the iso3166 package.
:param data: Contains a dataframe combining 4 datasets
:return: The continent for each country
"""
data['AS_rank_iso'] = data.apply(country_flag, axis=1)
temp_list = []
for i in range(0, len(data)):
temp_list.append(country_to_continent(data['AS_rank_iso'][i]))
df = pd.DataFrame(temp_list, columns=['AS_rank_iso'])
data['AS_rank_iso'] = df['AS_rank_iso']
return data['AS_rank_iso']
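# Hedged usage sketch (added for illustration; not part of the original pipeline). It assumes a
# dataframe that already carries an 'AS_rank_iso' column with ISO alpha-2 codes; the sample values
# below are made up.
def _example_continent_lookup():
    sample = pd.DataFrame({'AS_rank_iso': ['US', 'GR', 'JP']})
    # country_flag() expands the codes to full names, country_to_continent() maps names to continents
    return convert_country_to_continent(sample)  # e.g. ['North America', 'Europe', 'Asia']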
def merge_datasets(final_df, embeddings_df):
"""
:param final_df: Its the dataset that is generated in Analysis/aggregate_data folder
:param embeddings_df: Contains pretrained embeddings
:return: A new merged dataset (containing improvement_score and the embedding of each ASN)
"""
print(final_df['ASN'].isin(embeddings_df['ASN']).value_counts())
mergedStuff = pd.merge(embeddings_df, final_df, on=['ASN'], how='left')
mergedStuff.replace('', np.nan, inplace=True)
return mergedStuff
def get_path_and_filename(model, dimensions):
"""
:param model: The model's name
:param dimensions: The number of dimensions of the given model
:return: The path where the script will be stored and its name
"""
file_name = 'Preprocessed' + str(model) + str(dimensions) + f'.csv'
outdir = STORE_CSV_TO_FOLDER
if not os.path.exists(outdir):
os.mkdir(outdir)
full_name = os.path.join(outdir, file_name)
return full_name
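# Hedged usage sketch (added): one way the helpers above might be wired together. The aggregate file
# name is an assumption, and the embedding CSV is assumed to expose an 'ASN' column.
def _example_merge_embeddings():
    final_df = pd.read_csv('final_dataframe.csv')          # assumed output of Analysis/aggregate_data
    embeddings_df = pd.read_csv(DEEPWALK_EMBEDDINGS_128)   # pretrained DeepWalk embeddings
    merged = merge_datasets(final_df, embeddings_df)
    merged.to_csv(get_path_and_filename('DeepWalk', 128), index=False)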
def read_Node2Vec_embeddings_file():
"""
    :return: A dataframe containing the ASNs and the embedding of each ASN, created with the Node2Vec algorithm.
"""
emb_df = pd.read_table(NODE2VEC_EMBEDDINGS, skiprows=1, header=None, sep=" ")
# name the columns
rng = range(0, 65)
new_cols = ['dim_' + str(i) for i in rng]
emb_df.columns = new_cols
# rename first column
emb_df.rename(columns={'dim_0': 'ASN'}, inplace=True)
return emb_df
def read_karateClub_embeddings_file(emb, dimensions):
"""
    The karateclub library requires nodes to be labeled with consecutive integer numbers and returns the embeddings
    in that ascending order, so this function reassigns each ASN to its own embedding.
:param emb: A dataset containing pretrained embeddings
:param dimensions: The dimensions of the given dataset
:return: A dataframe containing pretrained embeddings
"""
if dimensions == 64:
if emb == 'Diff2Vec':
df = pd.read_csv(DIFF2VEC_EMBEDDINGS_64, sep=',')
elif emb == 'NetMF':
df = pd.read_csv(NETMF_EMBEDDINGS_64, sep=',')
elif emb == 'NodeSketch':
df = pd.read_csv(NODESKETCH_EMBEDDINGS_64, sep=',')
elif emb == 'Walklets':
df = pd.read_csv(WALKLETS_EMBEDDINGS_128, sep=',')
elif emb == 'Node2Vec_Local':
df = pd.read_csv(NODE2VEC_LOCAL_EMBEDDINGS_64, sep=',')
elif emb == 'Node2Vec_Global':
df = pd.read_csv(NODE2VEC_GLOBAL_EMBEDDINGS_64, sep=',')
elif emb == 'Node2Vec_wl5_global':
df = pd.read_csv(NODE2VEC_64_WL5_E1_GLOBAL, sep=',')
elif emb == 'Node2Vec_wl5_e3_global':
df = | pd.read_csv(NODE2VEC_WL5_E3_GLOBAL, sep=',') | pandas.read_csv |
import sys
import pandas as pd
import numpy as np
from random import getrandbits
from collections import OrderedDict
from argparse import ArgumentParser
from datetime import datetime
import ruamel.yaml as yaml
from faker import Factory
from faker.providers.date_time import Provider as date_provider
from faker.providers.currency import Provider as currency_provider
from dateutil import parser
class ConfigurationException(Exception):
def __init__(self, message, conf_file):
super(ConfigurationException, self).__init__("Error while parsing configuration %s: %s" %
(conf_file if conf_file is not None else 'None', message))
self.conf_file = conf_file
def _get_param(_params, name, default=None):
if name is None:
return None
value = _params.get(name) if _params else None
return value if value is not None else default
def _get_dist_param(column, default=None):
_params = _get_param(column, 'params')
if not _params:
return None
_params = _get_param(_params, 'distribution')
if not _params and default is not None:
return {'type': default}
return _params
def _generate_range(column, min_, max_, dtype, length, conf_file=None):
if min_ is None and max_ is None:
return
_params = _get_param(column, 'params')
_from = _get_param(_params, 'from', min_)
_to = _get_param(_params, 'to', max_)
if (min_ is not None and (_from < min_ or _to < min_)) or (max_ is not None and (_from > max_ or _to > max_)):
raise ConfigurationException('Range must be between %d and %d' % (min_, max_), conf_file)
if _from > _to:
raise ConfigurationException("Invalid range: 'from' value must be less or equal to 'to' value", conf_file)
return np.random.randint(low=_from, high=_to, dtype=dtype, size=length)
def _generate_gaussian(column, dtype, length):
if dtype is not None and np.issubdtype(dtype, np.number):
_params = _get_param(column, 'params')
if _params:
_min = _get_param(_params, 'min')
_max = _get_param(_params, 'max')
if np.issubdtype(dtype, np.integer):
_min = _min if _min is not None else np.iinfo(dtype).min
_max = _max if _max is not None else np.iinfo(dtype).max
            elif np.issubdtype(dtype, np.floating):
_min = _min if _min is not None else np.finfo(dtype).min
_max = _max if _max is not None else np.finfo(dtype).max
else:
_min = _min if _min is not None else 0
_max = _max if _max is not None else 0
_dist = _get_dist_param(column, 'Gaussian')
_dist = _get_param(_dist, 'type') if _dist else None
if _dist is not None and _dist.lower() == 'gaussian':
if np.issubdtype(dtype, np.integer):
return np.random.randint(low=_min, high=_max, dtype=dtype, size=length)
else:
values = _min + np.random.rand(length, 1) * (_max - _min)
return values.flatten()
return None
def _generate_text(column, length, func):
_params = _get_param(column, 'params')
if _params:
_locale = _get_param(_params, 'locale')
fake = Factory.create(_locale) if _locale else Factory.create()
if _params:
_list = _get_param(_params, 'list')
if not _list:
_count = _get_param(_params, 'count')
if _count is not None:
_list = [getattr(fake, func)() for i in range(_count)]
else:
_list = [getattr(fake, func)().encode(encoding='UTF-8') for i in range(length)]
else:
fake = Factory.create()
_list = [getattr(fake, func)().encode(encoding='UTF-8') for i in range(length)]
return [_list[i] for i in np.random.randint(low=0, high=len(_list), size=length)]
def _generate_distribution(column, length, dtype=None):
_params = _get_dist_param(column)
if _params:
_dist = _get_param(_params, 'type')
if _dist is None:
return
_params = _get_param(_params, 'params')
if _dist.lower() == 'gaussian':
_mean = _get_param(_params, 'mean')
_sigma = _get_param(_params, 'sigma')
if _mean is None and _sigma is None:
return _generate_gaussian(column, dtype, length)
else:
return np.random.normal(loc=_mean if _mean is not None else 0,
scale=_sigma if _sigma is not None else 1.0, size=length)
elif _dist.lower() == 'lognormal':
_mean = _get_param(_params, 'mean')
_sigma = _get_param(_params, 'sigma')
return np.random.lognormal(mean=_mean if _mean is not None else 0,
sigma=_sigma if _sigma is not None else 1.0, size=length)
elif _dist.lower() == 'poisson':
_lambda = _get_param(_params, 'lambda')
return np.random.poisson(lam=_lambda, size=length)
elif _dist.lower() == 'beta':
_a = _get_param(_params, 'a')
_b = _get_param(_params, 'b')
return np.random.beta(a=_a, b=_b, size=length)
elif _dist.lower() == 'binomial':
_n = _get_param(_params, 'n')
_p = _get_param(_params, 'p')
return np.random.binomial(n=_n, p=_p, size=length)
elif _dist.lower() == 'gamma':
_gamma = _get_param(_params, 'gamma')
_scale = _get_param(_params, 'scale')
return np.random.gamma(shape=_gamma, scale=_scale if _scale is not None else 0, size=length)
elif _dist.lower() == 'uniform':
_low = _get_param(_params, 'low')
_high = _get_param(_params, 'high')
return np.random.uniform(low=_low, high=_high, size=length)
elif _dist.lower() == 'chi-square':
_df = _get_param(_params, 'df')
return np.random.chisquare(df=_df, size=length)
elif _dist.lower() == 'weibull':
_a = _get_param(_params, 'a')
return np.random.weibull(a=_a, size=length)
elif _dist.lower() == 'triangular':
_left = _get_param(_params, 'left')
_mode = _get_param(_params, 'mode')
_right = _get_param(_params, 'right')
return np.random.triangular(left=_left, mode=_mode, right=_right, size=length)
elif dtype is not None:
return _generate_gaussian(column, dtype, length)
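# Hedged illustration (added): the shape of a column spec that _generate_distribution() consumes.
# The field names mirror the lookups above; the concrete values are made-up assumptions.
_EXAMPLE_DISTRIBUTION_COLUMN = {
    'name': 'response_time',
    'type': 'float32',
    'params': {
        'distribution': {
            # supported types: Gaussian, lognormal, Poisson, beta, binomial, gamma,
            # uniform, chi-square, Weibull, triangular
            'type': 'gamma',
            'params': {'gamma': 2.0, 'scale': 1.5},
        }
    },
}
# e.g. _generate_distribution(_EXAMPLE_DISTRIBUTION_COLUMN, length=1000) draws 1000 gamma samples.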
def _validate_configuration(conf_file):
def check_range(a, value, range, _label, conf_file):
if range is not None:
a_min, a_max = range
if value < a_min or value > a_max:
raise ConfigurationException("Feature '{3}': '{0}' parameter must be in range [{1}; {2}]"
.format(a, a_min, a_max, _label), conf_file)
def check_params(_params, a, b, _str, a_optional=False, b_optional=False,
a_range=None, b_range=None, check_greater=False, check_positive=True):
if _params is None:
return
_params = _get_param(_params, 'params')
if _params is None:
return
_a = _get_param(_params, a)
_b = _get_param(_params, b)
if not a_optional and _a is None:
raise ConfigurationException("Feature '{2}': '{0}' parameter must be set for {1}"
.format(a, _str, _label), conf_file)
if not b_optional and _b is None:
raise ConfigurationException("Feature '{2}': '{0}' parameter must be set for {1}"
.format(b, _str, _label), conf_file)
if check_positive and ((_a is not None and _a < 0) or (_b is not None and _b < 0)):
raise ConfigurationException("Feature '{3}': '{0}' and '{1}' parameters for {2} must be positive"
.format(a, b, _str, _label), conf_file)
check_range(a, _a, a_range, _label, conf_file)
check_range(b, _b, b_range, _label, conf_file)
if check_greater and _a > _b:
raise ConfigurationException("Feature '{2}': value of '{0}' parameter must be less than value of '{1}'"
.format(a, b, _label), conf_file)
def check_single_param(_params, a, _str, a_optional=False, check_positive=True):
if _params is None:
return
_params = _get_param(_params, 'params')
if _params is None:
return
_a = _get_param(_params, a)
if not a_optional and _a is None:
raise ConfigurationException("Feature '{2}': '{0}' parameter must be set for {1}"
.format(a, _str, _label), conf_file)
if check_positive and (_a is not None and _a < 0):
raise ConfigurationException("Feature '{2}': '{0}' parameter for {1} must be positive"
.format(a, _str, _label), conf_file)
with open(conf_file, 'r') as stream:
try:
conf = yaml.safe_load(stream)
except yaml.YAMLError as exc:
            raise ConfigurationException(str(exc), conf_file)
columns = _get_param(conf, 'columns')
if not columns:
raise ConfigurationException('No columns are defined in the configuration file', conf_file)
for column in columns:
_label = _get_param(column, 'name')
_type = _get_param(column, 'type')
_dist_params = _get_dist_param(column)
# If distribution is set then no need to define type (float as default)
if not _dist_params:
try:
_type_num = np.issubdtype(np.dtype(_type).type, np.number)
except TypeError as ex:
_type_num = False
if not _type:
raise ConfigurationException('Type is not set for {0}:'.format(_label), conf_file)
if _type not in ['day', 'month', 'weekday', 'year', 'date', 'time',
'name', 'country', 'city', 'company', 'currency', 'boolean'] \
and not _type_num:
raise ConfigurationException('Invalid type for %s:' % _label, conf_file)
if _type == 'date':
_params = _get_param(column, 'params')
_to = parser.parse(_params.get('to')) if _params and _params.get('to') is not None else datetime.now()
_from = parser.parse(_params.get('from')) if _params and _params.get('from') is not None else _to
if _from > _to:
raise ConfigurationException('Invalid date range for {0}: [{1}; {2}]'.format(_from, _to, _label), conf_file)
else:
_params = _get_param(column, 'params')
if not _params:
continue
            if None not in [_params.get('min'), _params.get('max')]:
_from = _get_param(_params, 'min')
_to = _get_param(_params, 'max')
if _from > _to:
raise ConfigurationException('Invalid numeric range for {0}: [{1}; {2}]'.format(_from, _to, _label), conf_file)
if _params.get('distribution') is not None:
_dist = _get_param(_dist_params, 'type')
if _dist is None:
raise ConfigurationException('Distribution type is not set for {0}:'.format(_label), conf_file)
if _dist.lower() == 'beta':
check_params(_dist_params, 'a', 'b', 'beta distribution')
elif _dist.lower() == 'binomial':
check_params(_dist_params, 'n', 'p', 'binomial distribution', b_range=(0, 1))
elif _dist.lower() == 'gamma':
check_params(_dist_params, 'gamma', 'scale', 'Gamma distribution')
elif _dist.lower() == 'uniform':
check_params(_dist_params, 'low', 'high', 'uniform distribution', check_greater=True)
elif _dist.lower() == 'chi-square':
check_single_param(_dist_params, 'df', 'chi-square distribution')
elif _dist.lower() == 'poisson':
check_single_param(_dist_params, 'lambda', 'Poisson distribution')
elif _dist.lower() == 'weibull':
check_single_param(_dist_params, 'a', 'Weibull distribution')
elif _dist.lower() == 'lognormal':
check_params(_dist_params, 'mean', 'sigma', 'lognormal distribution')
elif _dist.lower() == 'triangular':
check_params(_dist_params, 'left', 'mode', 'triangular distribution',
check_greater=True, check_positive=False)
check_params(_dist_params, 'mode', 'right', 'triangular distribution',
check_greater=True, check_positive=False)
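# Hedged example configuration (added for illustration; every value below is an assumption).
# A YAML file of this shape passes _validate_configuration() and can drive generate_pandas():
_EXAMPLE_CONFIG_YAML = """
length: 100
columns:
  - name: signup_date
    type: date
    params: {from: '2020-01-01', to: '2020-12-31', pattern: '%Y-%m-%d'}
  - name: country
    type: country
  - name: score
    params:
      distribution: {type: uniform, params: {low: 0.0, high: 1.0}}
"""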
def generate_pandas(conf_file):
_validate_configuration(conf_file)
with open(conf_file, 'r') as stream:
try:
conf = yaml.safe_load(stream)
except yaml.YAMLError as exc:
            raise ConfigurationException(str(exc), conf_file)
length = _get_param(conf, 'length', 0)
columns = _get_param(conf, 'columns')
data = OrderedDict()
for column in columns:
_label = _get_param(column, 'name')
_type = _get_param(column, 'type')
if not _label or (not _type and not _get_dist_param(column)):
continue
if _type == 'day':
data[_label] = _generate_range(column, 1, 31, np.uint8, length, conf_file)
elif _type == 'month':
data[_label] = _generate_range(column, 1, 12, np.uint8, length, conf_file)
elif _type == 'weekday':
data[_label] = [date_provider.day_of_week() for i in range(length)]
elif _type == 'year':
data[_label] = _generate_range(column, datetime.min.year, datetime.now().year, np.uint16, length, conf_file)
elif _type == 'date':
_params = _get_param(column, 'params')
_to = parser.parse(_params['to']) if _params and _params.get('to') is not None else datetime.now()
_from = parser.parse(_params['from']) if _params and _params.get('from') is not None else _to
_pattern = _get_param(_params, 'pattern', '%Y-%m-%d')
data[_label] = [date_provider.date_time_between_dates(_from, _to).strftime(_pattern)
for i in range(length)]
elif _type == 'time':
            _params = _get_param(column, 'params')
_pattern = _get_param(_params, 'pattern', '%H:%M:%S')
data[_label] = [date_provider.time(pattern=_pattern) for i in range(length)]
elif _type == 'currency':
_params = _get_param(column, 'params')
if _params:
_list = _get_param(_params, 'list')
if not _list:
_count = _get_param(_params, 'count')
if _count:
_list = [currency_provider.currency_code() for i in range(_count)]
_items = [_list[i] for i in np.random.randint(low=0, high=len(_list), size=length)]
else:
_items = [currency_provider.currency_code() for i in range(length)]
data[_label] = _items
elif _type == 'name':
data[_label] = _generate_text(column, length, 'name')
elif _type == 'country':
            _params = _get_param(column, 'params')
if _params and _get_param(_params, 'code') is not None:
if _get_param(_params, 'code') == True:
data[_label] = _generate_text(column, length, 'country_code')
else:
data[_label] = _generate_text(column, length, 'country')
else:
data[_label] = _generate_text(column, length, 'country')
elif _type == 'city':
data[_label] = _generate_text(column, length, 'city')
elif _type == 'company':
data[_label] = _generate_text(column, length, 'company')
elif _type == 'boolean':
_items = [getrandbits(1) for i in range(length)]
_params = _get_param(column, 'params')
_as_int = _get_param(_params, 'as_int')
            if not _as_int:
                _items = [bool(item) for item in _items]
data[_label] = _items
else:
if _type is not None:
try:
_type = np.dtype(_type)
except TypeError as ex:
_type = np.float16
else:
_type = np.float16
series = _generate_distribution(column, length, _type)
if series is not None:
data[_label] = series
return | pd.DataFrame(data) | pandas.DataFrame |
import streamlit as st
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
from nltk.tokenize import sent_tokenize
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
'''
# BERTerReads
---
'''
@st.cache(allow_output_mutation=True)
def load_model():
'''
Function to load (and cache) DistilBERT model
'''
model = SentenceTransformer('distilbert-base-nli-stsb-mean-tokens')
return model
@st.cache(allow_output_mutation=True)
def get_reviews(url):
'''
Function to scrape all the reviews from the first page of a GoodReads book URL
'''
# Download & soupify webpage
r = requests.get(url)
soup = BeautifulSoup(r.content, features='html.parser')
# Find all review text blocks
reviews_src = soup.find_all('div', class_='reviewText stacked')
# Initialize list to store cleaned review text
reviews = []
# Loop through each review text block
for review in reviews_src:
# Extract review text
try:
text = review.find('span', style='display:none').get_text(' ', strip=True)
except:
text = review.get_text(' ', strip=True)
# Remove spoiler tags from review text
text = re.sub(r'\(view spoiler\) \[', '', text)
text = re.sub(r'\(hide spoiler\) \] ', '', text)
# Append review text to list
reviews.append(text)
# Transform review list to dataframe
df = pd.DataFrame(reviews, columns=['review'])
return df
@st.cache
def clean_reviews(df):
'''
Function to clean review text and divide into individual sentences
'''
# Append space to all sentence end characters
    df['review'] = (df['review'].str.replace('.', '. ', regex=False)
                    .str.replace('!', '! ', regex=False)
                    .str.replace('?', '? ', regex=False))
# Initialize dataframe to store review sentences
sentences_df = pd.DataFrame()
# Loop through each review
for i in range(len(df)):
# Save review to variable
review = df.iloc[i]['review']
# Tokenize review into sentences
sentences = sent_tokenize(review)
# Transform sentences into dataframe
new_sentences = pd.DataFrame(sentences, columns=['sentence'])
# Add sentences to sentences dataframe
        sentences_df = pd.concat([sentences_df, new_sentences], ignore_index=True)
# Set lower and upper thresholds for sentence word count
lower_thresh = 5
upper_thresh = 50
# Remove whitespaces at the start and end of sentences
sentences_df['sentence'] = sentences_df['sentence'].str.strip()
# Create list of sentence lengths
sentence_lengths = sentences_df['sentence'].str.split(' ').map(len)
# Filter sentences
sentences_df = sentences_df[
(sentence_lengths > lower_thresh) & (sentence_lengths < upper_thresh)]
sentences_df.reset_index(drop=True, inplace=True)
return sentences_df['sentence']
@st.cache
def embed_sentences(sentences):
'''
Function to transform sentences into vectors
'''
sentence_vectors = model.encode(sentences)
return sentence_vectors
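# Hedged usage sketch (added): chaining the cached steps above outside of Streamlit. The URL is a
# placeholder assumption, and `model` is assumed to have been initialised via load_model().
def _example_review_pipeline():
    df = get_reviews('https://www.goodreads.com/book/show/0000000')  # placeholder URL
    sentences = clean_reviews(df)
    vectors = embed_sentences(sentences)
    return sentences, vectors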
@st.cache
def get_opinions(sentences, sentence_vectors, k=3, n=1):
'''
Function to extract the n most representative sentences from k clusters, with density scores
'''
# Instantiate the model
kmeans_model = KMeans(n_clusters=k, random_state=24)
# Fit the model
kmeans_model.fit(sentence_vectors);
# Set the number of cluster centre points to look at when calculating density score
centre_points = int(len(sentences) * 0.02)
# Initialize list to store mean inner product value for each cluster
cluster_density_scores = []
# Initialize dataframe to store cluster centre sentences
df = pd.DataFrame()
# Loop through number of clusters
for i in range(k):
# Define cluster centre
centre = kmeans_model.cluster_centers_[i]
# Calculate inner product of cluster centre and sentence vectors
ips = np.inner(centre, sentence_vectors)
# Find the sentences with the highest inner products
top_index = pd.Series(ips).nlargest(n).index
top_sentence = sentences[top_index].iloc[0]
centre_ips = | pd.Series(ips) | pandas.Series |
import pandas as pd
class RecHash:
def __init__(self):
# Combinations of header labels
self.base = ['Rk', 'Date', 'G#', 'Age', 'Tm', 'Home', 'Opp', 'Result', 'GS']
self.receiving = ['Rec_Tgt', 'Rec_Rec', 'Rec_Yds', 'Rec_Y/R', 'Rec_TD', 'Rec_Ctch%', 'Rec_Y/Tgt']
self.rushing = ['rush_att', 'rush_yds', 'rush_Y/A', 'rush_TD']
self.passing = ['pass_cmp', 'pass_att', 'Cmp%', 'pass_yds', 'pass_td', 'Int', 'Rate', 'Sk', 'Sk-Yds',
'pass_Y/A', 'AY/A']
self.rush_sk = ['rush_sk', 'tkl', 'Ast']
self.scoring2p = ['2pt']
self.scoring = ['Any_TD', 'Any_Pts']
self.punting = ['Pnt', 'Pnt_Yds', 'Y/P', 'Blck']
self.kick_rt = ['Kick_Rt', 'Kick_RtYds', 'Y/Rt', 'Kick_TD']
self.punt_rt = ['Pnt_rt', 'Pnt_Yds', 'Y/Pnt', 'Pnt_TD']
def md5b3c4237d9a10de8cfaad61852cb552c4(self, df):
# Rename columns
df.columns = self.base + self.receiving + self.rushing + self.kick_rt + self.punt_rt + self.scoring + self.rush_sk
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.scoring2p)], axis=1)
# set all the new columns to zero
df.loc[:, self.scoring2p] = 0
return df
def md5bcb96297b50fb2120f475e8e05fbabcd(self,df):
# Rename columns
df.columns = self.base + self.receiving + self.rushing + self.passing + self.kick_rt + self.punt_rt + self.scoring2p + self.scoring
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.rush_sk)], axis=1)
# set all the new columns to zero
df.loc[:, self.rush_sk] = 0
return df
def md54560c290b45e942c16cc6d7811345fce(self,df):
# Rename columns
df.columns = self.base + self.receiving + self.rushing + self.passing + self.punt_rt + self.scoring2p + self.scoring
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.kick_rt)], axis=1)
# set all the new columns to zero
df.loc[:, self.kick_rt] = 0
return df
def md54c82a489ec5b2c943e78c9018dcbbca1(self, df):
# Rename columns
df.columns = self.base + self.receiving + self.rushing + self.passing + self.punt_rt + self.scoring
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.kick_rt + self.scoring2p)], axis=1)
# set all the new columns to zero
df.loc[:, self.kick_rt + self.scoring2p] = 0
return df
def md5e8ffc7202223bb253e92da83b76e9944(self, df):
# Rename columns
df.columns = self.base + self.receiving + self.rushing + self.passing + self.punt_rt + self.scoring + self.rush_sk
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.kick_rt + self.scoring2p)], axis=1)
# set all the new columns to zero
df.loc[:, self.kick_rt + self.scoring2p] = 0
return df
def md550fcceaa170b1a1e501e3f40548e403d(self, df):
# Rename columns
df.columns = self.base + self.receiving + self.rushing + self.kick_rt + self.punt_rt + self.scoring
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.scoring2p)], axis=1)
# set all the new columns to zero
df.loc[:, self.scoring2p] = 0
return df
def md5e160e714b29305ecfecf513cbf84b80f(self, df):
# Rename columns
df.columns = self.base + self.receiving + self.rushing + self.punt_rt + self.scoring
# add missing cols
df = pd.concat([df, | pd.DataFrame(columns=self.kick_rt + self.scoring2p) | pandas.DataFrame |
from __future__ import division, unicode_literals, print_function # for compatibility with Python 2 and 3
import numpy as np
import pandas as pd
import pims
import trackpy as tp
import ipywidgets as widgets
import matplotlib as mpl
import matplotlib.pyplot as plt
from taxispy.detect_peaks import detect_peaks
import math
import random
import time
import subprocess
import platform
from io import BytesIO
import os
from matplotlib.backends.backend_agg import FigureCanvasAgg
from IPython.display import display
from deap import base, creator, tools, algorithms
__author__ = "<NAME>, https://github.com/m1vg"
__version__ = "0.1.6.3"
__license__ = "MIT"
mpl.rc('image', cmap='gray')
Box = widgets.Box
VBox = widgets.VBox
Accordion = widgets.Accordion
Text = widgets.Text
IntText = widgets.IntText
FloatText = widgets.FloatText
Button = widgets.Button
Toggle = widgets.ToggleButton
HTML = widgets.HTML
IntSlider = widgets.IntSlider
Range = widgets.IntRangeSlider
FloatRange = widgets.FloatRangeSlider
Image = widgets.Image
HBox = widgets.HBox
Dropdown = widgets.Dropdown
Label = widgets.Label
Checkbox = widgets.Checkbox
# initialize the user interface.
class UserInterface(object):
def __init__(self):
self.path = []
self.frames_second = []
self.pixels_micron = []
self.fig_size = [5, 5]
self.frames =[]
self.f = []
self.t_i = []
self.slider = None
self.play = None
self.frame_range = None
self.vel = None
self.angular = None
self.ensemble_particle_id_list = None
self.counter = 0
self.interactive_ranges = False
self.vel_ensemble = None
self.angular_ensemble = None
self.trajectories_dic = None
self.cut_button = None
self.max_displacement = None
self.ax1_ind = None
self.ax2_ind = None
self.ax3_ind = None
self.ax4_ind = None
self.fig_individual = None
self.acc_vel_ensemble = None
self.av_vel_ensemble = None
self.av_vel = None
self.av_angular = None
self.av_acc_vel = None
self.peaks_table = None
# options controlling behaviour of genetic algorithm
self.generations = None
self.population = None
self.parallel = None
self.optimal_objective_function = None
self.weights_obj1 = None
self.weights_obj2 = None
self.individuals_bounds = None
# options controlling the behavior of adaptation curve.
self.single_trajectories = False
self.adaptation_curve_data = None
self.adaptation_curve_smooth_data = None
self.show_id = False
self.displacement_3_points = None
self.max_displacement_3_points = None
self.acc_vel = None
self.acc_angular = None
self.optimal_parameter_set = [4, 3, 10]
# First, create four boxes, each for one of the four sections.
self.box0 = VBox()
self.box1 = VBox()
self.box2 = VBox()
self.box3 = VBox()
self.box3_1 = VBox()
self.box4 = VBox()
self.box5 = VBox()
self.box6 = VBox()
self.peak_frame = None
self.peak_values = None
self.peak_height = None
self.excel_unfiltered = None
self.excel_filtered = None
self.lock1 = True
# Now, create accordion
self.interface = Accordion(children=[self.box1, self.box2, self.box3, self.box3_1, self.box4, self.box5])
title_list = ['File', 'Feature Identification', 'Trajectories',
'Visualization', 'Parameter Determination', 'Tumbling Frequencies']
for idx, val in enumerate(title_list):
self.interface.set_title(idx, val)
self.populate_first_window()
self.populate_second_window()
self.populate_third_window()
self.populate_fourth_window()
self.populate_fifth_window()
def populate_first_window(self):
# ###################### now lets start constructing the first box.
legend = HTML(value='<style>div.a {line-height: normal;}</style>''<div class="a">'
'Introduce the location of the folder containing frames to be analyzed in the '
'<b>Path</b> field. Then, click the <b>Load Frames</b> button. If necessary, use the '
'<b>Frames</b> slider to adjust the range of frames to be analyzed, '
'then click on <b>Cut Frames</b>.'
' Adjust numerical values for <b>Frames/s</b> and <b>Pixels/Micron</b> if necessary.'
'Then, click on <b>Process Data</b>. '
'<br><br></div>')
path = Text(description='Path',
placeholder='/Users/Figures ..',
value='/Documents/'
)
frames_second = FloatText(description='Frames/s',
value=20.3)
pixels_micron = FloatText(description='Pixels/Micron',
value=6.0)
load_data = Button(description='Process Data')
load_data.path = path
load_data.frames_second = frames_second
load_data.pixels_micron = pixels_micron
self.pixels_micron = pixels_micron
self.frames_second = frames_second
load_data.on_click(self.load_data_function)
frame_segment = Range(description='Frames', min=0, max=1000, step=1, value=[0, 100])
load_button = Button(description='Load Frames')
load_button.path = path
self.cut_button = Button(description='Cut Frames', disabled=True)
self.cut_button.on_click(self.cut_frames)
load_button.on_click(self.update_frame_segment)
self.box1.children = [legend, path, load_button, frame_segment, self.cut_button,
frames_second, pixels_micron, load_data]
def populate_second_window(self):
# ####################### now let's construct second box
legend0 = HTML(value='<style>div.a {line-height: normal;}</style>'
'<div class="a">Please select adequate values for the following parameters:<br><br></div>')
legend1 = HTML(value='<style>div.a {line-height: normal;}</style>'
'<div class="a"> <br><b>Diameter</b> is given in pixels and its value should be odd. It '
'refers to the diameter of the particles to be identified by the software. '
' <b>Min. Mass</b> '
'refers to the minimal mass (brightness) particles should have in order to be considered. '
'<b>Diameter</b> and <b>Min. Mass</b> are related. <b>Invert</b> '
'refers to the color pattern. Use if cells are represented by black objects in original '
'raw frames. '
'Adequate values for minimal mass can be extracted from the histograms below. '
'The first, an intermediate, and the last frames are characterized '
'by each of the three columns shown below. Histograms are for the mass of the particles,'
' blue circles are cells identified by '
'the software. If cells (bright objects) are not identified properly, adjust '
'parameter values. To continue, click on <b>Calculate Trajectories</b>: <br><br>'
'</div>')
diameter = IntSlider(description='Diameter', value=25, min=1, max=99, step=2, continuous_update=False)
diameter.observe(handler=self.update_hist_frames, names='value')
self.diameter = diameter
invert = Toggle(value=True,
description='Invert?'
)
invert.observe(handler=self.update_hist_frames, names='value')
self.invert = invert
min_mass = IntSlider(description='Min. Mass', value=2000, min=0, max=5000, continuous_update=False)
self.min_mass = min_mass
self.min_mass.observe(handler=self.update_hist_frames, names='value')
self.mass_histogram_box = Box()
self.frames_container = Box()
controllers = HBox()
controllers.children = [diameter, min_mass, invert]
button_calculate_trajectories = Button(description='Calculate Trajectories')
button_calculate_trajectories.on_click(self.refresh_trajectories_ensemble)
self.box2.children = [legend0, controllers, legend1, button_calculate_trajectories,
VBox([self.frames_container, self.mass_histogram_box])]
def populate_third_window(self):
# ####################### now let's construct third box
legend4 = HTML(value='<style>div.a {line-height: normal;}</style>'
'<div class="a">The trajectories shown below are calculated using following parameters:'
' </div>'
)
legend4_1 = HTML(value ='<style>div.a {line-height: normal;}</style>'
'<div class="a">'
'<b>Max. Disp.</b> refers to the maximum displacement (in pixels) '
'allowed for a cell to move between frames. <b> Min. # Frms</b> refers to '
'the minimum number of frames that a trajectory should have to be considered. '
' Please change values as required and click on <b>Recalculate</b> '
'to refresh results. '
'The number of trajectories shown in the plot on the right panel can be '
'reduced by increasing the displacement threshold (<b>Disp. Thrshld.</b>). '
'This threshold can be set to values '
'between 0 and 100% of the maximum displacement of all particles. '
'Trajectories shown exhibit a displacement that equals or surpasses the threshold set. '
'Alternatively, trajectories can be filtered by adjusting the frame range '
'(<b>Frame Rng</b>). </div>'
)
legend5 = HTML(value='')
max_displacement = IntSlider(value=self.diameter.value, description='Max. Disp.', min=0,
max=self.diameter.value*5, continuous_update=False, step=1)
self.max_displacement = max_displacement
memory = IntSlider(value=0, description='Memory', min=0, max=0, continuous_update=False, step=1)
self.memory = memory
number_frames = IntSlider(value=20, description='Min. # Frms', min=0, max=40, step=1)
self.number_frames = number_frames
self.box_trajectories_ensemble1 = Box()
self.box_trajectories_ensemble2 = Box()
self.box_trajectories_ensemble = HBox(children=[self.box_trajectories_ensemble1, self.box_trajectories_ensemble2])
controllers2 = VBox()
controllers2.children = [max_displacement, self.number_frames]
controllers3 = HBox()
controller_displ = IntSlider(value=0, description='Disp. Thrshld', min=0,
max=100, continuous_update=False, step=1)
controller_time_frame = widgets.IntRangeSlider(value=[0, 10], min=0, max=10.0, step=1,
description='Frame Rng:', disabled=False,
continuous_update=False, orientation='horizontal', readout=True)
controller_displ.observe(handler=self.filter_initial_trajectories, type='change', names='value')
controllers3.children = [controller_displ, controller_time_frame]
recalculate = Button(description='Recalculate')
recalculate.on_click(self.recalculate_link)
button_box =HBox(children=[recalculate])
self.legend6 = HTML()
self.box3.controller_time_frame = controller_time_frame
self.box3.controller_displ = controller_displ
self.box3.children = [legend4, controllers2, legend4_1, controllers3, self.box_trajectories_ensemble,
self.legend6, button_box, legend5]
def populate_fourth_window(self):
# ####################### now let's construct 3.1 Box. Visualization of a certain particle.
self.legend3_1 = HTML(value = '<style>div.a {line-height: normal;}</style>'
'<div class="a">'
'Cell trajectories identified by the software can be visualized in this window.'
' Select a trajectory from the drop-down menu and press the play button.'
'<br /><br /></div>'
)
self.trajectories_menu = Dropdown(description='Trajectory')
self.trajectories_menu.observe(handler=self.update_video_parameters, type='change', names='value')
self.video_wid = widgets.Image()
# ####################### now let's construct fourth box
ensemble = HTML('<b>Automatic Parameter Identification Using a Genetic Algorithm</b>')
description = HTML('<style>div.a {line-height: normal;}</style><div class="a">'
'In this section, key parameters for event identification, i.e., '
'# Frames, # Smooth, and Acc. Thrhld, can be automatically identified using '
'an optimization routine. Key parameters are identified by minimizing the difference'
' between the estimated and the real number of change of direction for a given set of '
' trajectories. To populate the training set, first provide the number of trajectories '
'by adjusting the <b> # Trajectories </b> slider, then click on <b>Populate</b>. A randomly '
'selected training set will appear. Update this list by providing the trajectory ID and its '
'observed number of change of direction. Alternatively, provide the name of an Excel '
'file containing two columns, one for the trajectory ID and one for its respective change '
'of direction. The headers of these columns should be "Trajectory" and "Tumbles", '
'respectively. Once the training set has been loaded from an excel file or manually typed, '
'click on <b>Estimate Parameters</b>. Please note that this step is computationally intensive'
' and might take several minutes to complete. After the optimization routine is done, '
'the button <b>Show Parameters</b> will appear and you can continue to the '
'<b>Tumbling Frequencies</b> tab. '
'<br /><br /></div>')
individual = HTML('<b>Analysis of Individual Trajectories</b>')
description_individual = HTML('Select one trajectory from the list to generate velocity plots.'
' Time can be adjusted by changing <b>Time Range</b>.')
self.individual_metrix_box = Box()
self.individual_controllers_box = VBox()
training_controller_box = VBox([HBox([IntSlider(min=0, max=10,value='10', description='# Trajectories:'),
Button(description='Populate')]),
HBox([Text(description='File:', placeholder='Enter Excel File (.xlsx)'),
Button(description='Load')]),
])
training_controller_box.children[0].children[1].on_click(self.populate_training_set)
training_controller_box.children[1].children[1].on_click(self.load_training_set)
training_set = VBox()
estimate_button = Button(description='Estimate Parameters', disabled=True)
estimate_button.on_click(self.prepare_genetic_algorithm)
optimal_parameters_box = VBox()
genetic_algorithm_controller = VBox()
self.box4.children = [individual, # 0
description_individual, # 1
self.individual_controllers_box, # 2
self.individual_metrix_box, # 3
ensemble, # 4
description, # 5
training_controller_box, # 6
training_set, # 7
genetic_algorithm_controller, # 8
estimate_button, # 9
optimal_parameters_box # 10
]
def populate_fifth_window(self):
# ####################### now let's construct fifth box
legend6 = HTML('Set parameters for data smoothing and event identification:')
legend7 = HTML('<style>div.a {line-height: normal;}</style>''<div class="a">'
'<br />Now, set thresholds to filter trajectories with anomalous behavior. '
' Use the displacement threshold to eliminate stuck cells exhibiting '
'high velocity. A threshold for the maximum number of change of directions (Max Chng Dir) '
'can be used to eliminate trajectories with excessive number of turns.<br /><br /></div>')
legend7_1 = HTML('<style>div.a {line-height: normal;}</style>''<div class="a">'
'<br />In order to calculate adaptation curves, set a value for time intervals in seconds '
'- T. int. (s) -. To calculate the adaptation time, set a threshold value for the frequency '
'of change of direction (Chg. Dir.)<br /><br /></div>')
lin_vel_threshold = widgets.BoundedIntText(value=4, min=0, max=100, step=1,
description='Velocity', disabled=False, continuous_update=True)
acc_threshold = widgets.BoundedIntText(value=10, min=0, max=1000, step=1,
description='Acceleration', disabled=False, continuous_update=False)
disp_threshold = IntSlider(value=10, description='Dsplcmt, %', min=0,
max=100, continuous_update=False, step=1)
turns_threshold = widgets.BoundedIntText(value=10, min=0, max=100, step=1, description='Max Chng Dir',
disabled=False,continuous_update=True)
frames_average = widgets.BoundedIntText(value=4, min=0, max=10, step=1,
description='# Frames', disabled=False, continuous_update=False)
smooth_cycles = widgets.BoundedIntText(value=3, min=0, max=10, step=1, description='# Smooth', disabled=False,
continuous_update=False)
time_interval = widgets.BoundedIntText(value=1, min=1, max=10, step=1,
description='T. Int. (s)', disabled=False, continuous_update=True)
change_dir_threshold = widgets.BoundedFloatText(value=0.45, min=0, max=2, step=0.05,
description='Chg. Dir. (1/s)', disabled=False,
continuous_update=True)
frame_ranges = widgets.IntRangeSlider(value=[0, 10], min=0, max=10.0, step=1,
description='Frame Rng:', disabled=False, continuous_update=False,
orientation='horizontal', readout=True)
b_calculate = Button(description='Calculate')
b_calculate.on_click(self.calculate_ensemble)
results = VBox()
results_string = HTML()
options_adaptation_curve = VBox()
data_adaptation_curve = VBox()
b_report = Button(description='Report', disabled=True)
b_report.on_click(self.generate_report)
self.box5.acceleration = acc_threshold
self.box5.lin_vel_threshold = lin_vel_threshold
self.box5.children = [legend6, # 0
frames_average, # 1
smooth_cycles, # 2
acc_threshold, # 3
legend7, # 4
lin_vel_threshold, # 5
HBox([disp_threshold, turns_threshold, frame_ranges]), # 6
legend7_1, # 7
time_interval, # 8
change_dir_threshold, # 9
b_calculate, # 10
results, # 11
results_string, # 12
options_adaptation_curve, # 13
data_adaptation_curve, # 14
b_report] # 15
self.box5.frame_ranges = frame_ranges
# ####################### now let's construct sixth box
        legend8 = HTML('Adaptation times can be calculated in this window. Required parameters are the same as '
                       'for the Ensemble Analysis window. Note that, in order for a trajectory to be considered, '
                       'it must be in focus for a certain number of frames. This parameter is defined in the '
                       'window <b>Trajectories</b> by the value of # Frames. The same is true for the parameter '
                       'Max. Disp. and for all parameters from the window <b>Feature Identification</b>.')
legend9 = HTML('First set parameters for data smoothing:')
legend10 = HTML('Now, set parameters for event identification. Then click <b>Calculate</b>')
b_calculate2 = Button(description='Calculate')
b_calculate2.on_click(self.calculate_adaptation_time)
results2 = VBox()
results_string2 = HTML()
time_interval = widgets.BoundedFloatText(value=5, min=0, max=500, step=1,description='T. Int. (s)',
disabled=False, continuous_update=False)
lin_vel_threshold2 = widgets.BoundedIntText(value=12, min=0, max=100, step=1,
description='Velocity', disabled=False, continuous_update=True)
self.box6.children = [legend8, # 0
legend9, # 1
frames_average, # 2
smooth_cycles, # 3
legend10, # 4
lin_vel_threshold2, # 5
acc_threshold, # 6
time_interval, # 7
b_calculate2, # 8
results2, # 9
results_string2, # 10
]
def load_data_function(self, b):
self.box3.controller_time_frame.observe(handler=self.filter_initial_trajectories, type='change', names='value')
# update max value of time interval for adaptation curve calculation
self.box5.children[8].max = len(self.frames)/self.frames_second.value # updated. it was [7]
# get number of frames and micron/pixel
self.pixels_micron = b.pixels_micron
self.frames_second = b.frames_second
# this function needs to do following things:
# load frames
if len(self.frames) == 0:
self.frames = pims.ImageSequence(b.path.value+'/*.jpg', as_grey=True)
# call function that plots three frames
self.populate_frames()
# generate histogram of mass distribution and place it in self.mass_histogram_box
self.refresh_histogram()
# open next window
self.interface.selected_index = 1
# Generate image for frame 0
y = mpl.pyplot
a = y.imshow(self.frames[0])
y.close()
buf = BytesIO()
canvas = FigureCanvasAgg(a.figure)
canvas.print_png(buf)
data = buf.getvalue()
self.video_wid.value = data
def refresh_histogram(self):
# identify frames
frames = [0, round(len(self.frames)/2), len(self.frames)-1]
children = [Image(value=self.get_hist_data(self.frames[element])) for element in frames]
self.mass_histogram_box.children = children
# new mass value is b['new']
# create histogram and place in box self.mass_histogram_box
def refresh_trajectories_ensemble(self, b):
# observe controller
# Generate trajectories plot and set as children of self.box_trajectories_ensemble
self.f = tp.batch(self.frames[:],
self.diameter.value,
minmass=self.min_mass.value,
invert=self.invert.value,
engine='numba',
processes='auto')
self.generate_link(self.f)
display(self.interface)
self.number_frames.max = len(self.frames)-1
self.interface.selected_index = 2
        # Modify the 'Characterization' widget
self.update_characterization_widget()
def recalculate_link(self,b):
self.generate_link(self.f)
display(self.interface)
def generate_link(self, f):
self.t = tp.link_df(f, self.max_displacement.value, memory=self.memory.value) # maximum displacement in pixels.
self.t1 = tp.filter_stubs(self.t, self.number_frames.value)
self.legend6.value = '<style>div.a {line-height: normal;}</style>''<div class="a"> Showing ' + \
str(self.t1['particle'].nunique()) + ' trajectories out of ' + \
str(self.t['particle'].nunique()) + ' total trajectories.' + ' </div>'
fig_size = [7, 7]
plt.figure(figsize=fig_size)
ax = plt.gca()
yfig = tp.plot_traj(self.t1, ax=ax)
buf = BytesIO()
canvas = FigureCanvasAgg(yfig.figure)
canvas.print_png(buf)
data_fig = buf.getvalue()
plt.close(ax.figure)
self.box_trajectories_ensemble1.children = [Image(value=data_fig)]
plt.figure(figsize=fig_size)
ax = plt.gca()
yfig = tp.plot_traj(self.t1, ax=ax)
# generate a new data frame containing X positions for each particle
x = self.t1.set_index(['frame', 'particle'])['x'].unstack()
y = self.t1.set_index(['frame', 'particle'])['y'].unstack()
id_particles = x.columns.values
self.trajectories_menu.options = id_particles
self.current_ids = id_particles
self.trajectories_menu.value = id_particles[-1]
#update .options trait of dropdown Trajectory # of the individual trajectories in characterization widget
self.update_characterization_widget()
counter = 0
for particle in id_particles:
if counter < 200:
#get x and y position
x_text = x[np.isfinite(x[particle])][particle].iloc[0]
y_text = y[np.isfinite(y[particle])][particle].iloc[0]
#plot ID
plt.text(x_text, y_text, str(particle), fontsize=10)
counter += 1
else:
break
buf = BytesIO()
canvas = FigureCanvasAgg(yfig.figure)
canvas.print_png(buf)
data_fig = buf.getvalue()
plt.close(ax.figure)
self.box_trajectories_ensemble2.children = [Image(value=data_fig)]
def populate_frames(self):
# identify frames
frames = [0, round(len(self.frames)/2), len(self.frames)-1]
children = [Image(value=self.get_fig_data(self.frames[element])) for element in frames]
self.frames_container.children = children
def get_fig_data(self, data):
        # this method generates a figure from frame data and returns a byte string that can be displayed by the Image widget.
# use preset parameters to circle cells.
f = tp.locate(data, self.diameter.value, minmass=self.min_mass.value, invert=self.invert.value) # frame number, diameter of particle
plt.figure(figsize=[5, 4])
ax = plt.gca()
ax.set(xlabel='y, [px]', ylabel='x, [px] ')
        y = tp.annotate(f, data, invert=self.invert.value, color='blue', ax=ax)  # TODO: modify the 'annotate' call so that it does not print output
buf = BytesIO()
canvas = FigureCanvasAgg(y.figure)
canvas.print_png(buf)
data_fig = buf.getvalue()
plt.close(ax.figure)
return data_fig
def get_hist_data(self, data):
plt.figure(figsize=[5, 4])
ax = plt.gca()
f = tp.locate(data, self.diameter.value, minmass=self.min_mass.value, invert=self.invert.value) # frame number, size of particle
ax.hist(f['mass'], bins=20)
# Optionally, label the axes.
ax.set(xlabel='mass', ylabel='count')
buf = BytesIO()
canvas = FigureCanvasAgg(ax.figure)
canvas.print_png(buf)
data_fig = buf.getvalue()
plt.close(ax.figure)
return data_fig
def update_hist_frames(self, b):
self.refresh_histogram()
self.populate_frames()
def update_video_parameters(self, b):
self.slider = None
self.play = None
# this function gets called when a certain particle is selected. i.e, when the drop-down menu
# self.trajectories_menu changes its trait value.
# Generate matrix specific for one particle
self.t_i = self.t1[self.t1['particle'] == b['new']]
# update self.video_wid.value with the first image.
if len(self.t_i) != 0:
first_frame = self.t_i['frame'].iloc[0]
plt.figure(figsize=[6, 6])
ax = plt.gca()
ax.set(xlabel='x, [px]', ylabel='y, [px]')
y = tp.annotate(self.t_i[self.t_i['frame'] == first_frame], self.frames[first_frame],
color='blue', invert=False, ax=ax);
buf = BytesIO()
canvas = FigureCanvasAgg(y.figure)
canvas.print_png(buf)
data_fig = buf.getvalue()
plt.close(ax.figure)
self.video_wid.value = data_fig
#update values of self.play & self.slider.
self.play = widgets.Play(
value=0,
min=0,
max=len(self.t_i['frame']),
step=1,
description="Press play",
disabled=False)
self.slider = widgets.IntSlider(continuous_update=True,
value=0, min=0, max=len(self.t_i['frame']),
description='Frame #')
widgets.jslink((self.play, 'value'), (self.slider, 'value'))
self.slider.observe(handler=self.update_video, type='change', names='value')
self.trajectories_menu.observe(handler=self.update_video_parameters, type='change', names='value')
single_trajectory = self.get_single_trajectory(self.t_i)
self.box3_1.children = [self.legend3_1, widgets.HBox([self.trajectories_menu, self.play, self.slider]),
HBox([Box([self.video_wid]), Box([single_trajectory])])]
def update_video(self, b):
counter = b['new'] # contains iloc of self.t_i
if counter < len(self.t_i):
frame_id = self.t_i['frame'].iloc[counter]
plt.figure(figsize=[6, 6])
ax = plt.gca()
ax.set(xlabel='x, [px]', ylabel='y, [px]')
y = tp.annotate(self.t_i[self.t_i['frame'] == frame_id], self.frames[frame_id],
color='blue', invert=False, ax=ax);
plt.text(100, 100, str(round(frame_id/self.frames_second.value, 3)) + ' s', color='white')
buf = BytesIO()
canvas = FigureCanvasAgg(y.figure)
canvas.print_png(buf)
data_fig = buf.getvalue()
plt.close(ax.figure)
self.video_wid.value = data_fig
def update_characterization_widget(self):
# current ids are in self.current_ids
# update ensemble Box. Target box: self.ensemble_controllers_box.children
# update individual box. Target: self.individual_controllers_box.children
self.trajectories_id = Dropdown(description='Trajectory #', options=self.current_ids)
self.trajectories_id.observe(handler=self.update_frame_range, type='change', names='value')
self.trajectories_id.value = self.current_ids[-1]
self.box4.children[6].children[0].children[0].max = len(self.current_ids)
def update_frame_range(self, b):
# b['new'] contains the ID of the particle.
t_i = self.t1[self.t1['particle'] == b['new']]
min_value = t_i['frame'].iloc[0]/self.frames_second.value if t_i['frame'].iloc[0]/self.frames_second.value != 0 \
else 1/self.frames_second.value
max_value = t_i['frame'].iloc[-1]/self.frames_second.value
frame_range = FloatRange(value=[min_value, max_value],
min=min_value,
max=max_value,
step=1/self.frames_second.value,
description='Time Range',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f')
threshold_mean = widgets.BoundedIntText(
value=self.optimal_parameter_set[0],
min=0,
max=len(self.frames),
step=1,
description='# Frames',
disabled=False,
continuous_update=True)
smooth_cycles = widgets.BoundedIntText(
value=self.optimal_parameter_set[1],
min=0,
max=10,
step=1,
description='# Smooth',
disabled=False,
continuous_update=True)
acceleration_threshold = widgets.BoundedIntText(
value=self.optimal_parameter_set[2],
min=0,
max=1000,
step=1,
description='Acc. Thrhld',
disabled=False,
continuous_update=False)
self.individual_controllers_box.children = [self.trajectories_id,
frame_range,
threshold_mean,
smooth_cycles,
acceleration_threshold]
frame_range.observe(handler=self.print_individual_characterization, type='change', names='value')
if self.interface.selected_index == 4:
particle = b['new'] # original [b['new']]
self.vel, self.acc_vel = self.calculate_vel(self.t1, particle)
# smooth 0 times.
smooth = int(self.optimal_parameter_set[1])
n_frames = int(self.optimal_parameter_set[0])
if smooth != 0 and n_frames != 0:
self.av_vel = self.calculate_average_vel(self.vel, threshold=n_frames)
for x in range(0, smooth - 1):
self.av_vel = self.calculate_average_vel(self.av_vel, threshold=n_frames)
else:
self.av_vel = self.vel
# calculate acceleration from smoothed data.
self.av_acc_vel = self.calculate_av_acc(self.av_vel)
c = {'new': [min_value, max_value]}
self.get_peaks(abs(self.av_acc_vel))
self.print_individual_characterization(c)
threshold_mean.time_range = c
threshold_mean.particle = particle
smooth_cycles.time_range = c
smooth_cycles.particle = particle
acceleration_threshold.time_range = c
acceleration_threshold.particle = particle
threshold_mean.observe(handler=self.update_average_vel, type='change', names='value')
smooth_cycles.observe(handler=self.update_average_vel, type='change', names='value')
acceleration_threshold.observe(handler=self.update_average_vel, type='change', names='value')
def update_average_vel(self, b):
particle = b['owner'].particle
self.vel, self.acc_vel = self.calculate_vel(self.t1, particle)
if b['owner'].description == '# Frames':
threshold = b['new']
smooth_freq = self.individual_controllers_box.children[3].value
mph = self.individual_controllers_box.children[4].value
if b['owner'].description == '# Smooth':
smooth_freq = b['new']
threshold = self.individual_controllers_box.children[2].value
mph = self.individual_controllers_box.children[4].value
if b['owner'].description == 'Acc. Thrhld':
smooth_freq = self.individual_controllers_box.children[3].value
threshold = self.individual_controllers_box.children[2].value
mph = b['new']
if smooth_freq != 0 and threshold != 0:
self.av_vel = self.calculate_average_vel(self.vel, threshold=threshold)
for x in range(0, smooth_freq - 1):
self.av_vel = self.calculate_average_vel(self.av_vel, threshold=threshold)
else:
self.av_vel = self.vel
self.av_acc_vel = self.calculate_av_acc(self.av_vel) # or calculate_av_acc
c = b['owner'].time_range
self.get_peaks(abs(self.av_acc_vel), mph=mph)
self.print_individual_characterization(c)
def print_individual_characterization(self, b):
# target: self.individual_metrix_box
# actual trajectory is contained in self.trajectories_id.value
# x = self.t1.set_index(['frame', 'particle'])['x'].unstack()
# y = self.t1.set_index(['frame', 'particle'])['y'].unstack()
# vel, angular = self.calculate_vel(x, y)
time_frame = b['new']
        min_val = time_frame[0] if time_frame[0] != 0 else 1/self.frames_second.value
max_val = time_frame[1]
particle = self.trajectories_id.value
self.fig_individual, ((self.ax1_ind, self.ax2_ind),
(self.ax3_ind, self.ax4_ind)) = plt.subplots(2, 2,
figsize=[15, 10],
sharex='all')
# instantaneous velocities
ax = self.ax1_ind
ax.plot(self.vel.set_index([self.vel.index.values/self.frames_second.value]).loc[min_val:max_val], color='blue');
ax.set_ylabel('Linear Velocity, micron/s', color='blue')
ax.tick_params('y', colors='blue')
ax.grid(color='grey', linestyle='--', linewidth=0.5)
# instantaneous accelerations
ax3 = self.ax3_ind
ax3.plot(self.acc_vel.set_index([self.acc_vel.index.values/self.frames_second.value]).loc[min_val:max_val],
color='grey');
ax3.set_ylabel('Acceleration, micron/s/s', color='grey')
ax3.tick_params('y', colors='grey')
ax3.grid(color='grey', linestyle='--', linewidth=0.5)
# Average Velocities
ax5 = self.ax2_ind
ax5.plot(self.av_vel.set_index([self.av_vel.index.values/self.frames_second.value]).loc[min_val:max_val], color='blue');
ax5.tick_params('y', colors='blue')
ax6 = ax5.twinx() # instantiate a second axes that shares the same x-axis
ax6.plot(abs(self.av_acc_vel.set_index([self.av_acc_vel.index.values / self.frames_second.value]).loc[min_val:max_val]),
color='grey', alpha=0.5);
ax6.set_ylabel('Absolute Acceleration, micron/s/s', color='grey')
ax6.tick_params('y', colors='grey')
ax6.grid(color='grey', linewidth=0.5)
t = self.peaks_table.set_index(self.peaks_table.index.values / self.frames_second.value).loc[min_val:max_val]
ax6.scatter(t.index.values, t.values,marker='*', s=300, alpha=0.5, c='grey')
ax6.grid(color='grey', linestyle='--', linewidth=0.5)
try:
val = self.individual_controllers_box.children[4].value
ax6.plot([min_val, max_val], [val, val], linewidth=2, color='black')
except:
ax6.plot([min_val, max_val], [self.peak_height, self.peak_height], linewidth=2, color='black')
# Average Accelerations
ax7 = self.ax4_ind
ax7.plot(self.av_acc_vel.set_index([self.av_acc_vel.index.values/self.frames_second.value]).loc[min_val:max_val],
color='grey');
ax7.plot([min_val + 1/self.frames_second.value, max_val], [0, 0], color='black', linestyle='--', alpha=0.5)
ax7.tick_params('y', colors='grey')
ax7.grid(color='grey', linestyle='--', linewidth=0.5)
# set title of all four plots
self.ax1_ind.set_title('Instantaneous Velocities')
self.ax2_ind.set_title('Average Velocities')
self.ax3_ind.set_title('Acceleration (Inst. Vel)')
self.ax3_ind.set_xlabel('Time, s')
self.ax4_ind.set_title('Acceleration (Avg. Vel)')
self.ax4_ind.set_xlabel('Time, s')
data_fig = self.easy_print(ax)
plt.close(ax.figure)
self.individual_metrix_box.children = [Image(value=data_fig)]
def calculate_vel(self, data, particleID):
        # This function takes a DataFrame containing the information of all particles and a
        # desired particle ID, and returns velocity and acceleration DataFrames for that particle.
particleID = particleID if (isinstance(particleID, int) or isinstance(particleID, list)) else int(particleID)
# get t_i for the desired particle
t_i = data[data['particle'] == particleID]
# get x and y vectors for the desired particle
x = t_i['x']
y = t_i['y']
vel = pd.DataFrame(np.nan, index=t_i.index.values[1:], columns=[particleID])
acc_vel = pd.DataFrame(np.nan, index=t_i.index.values[2:], columns=[particleID])
for frame in x.index.values[1:]:
d = ((x.loc[frame] - x.loc[frame - 1]) ** 2 + (y.loc[frame] - y.loc[frame - 1]) ** 2) ** 0.5
vel.loc[frame] = d * self.frames_second.value / self.pixels_micron.value
if frame > x.index.values[1]:
acc_vel.loc[frame] = (vel.loc[frame] - vel.loc[frame-1]) * self.frames_second.value
return vel, acc_vel
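    # Illustrative usage (a sketch; assumes self.t1 is a linked trackpy DataFrame with
    # 'particle', 'frame', 'x' and 'y' columns, and particle ID 0 is hypothetical):
    #   vel, acc = self.calculate_vel(self.t1, 0)
    # vel is indexed by frame and holds speed in micron/s; acc holds its
    # frame-to-frame derivative in micron/s^2.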
def calculate_average_vel(self, vel, threshold=4):
        average_vel = pd.DataFrame(np.nan, index=vel.index, columns=vel.columns)
"""
废弃
新浪网设置了访问频次限制。
新浪有许多以列表形式提供的汇总列,每天访问也仅仅一次。
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from datetime import date
from urllib.error import HTTPError
import pandas as pd
import requests
from bs4 import BeautifulSoup
import logbook
from toolz.itertoolz import partition_all
from ..setting.constants import QUOTE_COLS
from cnswd.utils import ensure_list
# from cnswd.data_proxy import DataProxy
from cnswd.websource.base import friendly_download, get_page_response
from .._exceptions import NoWebData, FrequentAccess
QUOTE_PATTERN = re.compile('"(.*)"')
NEWS_PATTERN = re.compile(r'\W+')
STOCK_CODE_PATTERN = re.compile(r'\d{6}')
SORT_PAT = re.compile(r'↑|↓')
DATA_BASE_URL = 'http://stock.finance.sina.com.cn/stock/go.php/'
MARGIN_COL_NAMES = [
'股票代码', '股票简称',
'融资余额', '融资买入额', '融资偿还额',
'融券余量金额', '融券余量', '融券卖出量', '融券偿还量', '融券余额'
]
INDEX_QUOTE_COLS = [
'指数简称', '最新价', '涨跌', '涨跌幅%', '成交量(万手)', '成交额(万元)'
]
logger = logbook.Logger('新浪网')
@friendly_download(10, 10, 10)
def fetch_company_info(stock_code):
"""获取公司基础信息"""
url_fmt = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/{}.phtml'
url = url_fmt.format(stock_code)
    df = pd.read_html(url, attrs={'id': 'comInfo1'})
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from numpy.random import uniform, seed
# from scipy.interpolate import griddata
tf.random.set_seed(123)
data_path = "../../../data"
train_file_path = "%s/titanic/train.csv" % data_path
test_file_path = "%s/titanic/eval.csv" % data_path
# Load dataset.
dftrain = pd.read_csv(train_file_path)
dfeval = pd.read_csv(test_file_path)
import re
import json
import datetime
from datetime import datetime
from datetime import timedelta
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import argparse
import os
import csv
class ProcessTweets(object):
def __init__(self, filename, outname):
self.filename = filename
self.outname = outname
json_file = open(filename)
json_str = json_file.read()
self.json = json.loads(json_str)
self.sid = SentimentIntensityAnalyzer()
def clean_tweet(self, tweet):
        return ' '.join(re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
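    # Illustrative example (hypothetical input): the regex above strips mentions,
    # URLs and punctuation, so
    #   self.clean_tweet("@user check https://t.co/xyz #BTC!!") -> "check BTC"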
def get_sentiment(self, tweet):
polarity_scores = self.sid.polarity_scores(tweet)
return polarity_scores['neg'], polarity_scores['pos'], polarity_scores['neu']
def get_tweets(self):
df = pd.DataFrame.from_dict(self.json)
df['timestamp'] = pd.to_datetime(df['timestamp'])
df.sort_values(by=['timestamp'], inplace=True, ascending=True)
df.reset_index(inplace=True)
self.json = df.to_dict()
timestamps = self.json['timestamp']
start_date = pd.to_datetime(timestamps[0])
end_date = start_date + timedelta(hours=1)
sentiments = dict()
temp = []
tweets = self.json['text']
for count, tweet in enumerate(tweets, start=0):
tweet = tweets[tweet]
curr_time = timestamps[count]
if isinstance(tweet, int):
print(tweet)
if curr_time >= start_date and curr_time < end_date:
neg, pos, neu = self.get_sentiment(self.clean_tweet(tweet))
temp.append([neg, pos, neu])
else:
means = np.mean(np.asarray(temp), axis=0)
obj = {'neg': means[0], 'pos': means[1], 'neu': means[2]}
sentiments[start_date.strftime("%Y-%m-%d %H:%M:%S")] = obj
temp = []
start_date = end_date
end_date = start_date + timedelta(hours=1)
neg, pos, neu = self.get_sentiment(self.clean_tweet(tweet))
temp.append([neg, pos, neu])
        tmp_df = pd.DataFrame.from_dict(sentiments)
import pandas as pd
import logging
_log = logging.getLogger(__name__)
COUNTRIES = [
'australia',
'brazil',
'canada',
'china',
'denmark',
'finland',
'france',
'germany',
'hong kong',
'india',
'indonesia',
'italy',
'japan',
'malaysia',
'mexico',
'netherlands',
'norway',
'philippines',
'saudi arabia',
'singapore',
'south-korea',
'spain',
'sweden',
'taiwan',
'thailand',
'united arab emirates',
'united-kingdom',
'united-states',
'vietnam',
]
COUNTRY_PATH_FORMAT = 'https://raw.githubusercontent.com/YouGov-Data/covid-19-tracker/master/data/{}.csv'
def _load_dataset():
_log.info("Loading dataset")
all_data = []
for country in COUNTRIES:
country_name = country.replace(' ', '-')
path = COUNTRY_PATH_FORMAT.format(country_name)
        try:
            country_df = pd.read_csv(path)
        except Exception:
            try:
                country_df = pd.read_csv(path, encoding='cp1252')
            except Exception:
                _log.error(f'ERROR WITH {country}')
                continue  # skip countries whose data cannot be read at all
country_df['country'] = country
all_data.append(country_df)
_log.info("Loaded")
    return pd.concat(all_data, axis=0)
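# Note: the returned frame stacks every country's survey responses row-wise and
# adds a 'country' column; downstream code is expected to group or filter on it.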
# ------------------------------------------------------------------------------
# Copyright IBM Corp. 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
# flake8: noqa E203
import getpass
import time
import xml.etree.ElementTree as ET
from collections import namedtuple
from datetime import datetime
from pprint import pformat
import ibm_boto3
import pandas as pd
import requests
from ibm_botocore.client import Config
try:
from utilities import IBMCloudAccess, confirm_action
except Exception:
from .utilities import IBMCloudAccess, confirm_action
from requests.exceptions import HTTPError
import json
import logging
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Helper class to interact with IBM Watson Studio projects
# ------------------------------------------------------------------------------
class ProjectLib:
"""
This is used by SQLClient/COSClient via :py:meth:`read` and :py:meth:`write` methods
Parameters
----------
project: project_lib.Project
The object
`from project_lib import Project`
file_name: str
The file_name where the data about SQL queries' jobs should be read/stored
The content of this file is used to track progress
file_type: str, optional
The file format of `file_name`
.. todo::
NOTE: Currently support only one file
To support many files, we can switch to using dict such as self._data_out[file_name]
"""
def __init__(self, project, file_name, file_type="json"):
self._project = project
self._target_filename = None
self._file_type = None
self._track_file(file_name, file_type)
self._data_out = None
def _track_file(self, filename, file_type):
""" map to real file name """
assert file_type in ["json", "csv"]
self._file_type = file_type
self._target_filename = filename
if not filename.endswith((".json", ".csv")):
self._target_filename = self._target_filename + "." + file_type
return self._target_filename
@property
def data(self):
""" file-like object: storing the file-content """
return self._data_out
@property
def project(self):
""" Project: the project-lib object"""
return self._project
def read(self, file_name=None, file_type="json"):
"""
Read from project-lib's file into file-like object
Parameters
----------
file_name: str, optional
File name in the Watson Studio's project assets. If the file is not provided, then it reads the one passed into the object's constructor.
file_type: str, optional
The type of file, "json" or "csv"
Returns
-------
file-like object:
The content of the data, in dict (json) or pd.DataFrame (csv)
"""
if file_name is None:
file_name = self._target_filename
# Fetch the file
file_is_found = False
if self._data_out is None:
filename_list = self._project.get_files()
for x in filename_list:
if x["name"] == file_name:
file_is_found = True
file_content = self._project.get_file(file_name)
if file_type == "json":
                        # Read the JSON data file from the object storage into a dict
file_content.seek(0)
# import pandas as pd
# pd.read_json(my_file, nrows=10)
import json
self._data_out = json.load(file_content)
elif file_type == "csv":
# Read the CSV data file from the object storage into a pandas DataFrame
file_content.seek(0)
import pandas as pd
                        self._data_out = pd.read_csv(file_content, nrows=10)
# -*- coding: utf-8 -*-
"""
=================================
myInvestor-toolkit startup script
=================================
"""
import datetime as dt
import os
import pandas as pd
from fundamental import DividendYield
from source import YahooFinanceSource
class StockAnalysis:
"""
Stock analysis.
"""
_TICKER_FILE = 'dataset/ticker.csv'
_CURRENT_PRICE_FILE = 'dataset/current_price.csv'
def fund_update_dividend_yields_for_exchange(self, exchange):
"""
Update existing dividend yield file.
:param exchange: Exchange symbol.
:return: None
"""
        print('Updating dividend yields for {}'.format(exchange))
        dividend_file = 'dataset/{}_dividend_yields.csv'.format(exchange)
        # Check that the dividend file exists before trying to read and update it
        if not os.path.exists(dividend_file):
            print('No dividend yield file found for {}'.format(exchange))
            return
        df_dividend_data = pd.read_csv(dividend_file, dtype=str)
def fund_get_dividend_yields_for_exchange(self, exchange, skip_if_exist=True):
"""
        Get dividend yields for the exchange.
        :param exchange: Exchange symbol.
        :param skip_if_exist: Skip tickers whose dividend yields are already in the file.
:return: True on success, otherwise return False.
"""
df_stocks = pd.read_csv(self._TICKER_FILE, dtype=str)
df_stocks = df_stocks.loc[df_stocks['Exchange'] == exchange]
df_stocks = df_stocks.set_index(['Ticker'])
count = len(df_stocks)
index = 1
if (count == 0): return
dividend_file = 'dataset/{}_dividend_yields.csv'.format(exchange)
if (os.path.exists(dividend_file)):
df_dividend_data = pd.read_csv(dividend_file)
df_dividend_data = df_dividend_data.set_index(['symbol', 'date'])
else:
df_dividend_data = pd.DataFrame()
for ticker, row in df_stocks.iterrows():
print('{} / {} - Getting dividend yields for {}'.format(index, count, ticker))
index = index + 1
# Skip if already exist
if (skip_if_exist):
if not df_dividend_data.empty and ticker in df_dividend_data.index:
print('Skipping {}'.format(ticker))
continue
try:
dividend_yield = DividendYield(ticker)
stock_dividends = dividend_yield.get_history()
for symbol, values in stock_dividends.items():
prices = values['prices']
dividend_list = []
for dividend in prices:
dividend_list.append(pd.Series(dividend))
if len(dividend_list) > 0:
df_dividend = pd.DataFrame(dividend_list)
df_dividend['symbol'] = symbol
df_dividend = df_dividend.set_index(['symbol', 'date'])
if (df_dividend_data.empty):
df_dividend_data = df_dividend
else:
df_dividend_data = df_dividend_data.combine_first(df_dividend)
df_dividend_data.to_csv(dividend_file, encoding='utf-8')
except Exception as e:
print('Ooops...error with {} - {}'.format(ticker, str(e)))
continue
return True
return False
def fund_get_stock_financials(self, ticker_file, price_file_name=_CURRENT_PRICE_FILE):
"""
        Get current prices and write them to a file.
        :param ticker_file: Ticker file.
:param price_file_name: Output price file name.
:return: True on success, otherwise return False.
"""
df_stocks = pd.read_csv(ticker_file, dtype=str)
tickers = df_stocks.symbol.unique()
current = 1
df_all_stocks_summaries = pd.DataFrame()
for ticker in tickers:
print('{} - Getting current info for {}.'.format(current, ticker))
current = current + 1
yahoo_finance_source = YahooFinanceSource(ticker)
stock_summary_data = yahoo_finance_source.get_stock_summary_data()
stock_summary_data[ticker]['symbol'] = ticker
            df_stock_summary = pd.DataFrame([pd.Series(stock_summary_data[ticker])])
import datetime as dt
import pandas as pd
import pytest
from intake_google_analytics.utils import as_day, is_dt
def test_is_dt():
assert is_dt(dt.date(2020, 3, 19))
assert is_dt(dt.datetime(2020, 3, 19, 16, 20, 0))
assert is_dt(pd.to_datetime('2020-03-19'))
assert is_dt(pd.Timestamp(2020, 3, 19))
assert not is_dt('2020-03-19')
assert not is_dt(dt.timedelta(days=1))
assert not is_dt(pd.DateOffset(months=2))
def test_as_day():
assert as_day(dt.date(2020, 3, 19)) == '2020-03-19'
assert as_day(dt.datetime(2020, 3, 19, 16, 20, 0)) == '2020-03-19'
assert as_day(pd.to_datetime('2020-03-19')) == '2020-03-19'
assert as_day(pd.to_datetime('2020-03-19 16:20:00')) == '2020-03-19'
assert as_day(pd.Timestamp(2020, 3, 19)) == '2020-03-19'
    with pytest.raises(AttributeError):
        as_day(dt.timedelta(days=1))
    with pytest.raises(AttributeError):
        as_day(pd.DateOffset(days=1))
import multiprocessing, logging
import pandas as pd
from os import listdir
from os.path import isfile, join
from pandas import DataFrame
from . import load_pointer
from ..savers import save_pointer
from .. import s3_utils, multiprocessing_utils
from .load_s3 import list_bucket_prefix_suffix_s3
logger = logging.getLogger(__name__)
def load(path, delimiter=None, encoding='utf-8', columns_to_keep=None, dtype=None, error_bad_lines=True, header=0,
names=None, format=None, nrows=None, skiprows=None, usecols=None, low_memory=False, converters=None,
filters=None, sample_count=None, worker_count=None, multiprocessing_method='forkserver') -> DataFrame:
if isinstance(path, list):
return load_multipart(
paths=path, delimiter=delimiter, encoding=encoding, columns_to_keep=columns_to_keep,
dtype=dtype, error_bad_lines=error_bad_lines, header=header, names=names, format=format,
nrows=nrows, skiprows=skiprows, usecols=usecols, low_memory=low_memory, converters=converters,
filters=filters,
worker_count=worker_count,
multiprocessing_method=multiprocessing_method
)
if format is not None:
pass
elif path.endswith(save_pointer.POINTER_SUFFIX):
format = 'pointer'
elif path[-1] == '/' and s3_utils.is_s3_url(path): # and path[:2] == 's3'
format = 'multipart_s3'
elif path[-1] == '/' and not s3_utils.is_s3_url(path): # and path[:2] != 's3'
format = 'multipart_local'
elif '.parquet' in path or path[-1] == '/':
format = 'parquet'
else:
format = 'csv'
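    # Illustrative outcomes of the inference above (paths are hypothetical, and the
    # pointer suffix is whatever save_pointer.POINTER_SUFFIX defines):
    #   'models/preds' + POINTER_SUFFIX -> format='pointer'
    #   's3://bucket/data/'             -> format='multipart_s3'
    #   'data/train.parquet'            -> format='parquet'
    #   'data/train.tsv'                -> format='csv' (delimiter inferred as '\t' below)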
if delimiter is None:
if path.endswith('.tsv'):
delimiter = '\t'
logger.debug(f'File delimiter for {path} inferred as \'\\t\' (tab). If this is incorrect, please manually load the data as a pandas DataFrame.')
else:
delimiter = ','
logger.debug(f'File delimiter for {path} inferred as \',\' (comma). If this is incorrect, please manually load the data as a pandas DataFrame.')
if format == 'pointer':
content_path = load_pointer.get_pointer_content(path)
return load(path=content_path, delimiter=delimiter, encoding=encoding, columns_to_keep=columns_to_keep, dtype=dtype,
error_bad_lines=error_bad_lines, header=header, names=names, format=None, nrows=nrows, skiprows=skiprows,
usecols=usecols, low_memory=low_memory, converters=converters, filters=filters, sample_count=sample_count,
worker_count=worker_count, multiprocessing_method=multiprocessing_method)
elif format == 'multipart_s3':
bucket, prefix = s3_utils.s3_path_to_bucket_prefix(path)
return load_multipart_s3(bucket=bucket, prefix=prefix, columns_to_keep=columns_to_keep, dtype=dtype, filters=filters,
sample_count=sample_count, worker_count=worker_count, multiprocessing_method=multiprocessing_method) # TODO: Add arguments!
elif format == 'multipart_local':
paths = [join(path, f) for f in listdir(path) if (isfile(join(path, f))) & (f.startswith('part-'))]
return load_multipart(
paths=paths, delimiter=delimiter, encoding=encoding, columns_to_keep=columns_to_keep,
dtype=dtype, error_bad_lines=error_bad_lines, header=header, names=names, format=None,
nrows=nrows, skiprows=skiprows, usecols=usecols, low_memory=low_memory, converters=converters,
filters=filters,
worker_count=worker_count,
multiprocessing_method=multiprocessing_method,
)
elif format == 'parquet':
try:
df = pd.read_parquet(path, columns=columns_to_keep, engine='fastparquet') # TODO: Deal with extremely strange issue resulting from torch being present in package, will cause read_parquet to either freeze or Segmentation Fault when performing multiprocessing
except:
            df = pd.read_parquet(path, columns=columns_to_keep, engine='pyarrow')
"""
Pre-trained model obtained from:
https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki.ru.zip
https://gist.github.com/brandonrobertz/49424db4164edb0d8ab34f16a3b742d5
"""
import pandas as pd
import numpy as np
import text
import super_pool
from tqdm import tqdm
cleanup = text.SimpleCleanup()
pool = super_pool.SuperPool()
def load_pretrain():
    # The first line of wiki.ru.vec holds the vocabulary size and vector dimension, e.g. "1888424 300"
with open("../data/wiki.ru.vec", "r") as f:
data = f.readlines()
samples, dim = data[0].split()
E = np.zeros(shape=(int(samples), int(dim)), dtype="float32")
word_index = {}
idx = 0
for line in tqdm(data[1:], total=E.shape[0]):
word, vec = line.split(" ", 1)
word_index[word] = idx
E[idx, :] = [float(i) for i in vec.split()]
idx += 1
return word_index, E
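# Illustrative usage (a sketch; loading all float32 vectors takes a couple of GB of RAM):
#   word_index, E = load_pretrain()
#   vec = E[word_index['привет']]   # 300-d embedding for a word, if it is in the vocabulary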
def prepare():
""" Cleanup and save one text per line to feed fasttext.
"""
df = pd.read_csv("../input/train.csv", usecols=["description", "title"])
df_test = pd.read_csv("../input/test.csv", usecols=["description", "title"])
    df = pd.concat([df, df_test], axis=0)
#%%
import os
import sys
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
import torch
from utils import *
HOME = os.path.dirname(os.path.abspath(__file__))
# DATA_DIR = '/home/scao/Documents/kaggle-riiid-test/data/'
# MODEL_DIR = f'/home/scao/Documents/kaggle-riiid-test/model/'
MODEL_DIR = HOME+'/model/'
DATA_DIR = HOME+'/data/'
PRIVATE = False
DEBUG = False
MAX_SEQ = 150
VAL_BATCH_SIZE = 4096
TEST_BATCH_SIZE = 4096
SIMU_PUB_SIZE = 25_000
SIMU_PRI_SIZE = 250_000
#%%
class Iter_Valid(object):
def __init__(self, df, max_user=1000):
df = df.reset_index(drop=True)
self.df = df
self.user_answer = df['user_answer'].astype(str).values
self.answered_correctly = df['answered_correctly'].astype(str).values
df['prior_group_responses'] = "[]"
df['prior_group_answers_correct'] = "[]"
self.sample_df = df[df['content_type_id'] == 0][['row_id']]
self.sample_df['answered_correctly'] = 0
self.len = len(df)
self.user_id = df.user_id.values
self.task_container_id = df.task_container_id.values
self.content_type_id = df.content_type_id.values
self.max_user = max_user
self.current = 0
self.pre_user_answer_list = []
self.pre_answered_correctly_list = []
def __iter__(self):
return self
def fix_df(self, user_answer_list, answered_correctly_list, pre_start):
df= self.df[pre_start:self.current].copy()
sample_df = self.sample_df[pre_start:self.current].copy()
df.loc[pre_start,'prior_group_responses'] = '[' + ",".join(self.pre_user_answer_list) + ']'
df.loc[pre_start,'prior_group_answers_correct'] = '[' + ",".join(self.pre_answered_correctly_list) + ']'
self.pre_user_answer_list = user_answer_list
self.pre_answered_correctly_list = answered_correctly_list
return df, sample_df
def __next__(self):
added_user = set()
pre_start = self.current
pre_added_user = -1
pre_task_container_id = -1
user_answer_list = []
answered_correctly_list = []
while self.current < self.len:
crr_user_id = self.user_id[self.current]
crr_task_container_id = self.task_container_id[self.current]
crr_content_type_id = self.content_type_id[self.current]
if crr_content_type_id == 1:
# no more than one task_container_id of "questions" from any single user
# so we only care for content_type_id == 0 to break loop
user_answer_list.append(self.user_answer[self.current])
answered_correctly_list.append(self.answered_correctly[self.current])
self.current += 1
continue
if crr_user_id in added_user and ((crr_user_id != pre_added_user) or (crr_task_container_id != pre_task_container_id)):
                # known user (not the previous user, or a different task container)
return self.fix_df(user_answer_list, answered_correctly_list, pre_start)
if len(added_user) == self.max_user:
if crr_user_id == pre_added_user and crr_task_container_id == pre_task_container_id:
user_answer_list.append(self.user_answer[self.current])
answered_correctly_list.append(self.answered_correctly[self.current])
self.current += 1
continue
else:
return self.fix_df(user_answer_list, answered_correctly_list, pre_start)
added_user.add(crr_user_id)
pre_added_user = crr_user_id
pre_task_container_id = crr_task_container_id
user_answer_list.append(self.user_answer[self.current])
answered_correctly_list.append(self.answered_correctly[self.current])
self.current += 1
if pre_start < self.current:
return self.fix_df(user_answer_list, answered_correctly_list, pre_start)
else:
raise StopIteration()
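# Hypothetical usage sketch: Iter_Valid emulates the Kaggle time-series API by
# yielding (test_batch, sample_prediction) pairs covering at most `max_user`
# users per step, with prior answers injected into the first row of each batch:
#   iter_test = Iter_Valid(valid_df, max_user=1000)
#   for test_batch, sample_prediction in iter_test:
#       pass  # predict on test_batch and fill sample_prediction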
if DEBUG:
    test_df = pd.read_pickle(DATA_DIR+'cv2_valid.pickle')
import warnings
warnings.simplefilter(action = 'ignore', category = UserWarning)
# Front matter
import os
import glob
import re
import pandas as pd
import numpy as np
import scipy.constants as constants
import sympy as sp
from sympy import Matrix, Symbol
from sympy.utilities.lambdify import lambdify
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib import gridspec
# Seaborn, useful for graphics
import seaborn as sns
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
rc = {'lines.linewidth': 1,
'axes.labelsize': 20,
'axes.titlesize': 20,
'legend.fontsize': 26,
'xtick.direction': u'in',
'ytick.direction': u'in'}
sns.set_style('ticks', rc=rc)
# Functions
def calc_V_bcc(a):
return a**3
def calc_V_hcp(a,c):
return (np.sqrt(3)/2)*a**2*c
def calc_dV_bcc(a,da):
return 3*a**2*da
def calc_dV_hcp(a,c,da,dc):
return np.sqrt( (np.sqrt(3)*a*c*da)**2 + ((np.sqrt(3)/2)*a**2*dc)**2 )
# Numeric Vinet EOS, used for everything except calculating dP
def VinetEOS(V,V0,K0,Kprime0):
A = V/V0
P = 3*K0*A**(-2/3) * (1-A**(1/3)) * np.exp((3/2)*(Kprime0-1)*(1-A**(1/3)))
return P
# Symbolic Vinet EOS, needed to calculate dP
def VinetEOS_sym(V,V0,K0,Kprime0):
A = V/V0
P = 3*K0*A**(-2/3) * (1-A**(1/3)) * sp.exp((3/2)*(Kprime0-1)*(1-A**(1/3)))
return P
# Create a covariance matrix from EOS_df with V0, K0, and K0prime; used to get dP
def getCov3(EOS_df, phase):
dV0 = np.float(EOS_df[EOS_df['Phase'] == phase]['dV0'])
dK0 = np.float(EOS_df[EOS_df['Phase'] == phase]['dK0'])
dKprime0 = np.float(EOS_df[EOS_df['Phase'] == phase]['dKprime0'])
V0K0_corr = np.float(EOS_df[EOS_df['Phase'] == phase]['V0K0 corr'])
V0Kprime0_corr = np.float(EOS_df[EOS_df['Phase'] == phase]['V0Kprime0 corr'])
K0Kprime0_corr = np.float(EOS_df[EOS_df['Phase'] == phase]['K0Kprime0 corr'])
corr_matrix = np.eye(3)
corr_matrix[0,1] = V0K0_corr
corr_matrix[1,0] = V0K0_corr
corr_matrix[0,2] = V0Kprime0_corr
corr_matrix[2,0] = V0Kprime0_corr
corr_matrix[1,2] = K0Kprime0_corr
corr_matrix[2,1] = K0Kprime0_corr
sigmas = np.array([[dV0,dK0,dKprime0]])
cov = (sigmas.T@sigmas)*corr_matrix
return cov
# Create a covariance matrix with V, V0, K0, and K0prime; used to get dP
def getVinetCov(dV, EOS_df, phase):
cov3 = getCov3(EOS_df, phase)
cov = np.eye(4)
cov[1:4,1:4] = cov3
cov[0,0] = dV**2
return cov
def calc_dP_VinetEOS(V, dV, EOS_df, phase):
# Create function for Jacobian of Vinet EOS
a,b,c,d = Symbol('a'),Symbol('b'),Symbol('c'),Symbol('d') # Symbolic variables V, V0, K0, K'0
Vinet_matrix = Matrix([VinetEOS_sym(a,b,c,d)]) # Create a symbolic Vinet EOS matrix
param_matrix = Matrix([a,b,c,d]) # Create a matrix of symbolic variables
# Symbolically take the Jacobian of the Vinet EOS and turn into a column matrix
J_sym = Vinet_matrix.jacobian(param_matrix).T
# Create a numpy function for the above expression
# (easier to work with numerically)
J_Vinet = lambdify((a,b,c,d), J_sym, 'numpy')
J = J_Vinet(V,*getEOSparams(EOS_df, phase)) # Calculate Jacobian
cov = getVinetCov(dV, EOS_df, phase) # Calculate covariance matrix
dP = (J.T@cov@J).item() # Calculate uncertainty and convert to a scalar
return dP
def getEOSparams(EOS_df, phase):
V0 = np.float(EOS_df[EOS_df['Phase'] == phase]['V0'])
K0 = np.float(EOS_df[EOS_df['Phase'] == phase]['K0'])
Kprime0 = np.float(EOS_df[EOS_df['Phase'] == phase]['Kprime0'])
return V0, K0, Kprime0
def calc_rho(V,dV,M):
# Convert from cubic angstroms to cm^3/mol
V_ccpermol = (V/2)*constants.N_A/(10**24)
rho = M/V_ccpermol
drho = (M*2*10**24/constants.N_A)*(dV/(V**2))
return rho, drho
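# Illustrative use of the helpers above (numbers and the phase label are
# hypothetical; the label must match a row of FeAlloyEOS.csv):
#   V, dV = 21.5, 0.02                                  # unit-cell volume, A^3
#   P = VinetEOS(V, *getEOSparams(EOS_df, 'hcp FeNiSi'))
#   dP = calc_dP_VinetEOS(V, dV, EOS_df, 'hcp FeNiSi')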
# Import EOS information
EOS_df = pd.read_csv('FeAlloyEOS.csv')
# Find the filepath of all .xy XRD pattern files
patternfilepath_list = [filepath for filepath in glob.glob('*/*/*.xy')]
allresults_df = pd.DataFrame()
bccFe_df = pd.DataFrame()
bccFeNi_df = pd.DataFrame()
bccFeNiSi_df = pd.DataFrame()
hcpFeNi_df = pd.DataFrame()
hcpFeNiSi_df = pd.DataFrame()
"""
Extract summary unit data created using tabulate_area.py and postprocess to join
into vector tiles.
The following code compacts values in a few ways. These were tested against
versions of the vector tiles that retained individual integer columns, and the
compacted version here ended up being smaller.
Blueprint and corridors were encoded to a pipe-delimited series of percents * 10
(to preserve 1 decimal place in frontend), omitting any 0 values:
<value0>|<value1>|...
Each indicator was encoded to a pipe-delimited series of percents * 10, then
any indicators present were merged into a single comma-delimited string.
Indicators that were not present were omitted.
Note: indicators are keyed based on their index within the indicators array;
this must be used in the frontend in same order and must remain in consistent
order.
Time-series values (SLR, urban) were converted to percent * 10 then delta encoded
into caret-delimited strings:
<baseline>^<delta_value0>^<delta_value1>
Areas where there were no values present were converted to empty strings. Areas
where there was no change from the baseline just include the baseline.
Values that could have multiple key:value entries (ownership, protection) are dictionary-encoded:
FED:<fed_%>,LOC:<loc_%>,...
Counties are encoded as:
<FIPS>:state|county,<FIPS>|...
"""
from pathlib import Path
import csv
import numpy as np
import pandas as pd
from analysis.constants import INDICATOR_INDEX, URBAN_YEARS, DEBUG
from analysis.lib.attribute_encoding import encode_values, delta_encode_values
data_dir = Path("data")
results_dir = data_dir / "results"
out_dir = data_dir / "for_tiles"
### HUC12
working_dir = results_dir / "huc12"
print("Reading HUC12 units...")
huc12 = pd.read_feather(
data_dir / "inputs/summary_units" / "huc12.feather", columns=["id", "name", "acres"]
).set_index("id")
huc12.acres = huc12.acres.round().astype("uint")
huc12["type"] = "subwatershed"
print("Encoding HUC12 Blueprint & indicator values...")
blueprint = pd.read_feather(results_dir / "huc12/blueprint.feather").set_index("id")
# Unpack blueprint values
blueprint_cols = [c for c in blueprint.columns if c.startswith("blueprint_")]
corridor_cols = [c for c in blueprint.columns if c.startswith("corridors_")]
blueprint_total = blueprint[blueprint_cols].sum(axis=1).rename("blueprint_total")
shape_mask = blueprint.shape_mask
# convert Blueprint to integer percents * 10, and pack into pipe-delimited string
blueprint_percent = encode_values(blueprint[blueprint_cols], shape_mask, 1000).rename(
"blueprint"
)
# convert corridors to integer percents * 10, and pack into pipe-delimited string
corridors_percent = encode_values(blueprint[corridor_cols], shape_mask, 1000).rename(
"corridors"
)
indicators = dict()
# serialized id is based on position
for i, id in enumerate(INDICATOR_INDEX.keys()):
cols = [c for c in blueprint.columns if c.startswith(id)]
values = blueprint[cols]
# drop indicators that are not present in this area
# if only 0 values are present, ignore this indicator
ix = values[cols[1:]].sum(axis=1) > 0
indicators[i] = encode_values(values.loc[ix], shape_mask.loc[ix], 1000).rename(i)
# encode to dict-encoded value <i>:<percents>,...
# dropping any that are not present in a given record
indicators = (
pd.DataFrame(indicators)
.fillna("")
.apply(lambda g: ",".join((f"{k}:{v}" for k, v in g.items() if v)), axis=1)
.rename("indicators")
)
blueprint_df = (
blueprint[["shape_mask"]]
.round()
.astype("uint")
.join(blueprint_total.round().astype("uint"))
.join(blueprint_percent)
.join(corridors_percent)
.join(indicators)
).fillna("")
### Convert SLR and urban to integer acres, and delta encode
print("Encoding SLR values...")
slr = (
pd.read_feather(working_dir / "slr.feather").set_index("id").round().astype("uint")
)
slr = delta_encode_values(
slr.drop(columns=["shape_mask"]), slr.shape_mask, 1000
).rename("slr")
print("Encoding urban values...")
urban = (
pd.read_feather(working_dir / "urban.feather")
.set_index("id")
.round()
.astype("uint")
)
urban = delta_encode_values(
urban.drop(columns=["shape_mask"]), urban.shape_mask, 1000
).rename("urban")
### Dictionary encode ownership and protection for each HUC12:
# FED:<fed_acres>,LOC: <loc_acres>, ...
ownership = (
pd.read_feather(working_dir / "ownership.feather")
.set_index("id")
.join(huc12.acres.rename("total_acres"))
)
ownership["percent"] = (
(1000 * ownership.acres / ownership.total_acres).round().astype("uint")
)
# drop anything at 0%
ownership = ownership.loc[ownership.percent > 0].copy()
ownership = pd.Series(
(ownership.FEE_ORGTYP + ":" + ownership.percent.astype("str"))
.groupby(level=0)
.apply(lambda r: ",".join(v for v in r)),
name="ownership",
)
protection = (
pd.read_feather(working_dir / "protection.feather")
.set_index("id")
.join(huc12.acres.rename("total_acres"))
)
protection["percent"] = (
(1000 * protection.acres / protection.total_acres).round().astype("uint")
)
# drop anything at 0%
protection = protection.loc[protection.percent > 0].copy()
protection = pd.Series(
(protection.GAP_STATUS.astype("str") + ":" + protection.percent.astype("str"))
.groupby(level=0)
.apply(lambda r: ",".join(v for v in r)),
name="protection",
)
### Convert counties into a dict encoded string per HUC12,
# dividing state and county by "|" and entries by ","
# <FIPS>:state|county,
counties = pd.Series(
pd.read_feather(working_dir / "counties.feather")
.set_index("id")
.apply(
lambda r: ":".join([r.values[0], "|".join((str(v) for v in r.values[1:]))]),
axis=1,
)
.groupby(level=0)
.apply(lambda g: ",".join((v for v in g.values))),
name="counties",
)
huc12 = (
huc12.join(blueprint_df, how="left")
.join(slr, how="left")
.join(urban, how="left")
.join(ownership, how="left")
.join(protection, how="left")
.join(counties, how="left")
.fillna("")
)
### Read in marine data
working_dir = results_dir / "marine_blocks"
print("Reading marine_blocks...")
marine = pd.read_feather(
data_dir / "inputs/summary_units/marine_blocks.feather",
columns=["id", "name", "acres"],
).set_index("id")
marine.acres = marine.acres.round().astype("uint")
marine["type"] = "marine lease block"
print("Encoding marine Blueprint & indicator values...")
blueprint = pd.read_feather(working_dir / "blueprint.feather")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Use a machine learning approach to identify which acoustic indices are most
important for discriminating between land-cover types.
env_cover ~ acoustic_indices
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import glob
#%% Load data
df_env = pd.read_csv('../../env_data/ANH_to_GXX.csv')
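# A minimal sketch of the stated approach (assumed, not part of this script):
# fit a classifier of land cover on the acoustic indices and rank the indices
# by feature importance, e.g.
#   from sklearn.ensemble import RandomForestClassifier
#   rf = RandomForestClassifier(n_estimators=500).fit(X_indices, y_cover)
#   importance = pd.Series(rf.feature_importances_, index=X_indices.columns).sort_values()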
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import csv
import os
import math
import matplotlib.lines as mlines
import numpy as np
import seaborn as sns
import pandas as pd
from sklearn.decomposition import PCA
from itertools import chain
import logging
from matplotlib.patches import Patch
import glob
from pandas.plotting import table
import matplotlib.ticker as ticker
experiment_level_graph_settings=[
["rescaled", "deep", ["not_rescaled", "res_nn", "drop_res_nn", "drop_res_up_nn"],["Original Scale", "Average Low\n Altitude Scale", "Average High\n Altitude Scale", "Average Low\n Altitude Scale\n with High\n Altitude Resolution"]],
["distortion_correction", "Blues", [False, True], ["Original Image", "Distortion Corrected"]],
["colour_correction_type", "Reds", ["grey", "alt"], ["Greyworld Corrected", "Altitude Corrected"]],
# ["separate_channel_ops", "PiYG", [False, True], ["Cross Channel Operations", "Separate Channel Operations"]],
# ["elastic_distortions","PRGn",[False, True], ["Without Elastic Distortions", "With Elastic Distortions"]]
]
instance_level_graph_settings = []
instance_level_graph_settings.extend(experiment_level_graph_settings)
instance_level_graph_settings.append(["gt_class", "Accent", [8.0, 3.0, 5.0, 2.0, 1.0, 6.0], ["Seastar", "Small\n Rockfish", "Fish 4", "Large\n Rockfish", "Eel", "Crab"]])
def get_experiments():
experiments = []
directories = [x for x in os.walk("../../../")][0][1]
for directory in directories:
colour_correction_type = directory.split("_")[0]
if directory.split("_")[1] == "dist":
distortion_correction = True
else:
distortion_correction = False
if directory.split("_")[2] == "rescaled":
rescaled = True
else:
rescaled = False
filenames = [f for f in os.listdir(directory) if f[-4:] == ".csv"]
print(filenames)
for filename in filenames:
experiment = {
"colour_correction_type": colour_correction_type,
"distortion_correction": distortion_correction,
"rescaled": rescaled,
}
with open("./" + directory + "/" + filename, "r") as csvfile:
plots = csv.reader(csvfile, delimiter=",")
headers = next(plots, None)
for header in headers:
experiment[header] = []
for row in plots:
for i, header in enumerate(headers):
experiment[header].append(float(row[i]))
experiment["minimum_val_loss"] = min(experiment["val_loss"])
experiment["minimum_loss"] = min(experiment["loss"])
number = int(filename.split("_")[1].split(".")[0])
experiment["number"] = number
experiment["repeat"] = math.floor(number / 4)
if (number % 4) / 2 < 1:
experiment["elastic_distortions"] = True
else:
experiment["elastic_distortions"] = False
if (number % 4) % 2 != 0:
experiment["separate_channel_ops"] = True
else:
experiment["separate_channel_ops"] = False
print(experiment)
experiments.append(experiment)
return experiments
def int_to_string(bucket):
return f"{bucket*500}-{(bucket+1)*500}"
def size_to_bucket(size, bucket_size):
bucket = math.floor(size / bucket_size)
return bucket
def size_buckets(size_list, bucket_size):
size_bucket=[]
for size in size_list:
bucket = size_to_bucket(size, bucket_size)
size_bucket.append(bucket)
return size_bucket
def size_overlaps(size_overlaps_string):
size_list = []
overlaps_list = []
split_size_overlaps = size_overlaps_string[1:-1].split("dtype=float32)],")
for item in split_size_overlaps:
split = item.split(": ")
size = int(split[0])
overlaps = []
split_overlaps = split[1][1:-1].split("array(")[1:]
for overlaps_item in split_overlaps:
numbers = [float(x) for x in overlaps_item.split("]]")[0][2:].split(",")]
overlaps.append(max(numbers))
size_list.append(size)
overlaps_list.append(np.mean(overlaps))
# numbers =[float(x) for x in item.split("]]")[0][2:].split(',')]
# size_overlaps_list.append(max(numbers))
print(size_list)
print(overlaps_list)
return size_list, overlaps_list
def overlaps(overlaps_string):
overlaps_list = []
split_overlaps = overlaps_string[1:-1].split(",")[1:]
for item in split_overlaps:
numbers = [float(x) for x in split_overlaps]
overlaps_list.append(max(numbers))
return np.mean(overlaps_list)
def plot_boxplots(experiments):
plt.clf()
axes = plt.gca()
axes.set_ylim([0, 4.6])
fig, ax = plt.subplots()
new_experiments = []
print(experiments.keys())
for index, experiment in experiments.iterrows():
for value in [
"val_mrcnn_class_loss",
"val_mrcnn_bbox_loss",
"val_mrcnn_mask_loss",
"mrcnn_bbox_loss",
"val_rpn_bbox_loss",
"mrcnn_mask_loss",
"rpn_class_loss",
"rpn_bbox_loss",
"val_loss",
"mrcnn_class_loss",
"val_rpn_class_loss",
"loss",
"overlaps_AE_area1",
"overlaps_AE_area2",
"overlaps_AE_area3",
"gt_size_list",
"overlaps",
"gt_class_list",
"gt_size_list_AE_area1",
"gt_class_list_AE_area1",
"gt_size_list_AE_area2",
"gt_class_list_AE_area2",
"gt_size_list_AE_area3",
"gt_class_list_AE_area3",
"AP_list",
"AP_list_AE_area1",
"AP_list_AE_area2",
"AP_list_AE_area3",
"classless_AP_list",
"classless_AP_list_AE_area1",
"classless_AP_list_AE_area2",
"classless_AP_list_AE_area3",
]:
experiment[value] = stringlist_to_list(experiment[value])
experiment['mean_overlap'] = np.mean(experiment['overlaps'])
experiment['0_threshold'] = len([i for i in experiment['overlaps'] if i > 0])/len(experiment['overlaps'])
experiment['5_threshold'] = len([i for i in experiment['overlaps'] if i >= 0.05])/len(experiment['overlaps'])
experiment['10_threshold'] = len([i for i in experiment['overlaps'] if i >= 0.1])/len(experiment['overlaps'])
experiment['15_threshold'] = len([i for i in experiment['overlaps'] if i >= 0.15])/len(experiment['overlaps'])
experiment['20_threshold'] = len([i for i in experiment['overlaps'] if i >= 0.2])/len(experiment['overlaps'])
experiment['30_threshold'] = len([i for i in experiment['overlaps'] if i >= 0.3])/len(experiment['overlaps'])
experiment['40_threshold'] = len([i for i in experiment['overlaps'] if i >= 0.4])/len(experiment['overlaps'])
experiment['50_threshold'] = len([i for i in experiment['overlaps'] if i >= 0.5])/len(experiment['overlaps'])
experiment['mean_nonzero_overlaps'] = np.mean([item for item in experiment['overlaps'] if item != 0])
experiment['mean_AP'] = np.mean(experiment['AP_list'])
experiment['mean_classless_AP'] = np.mean(experiment['classless_AP_list'])
experiment['mean_classless_AP_AE_area1'] = np.mean(experiment["classless_AP_list_AE_area1"])
experiment['mean_classless_AP_AE_area2'] = np.mean(experiment["classless_AP_list_AE_area2"])
experiment['mean_classless_AP_AE_area3'] = np.mean(experiment["classless_AP_list_AE_area3"])
print(experiment['rescaled'])
print(experiment['overlaps'])
print(experiment['30_threshold'])
for ae_dataset in ["AE_area1", "AE_area2", "AE_area3"]:
experiment[f'mean_overlap_{ae_dataset}'] = np.mean(experiment[f'overlaps_{ae_dataset}'])
experiment[f'0_threshold_{ae_dataset}'] = len([i for i in experiment[f'overlaps_{ae_dataset}'] if i > 0])/len(experiment[f'overlaps_{ae_dataset}'])
experiment[f'10_threshold_{ae_dataset}'] = len([i for i in experiment[f'overlaps_{ae_dataset}'] if i >= 0.1])/len(experiment[f'overlaps_{ae_dataset}'])
experiment[f'30_threshold_{ae_dataset}'] = len([i for i in experiment[f'overlaps_{ae_dataset}'] if i >= 0.3])/len(experiment[f'overlaps_{ae_dataset}'])
experiment[f'mean_nonzero_overlaps{ae_dataset}'] = np.mean([item for item in experiment[f'overlaps_{ae_dataset}'] if item != 0])
print(experiment[f"AP_list_{ae_dataset}"])
experiment[f"mean_AP_{ae_dataset}"] = np.mean(experiment[f"AP_list_{ae_dataset}"])
experiment[f"mean_classless_AP_{ae_dataset}"] = np.mean(experiment[f"classless_AP_list_{ae_dataset}"])
experiment['dataset'] = 'tunasand'
new_experiments.append(experiment)
print("Calculating threshold for 95% and 68% of points")
nonzero_overlap = [item for sublist in list(experiments['overlaps']) for item in stringlist_to_list(sublist) if item != 0]
nonzero_overlap.sort()
threshold_95 = nonzero_overlap[int(len(nonzero_overlap)*0.05)]
print(threshold_95)
threshold_68 = nonzero_overlap[int(len(nonzero_overlap)*0.32)]
print(threshold_68)
experiments_dataframe = pd.DataFrame(new_experiments)
sns.set()
plt.figure(figsize=(8,5))
mAP_summary_table(experiments_dataframe)
include_row = experiments_dataframe.rescaled.isin(["not_rescaled", "res_nn", "drop_res_nn", "drop_res_up_nn"])
filtered_experiments = experiments_dataframe[include_row]
for value in experiment_level_graph_settings:
plt.clf()
g = sns.boxplot(
x=filtered_experiments[value[0]],
y=filtered_experiments["30_threshold"],
palette=value[1],
order=value[2],
)
g.set_xticklabels(value[3], rotation=0)
g.set_ylim([0,1])
g.set_xlabel("")
plt.title(value[0])
plt.ylabel("Object Detection Rate")
plt.savefig(f"{value[0]}_detection_boxplot", bbox_inches='tight')
plt.clf()
g = sns.boxplot(
x=filtered_experiments[value[0]],
y=filtered_experiments["mean_AP"],
palette=value[1],
# order=value[2],
)
print(value)
print(g.get_xticklabels())
g.set_xticklabels(value[3], rotation=0)
g.set_ylim([0,1])
g.set_xlabel("")
plt.title(value[0])
plt.ylabel("mAP")
plt.savefig(f"{value[0]}_mAP_boxplot", bbox_inches='tight')
plt.clf()
g = sns.boxplot(
x=filtered_experiments[value[0]],
y=filtered_experiments["mean_classless_AP"],
palette=value[1],
# order=value[2],
)
g.set_xticklabels(value[3], rotation=0)
g.set_ylim([0,1])
g.set_xlabel("")
plt.title(value[0])
plt.ylabel("mAP")
plt.savefig(f"{value[0]}_classless_mAP_boxplot", bbox_inches='tight')
for ae_dataset in ["AE_area1", "AE_area2", "AE_area3"]:
plt.clf()
g = sns.boxplot(
x=filtered_experiments[value[0]],
y=filtered_experiments[f"mean_AP_{ae_dataset}"],
palette=value[1],
order=value[2],
)
g.set_xticklabels(value[3], rotation=0)
g.set_ylim([0,1])
g.set_xlabel("")
plt.title(value[0])
plt.ylabel("mAP")
plt.savefig(f"{value[0]}_mAP_boxplot_{ae_dataset}", bbox_inches='tight')
plt.clf()
g = sns.boxplot(
x=filtered_experiments[value[0]],
y=filtered_experiments[f"mean_classless_AP_{ae_dataset}"],
palette=value[1],
order=value[2],
)
g.set_xticklabels(value[3], rotation=0)
g.set_ylim([0,1])
g.set_xlabel("")
plt.title(value[0])
plt.ylabel("mAP")
plt.savefig(f"{value[0]}_classless_mAP_boxplot_{ae_dataset}", bbox_inches='tight')
plt.clf()
g = sns.boxplot(
x=experiments_dataframe["dataset"],
y=experiments_dataframe["mean_nonzero_overlaps"],
hue=experiments_dataframe[value[0]],
palette=value[1],
hue_order=value[2],
)
# g.get_legend().remove()
# g.set_xticklabels(g.get_xticklabels(), rotation=45)
# plt.title(value[0])
# plt.xlabel('Epoch')
plt.ylabel("Mean Segmentation IOU Scores")
plt.savefig(f"{value[0]}_mean_IOU_boxplot", bbox_inches='tight')
plt.figure(figsize=(7,5))
plt.figure(figsize=(6,5))
for feature in experiment_level_graph_settings: #[['rescaled', ["not_rescaled", "res_nn", "drop_res_nn", "drop_res_up_nn"], ["Original Scale", "Average Low\n Altitude Scale", "Average High\n Altitude Scale", "Average Low\n Altitude Scale\n with High\n Altitude Resolution"]], ['distortion_correction', [False, True], ["Original Image", "Distortion Corrected"]]]:
for threshold in ['0','10','30']:
plt.clf()
g = sns.boxplot(
x=filtered_experiments[feature[0]],
y=filtered_experiments[f"{threshold}_threshold"],
# hue=experiments_dataframe["rescaled"],
order=feature[2],
palette=feature[1],
)
# g.get_legend().remove()
g.set_xticklabels(feature[3], rotation=0)
g.set_ylim([0,1])
g.set_xlabel("")
plt.title(f"Success Rate at {threshold}% IOU Threshold, {feature[0]} Comparison")
# plt.xlabel('Epoch')
plt.ylabel("Object Detection Rate")
plt.savefig(f"{feature[0]}_boxplot_threshold{threshold}", bbox_inches='tight')
for ae_dataset in ["AE_area1", "AE_area2", "AE_area3"]:
plt.clf()
g = sns.boxplot(
x=filtered_experiments[feature[0]],
y=filtered_experiments[f"{threshold}_threshold_{ae_dataset}"],
# hue=experiments_dataframe["rescaled"],
order=feature[2],
palette=feature[1],
)
# g.get_legend().remove()
g.set_xticklabels(feature[3], rotation=0)
g.set_ylim([0,1])
g.set_xlabel("")
plt.title(f"Object Detection Rate at {threshold}% IOU Threshold, {feature[0]} Comparison")
# plt.xlabel('Epoch')
plt.ylabel("Object Detection Rate")
plt.savefig(f"{feature[0]}_boxplot_threshold{threshold}_{ae_dataset}", bbox_inches='tight')
plt.figure(figsize=(7,5))
def group_experiments_by_predictions(experiments, category):
category_values = []
for i, experiment in experiments.iterrows():
if not experiment[category] in category_values:
category_values.append(experiment[category])
# print(experiments.where(experiments[category]==category_values[0]))
new_experiments = []
for category_value in category_values:
category_experiments_indices = experiments[category] == category_value
category_experiments = experiments[category_experiments_indices]
new_experiment = {
category: category_value,
"size": np.mean(
np.array(list(category_experiments["size"])), axis=0
)
}
new_experiments.append(new_experiment)
experiments_dataframe = pd.DataFrame(new_experiments)
return experiments_dataframe
def group_experiments_by_epoch(experiments, category):
category_values = []
for i, experiment in experiments.iterrows():
if not experiment[category] in category_values:
category_values.append(experiment[category])
# print(experiments.where(experiments[category]==category_values[0]))
new_experiments = []
for category_value in category_values:
category_experiments_indices = experiments[category] == category_value
category_experiments = experiments[category_experiments_indices]
new_experiment = {
category: category_value
}
for value in ["val_mrcnn_class_loss", "val_mrcnn_bbox_loss", "val_mrcnn_mask_loss", "val_rpn_bbox_loss", "val_rpn_class_loss", "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss", "rpn_bbox_loss", "rpn_class_loss"]:
try:
new_experiment[value]=np.mean(
np.array(list(category_experiments[value])), axis=0
)
except:
print("unable to get the mean across axis 0 for value: " + value)
print(category_experiments)
new_experiment[value]=np.zeros(100)
# "val_mrcnn_class_loss": np.mean(
# np.array(list(category_experiments["val_mrcnn_class_loss"])), axis=0
# ),
# "val_mrcnn_bbox_loss": np.mean(
# np.array(list(category_experiments["val_mrcnn_bbox_loss"])), axis=0
# ),
# "val_mrcnn_mask_loss": np.mean(
# np.array(list(category_experiments["val_mrcnn_mask_loss"])), axis=0
# ),
# "val_rpn_bbox_loss": np.mean(
# np.array(list(category_experiments["val_rpn_bbox_loss"])), axis=0
# ),
# "val_rpn_class_loss": np.mean(
# np.array(list(category_experiments["val_rpn_class_loss"])), axis=0
# ),
# "mrcnn_class_loss": np.mean(
# np.array(list(category_experiments["mrcnn_class_loss"])), axis=0
# ),
# "mrcnn_bbox_loss": np.mean(
# np.array(list(category_experiments["mrcnn_bbox_loss"])), axis=0
# ),
# "mrcnn_mask_loss": np.mean(
# np.array(list(category_experiments["mrcnn_mask_loss"])), axis=0
# ),
# "rpn_bbox_loss": np.mean(
# np.array(list(category_experiments["rpn_bbox_loss"])), axis=0
# ),
# "rpn_class_loss": np.mean(
# np.array(list(category_experiments["rpn_class_loss"])), axis=0
# ),
# }
new_experiments.append(new_experiment)
experiments_dataframe = pd.DataFrame(new_experiments)
return experiments_dataframe
def plot_colour_correction_lossvsvalloss(experiments, name):
new_experiments = []
print(experiments.keys())
for index, experiment in experiments.iterrows():
print(experiment["val_mrcnn_class_loss"])
experiment["val_mrcnn_class_loss"] = [
float(n) for n in experiment["val_mrcnn_class_loss"][1:-1].split(", ")
]
experiment["val_mrcnn_bbox_loss"] = [
float(n) for n in experiment["val_mrcnn_bbox_loss"][1:-1].split(", ")
]
experiment["val_mrcnn_mask_loss"] = [
float(n) for n in experiment["val_mrcnn_mask_loss"][1:-1].split(", ")
]
experiment["mrcnn_bbox_loss"] = [
float(n) for n in experiment["mrcnn_bbox_loss"][1:-1].split(", ")
]
experiment["val_rpn_bbox_loss"] = [
float(n) for n in experiment["val_rpn_bbox_loss"][1:-1].split(", ")
]
experiment["mrcnn_mask_loss"] = [
float(n) for n in experiment["mrcnn_mask_loss"][1:-1].split(", ")
]
experiment["rpn_class_loss"] = [
float(n) for n in experiment["rpn_class_loss"][1:-1].split(", ")
]
experiment["rpn_bbox_loss"] = [
float(n) for n in experiment["rpn_bbox_loss"][1:-1].split(", ")
]
experiment["val_loss"] = [
float(n) for n in experiment["val_loss"][1:-1].split(", ")
]
experiment["mrcnn_class_loss"] = [
float(n) for n in experiment["mrcnn_class_loss"][1:-1].split(", ")
]
experiment["val_rpn_class_loss"] = [
float(n) for n in experiment["val_rpn_class_loss"][1:-1].split(", ")
]
experiment["loss"] = [float(n) for n in experiment["loss"][1:-1].split(", ")]
experiment["overlaps"] = stringlist_to_list(experiment["overlaps"])
for epoch in range(len(experiment["val_loss"])):
new_row = {
"rescaled": experiment["rescaled"],
"distortion_correction": experiment["distortion_correction"],
"minimum_loss": experiment["minimum_loss"],
"number": experiment["number"],
"minimum_val_loss": experiment["minimum_val_loss"],
"elastic_distortions": experiment["elastic_distortions"],
"colour_correction_type": experiment["colour_correction_type"],
"separate_channel_ops": experiment["separate_channel_ops"],
"epoch": epoch,
"mrcnn_mask_loss": float(experiment["mrcnn_mask_loss"][epoch]),
"val_mrcnn_bbox_loss": float(experiment["val_mrcnn_bbox_loss"][epoch]),
"loss": float(experiment["loss"][epoch]),
"val_mrcnn_class_loss": float(
experiment["val_mrcnn_class_loss"][epoch]
),
"val_mrcnn_mask_loss": float(experiment["val_mrcnn_mask_loss"][epoch]),
"mrcnn_bbox_loss": float(experiment["mrcnn_bbox_loss"][epoch]),
"val_rpn_bbox_loss": float(experiment["val_rpn_bbox_loss"][epoch]),
"rpn_class_loss": float(experiment["rpn_class_loss"][epoch]),
"rpn_bbox_loss": float(experiment["rpn_bbox_loss"][epoch]),
"mrcnn_class_loss": float(experiment["mrcnn_class_loss"][epoch]),
"val_rpn_class_loss": float(experiment["val_rpn_class_loss"][epoch]),
"val_loss": float(experiment["val_loss"][epoch]),
"combo": str(experiment["colour_correction_type"])
+ str(experiment["distortion_correction"])
+ str(experiment["rescaled"]),
}
new_experiments.append(new_row)
experiments_dataframe = pd.DataFrame(new_experiments)
sns.set()
plt.clf()
axes = plt.gca()
axes.set_ylim([0, 4.6])
fig, ax = plt.subplots()
sns.lineplot(x="epoch", y="val_loss", data=experiments_dataframe, hue="combo")
print(len(ax.lines))
for i in range(len(ax.lines)):
ax.lines[i].set_linestyle("--")
g = sns.lineplot(x="epoch", y="loss", data=experiments_dataframe, hue="combo")
g.legend_.remove()
# ax.lines[3].set_linestyle("--")
print(ax.lines)
print(ax.lines[3].get_linestyle())
# plt.legend()
# handles, labels = ax.get_legend_handles_labels()
# custom_lines = [Patch(facecolor=ax.lines[0].get_color()),
# Patch(facecolor=ax.lines[1].get_color()),
# Patch(facecolor=ax.lines[2].get_color()),
# # Patch(facecolor=ax.lines[3].get_color()),
# ]
# handles=custom_lines
# labels = labels[1:4]
# handles.extend([ax.lines[4], ax.lines[0]])
# labels.extend(['training', 'validation'])
# print(handles)
# print(labels)
# ax.legend(handles=[], labels=[])
plt.title("Loss and validation loss per epoch")
plt.savefig(name + "combo_type_loss_vs_valloss")
def stringlist_to_list(stringlist):
if type(stringlist) != str:
return [0]
if len(stringlist) < 3:
return [0]
real_list = [float(n) for n in stringlist[1:-1].split(", ")]
return real_list
def get_labels(buckets):
# buckets = sorted(list(buckets))
return [int_to_string(i) for i in buckets]
def plot_size_overlap_boxplots(experiments):
ts_experiments = []
ae_dive1_experiments = []
ae_dive2_experiments = []
ae_dive3_experiments = []
print(experiments.keys())
for index, experiment in experiments.iterrows():
print(experiment)
for value in [
"val_mrcnn_class_loss",
"val_mrcnn_bbox_loss",
"val_mrcnn_mask_loss",
"mrcnn_bbox_loss",
"val_rpn_bbox_loss",
"mrcnn_mask_loss",
"rpn_class_loss",
"rpn_bbox_loss",
"val_loss",
"mrcnn_class_loss",
"val_rpn_class_loss",
"loss",
"overlaps_AE_area1",
"overlaps_AE_area2",
"overlaps_AE_area3",
"gt_size_list",
"overlaps",
"gt_class_list",
"gt_size_list_AE_area1",
"gt_class_list_AE_area1",
"gt_size_list_AE_area2",
"gt_class_list_AE_area2",
"gt_size_list_AE_area3",
"gt_class_list_AE_area3",
"predicted_size_list",
"predicted_class_list",
"predicted_size_list_AE_area1",
"predicted_class_list_AE_area1",
"predicted_size_list_AE_area2",
"predicted_class_list_AE_area2",
"predicted_size_list_AE_area3",
"predicted_class_list_AE_area3",
]:
print(value)
experiment[value] = stringlist_to_list(experiment[value])
experiment["size_bucket"] = size_buckets(experiment["predicted_size_list"], 500)
experiment["ae_size_bucket_dive1"] = size_buckets(experiment["predicted_size_list_AE_area1"], 20000)
experiment["ae_size_bucket_dive2"] = size_buckets(experiment["predicted_size_list_AE_area2"], 20000)
experiment["ae_size_bucket_dive3"] = size_buckets(experiment["predicted_size_list_AE_area3"], 20000)
for prediction in range(len(experiment["gt_class_list"])):
if experiment["overlaps"][prediction] >= 0.3:
new_row = {
"rescaled": experiment["rescaled"],
"distortion_correction": experiment["distortion_correction"],
"minimum_loss": experiment["minimum_loss"],
"number": experiment["number"],
"minimum_val_loss": experiment["minimum_val_loss"],
"elastic_distortions": experiment["elastic_distortions"],
"colour_correction_type": experiment["colour_correction_type"],
"separate_channel_ops": experiment["separate_channel_ops"],
"combo": experiment["colour_correction_type"]
+ str(experiment["distortion_correction"])
+ str(experiment["rescaled"]),
"size_bucket":experiment["size_bucket"][prediction],
"gt_size":experiment["gt_size_list"][prediction],
"gt_class":experiment["gt_class_list"][prediction],
"overlap":experiment["overlaps"][prediction],
"dataset":"tunasand"
}
if new_row['rescaled'] in ["not_rescaled", "res_nn", "drop_res_nn", "drop_res_up_nn"]:
ts_experiments.append(new_row)
for prediction in range(len(experiment["gt_class_list_AE_area1"])):
if experiment["overlaps_AE_area1"][prediction] >= 0.3:
new_row = {
"rescaled": experiment["rescaled"],
"distortion_correction": experiment["distortion_correction"],
"minimum_loss": experiment["minimum_loss"],
"number": experiment["number"],
"minimum_val_loss": experiment["minimum_val_loss"],
"elastic_distortions": experiment["elastic_distortions"],
"colour_correction_type": experiment["colour_correction_type"],
"separate_channel_ops": experiment["separate_channel_ops"],
"combo": experiment["colour_correction_type"]
+ str(experiment["distortion_correction"])
+ str(experiment["rescaled"]),
"size_bucket":experiment["ae_size_bucket_dive1"][prediction],
"gt_class":experiment["gt_class_list_AE_area1"][prediction],
"overlap":experiment["overlaps_AE_area1"][prediction],
"dataset":"ae_dive1"
}
if new_row['rescaled'] in ["not_rescaled", "res_nn", "drop_res_nn", "drop_res_up_nn"]:
ae_dive1_experiments.append(new_row)
for prediction in range(len(experiment["gt_class_list_AE_area2"])):
if experiment["overlaps_AE_area2"][prediction] >= 0.3:
new_row = {
"rescaled": experiment["rescaled"],
"distortion_correction": experiment["distortion_correction"],
"minimum_loss": experiment["minimum_loss"],
"number": experiment["number"],
"minimum_val_loss": experiment["minimum_val_loss"],
"elastic_distortions": experiment["elastic_distortions"],
"colour_correction_type": experiment["colour_correction_type"],
"separate_channel_ops": experiment["separate_channel_ops"],
"combo": experiment["colour_correction_type"]
+ str(experiment["distortion_correction"])
+ str(experiment["rescaled"]),
"size_bucket":experiment["ae_size_bucket_dive2"][prediction],
"gt_class":experiment["gt_class_list_AE_area2"][prediction],
"overlap":experiment["overlaps_AE_area2"][prediction],
"dataset":"ae_dive2"
}
if new_row['rescaled'] in ["not_rescaled", "res_nn", "drop_res_nn", "drop_res_up_nn"]:
ae_dive2_experiments.append(new_row)
for prediction in range(len(experiment["gt_class_list_AE_area3"])):
if experiment["overlaps_AE_area3"][prediction] >= 0.3:
new_row = {
"rescaled": experiment["rescaled"],
"distortion_correction": experiment["distortion_correction"],
"minimum_loss": experiment["minimum_loss"],
"number": experiment["number"],
"minimum_val_loss": experiment["minimum_val_loss"],
"elastic_distortions": experiment["elastic_distortions"],
"colour_correction_type": experiment["colour_correction_type"],
"separate_channel_ops": experiment["separate_channel_ops"],
"combo": experiment["colour_correction_type"]
+ str(experiment["distortion_correction"])
+ str(experiment["rescaled"]),
"size_bucket":experiment["ae_size_bucket_dive3"][prediction],
"gt_class":experiment["gt_class_list_AE_area3"][prediction],
"overlap":experiment["overlaps_AE_area3"][prediction],
"dataset":"ae_dive3"
}
if new_row['rescaled'] in ["not_rescaled", "res_nn", "drop_res_nn", "drop_res_up_nn"]:
ae_dive3_experiments.append(new_row)
    # Convert the accumulated per-prediction rows into dataframes for plotting
ts_experiments = pd.DataFrame(ts_experiments)
ae_dive1_experiments = pd.DataFrame(ae_dive1_experiments)
ae_dive2_experiments = pd.DataFrame(ae_dive2_experiments)
ae_dive3_experiments = pd.DataFrame(ae_dive3_experiments)
for value in set(ts_experiments['rescaled']):
subset = ts_experiments[ts_experiments['rescaled']==value]
sns.set()
labels=get_labels(set(subset['size_bucket']))
plt.clf()
# palette = get_palette(subset['size_bucket'])
counts = subset.groupby("size_bucket").count()
print(value)
print(counts)
print(counts['rescaled'].sum())
ax = sns.boxplot(x=subset["size_bucket"], y=subset["overlap"])
axes = plt.gca()
axes.set_ylabel("IOU scores")
axes.set_xlabel("Size of groundtruth segment in pixels")
ax.tick_params(which='major', width=1.00)
ax.tick_params(which='major', length=5)
ax.tick_params(which='minor', width = 0.75)
ax.tick_params(which='minor', length=2.5)
# ax.get_legend().remove()
ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(1))
ax.xaxis.set_major_formatter(ticker.ScalarFormatter())
new_labels=['blah']
new_labels.extend(labels[0::5])
ax.xaxis.set_ticklabels(new_labels,rotation=50)
plt.title("IOU to size of groundtruth scatterplot")
plt.savefig(f"scatter_{value}_box_plot", bbox_inches='tight')
plt.clf()
ax = sns.boxplot(x=ts_experiments["gt_class"], y=ts_experiments["overlap"], hue=ts_experiments['size_bucket'])
axes = plt.gca()
axes.set_ylabel("IOU scores")
axes.set_xlabel("Size of groundtruth segment in pixels")
ax.tick_params(which='major', width=1.00)
ax.tick_params(which='major', length=5)
ax.tick_params(which='minor', width = 0.75)
ax.tick_params(which='minor', length=2.5)
# ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
# ax.xaxis.set_minor_locator(ticker.MultipleLocator(1))
# ax.xaxis.set_major_formatter(ticker.ScalarFormatter())
# new_labels=['blah']
# new_labels.extend(labels[0::5])
# ax.xaxis.set_ticklabels(new_labels,rotation=50)
plt.title("IOU to size of groundtruth scatterplot")
plt.savefig("scatter_class_box_plot", bbox_inches='tight')
plt.clf()
sns.boxplot(x=ae_dive1_experiments["size_bucket"], y=ae_dive1_experiments["overlap"])
axes = plt.gca()
axes.set_ylabel("IOU scores")
axes.set_xlabel("Size of groundtruth segment in pixels")
# axes.set_xticklabels(labels,rotation=50)
plt.title("IOU to size of groundtruth scatterplot - " + experiment["rescaled"])
plt.savefig("ae_dive1_scatter_box_plot", bbox_inches='tight')
plt.clf()
sns.boxplot(x=ae_dive2_experiments["size_bucket"], y=ae_dive2_experiments["overlap"])
axes = plt.gca()
axes.set_ylabel("IOU scores")
axes.set_xlabel("Size of groundtruth segment in pixels")
# axes.set_xticklabels(labels,rotation=50)
plt.title("IOU to size of groundtruth scatterplot - " + experiment["rescaled"])
plt.savefig("ae_dive2_scatter_box_plot", bbox_inches='tight')
plt.clf()
sns.boxplot(x=ae_dive3_experiments["size_bucket"], y=ae_dive3_experiments["overlap"])
axes = plt.gca()
axes.set_ylabel("IOU scores")
axes.set_xlabel("Size of groundtruth segment in pixels")
# axes.set_xticklabels(labels,rotation=50)
plt.title("IOU to size of groundtruth scatterplot - " + experiment["rescaled"])
plt.savefig("ae_dive3_scatter_box_plot", bbox_inches='tight')
plt.clf()
plt.figure(figsize=(15,5))
for value in instance_level_graph_settings:
plt.clf()
g = sns.boxplot(
x=ts_experiments[value[0]],
y=ts_experiments["overlap"],
palette=value[1],
order=value[2],
fliersize=1,
hue=ts_experiments["rescaled"],
)
print(value[2])
# g.get_legend().remove()
g.set_xticklabels(value[3], rotation=0)
g.set_xlabel("")
g.set_ylim([0,1])
# plt.title(value[0])
# plt.xlabel('Epoch')
plt.ylabel("Segmentation IOU Scores")
plt.savefig(f"{value[0]}_IOU_boxplot", bbox_inches='tight')
for ae_dataset in [[ae_dive1_experiments, "AE_area1"], [ae_dive2_experiments, "AE_area2"], [ae_dive3_experiments, "AE_area3"]]:
plt.clf()
g = sns.boxplot(
x=ae_dataset[0][value[0]],
y=ae_dataset[0]["overlap"],
hue=ae_dataset[0]["rescaled"],
order=value[2],
palette=value[1],
)
# g.get_legend().remove()
g.set_xticklabels(value[3], rotation=0)
g.set_xlabel("")
g.set_ylim([0,1])
# plt.title(value[0])
# plt.xlabel('Epoch')
plt.ylabel("Segmentation IOU Scores")
plt.savefig(f"{value[0]}_IOU_boxplot_{ae_dataset[1]}", bbox_inches='tight')
def plot_size_overlap_scatter(experiments):
ts_experiments = []
ae_dive1_experiments = []
ae_dive2_experiments = []
ae_dive3_experiments = []
print(experiments.keys())
for index, experiment in experiments.iterrows():
print(experiment)
for value in [
"val_mrcnn_class_loss",
"val_mrcnn_bbox_loss",
"val_mrcnn_mask_loss",
"mrcnn_bbox_loss",
"val_rpn_bbox_loss",
"mrcnn_mask_loss",
"rpn_class_loss",
"rpn_bbox_loss",
"val_loss",
"mrcnn_class_loss",
"val_rpn_class_loss",
"loss",
"overlaps_AE_area1",
"overlaps_AE_area2",
"overlaps_AE_area3",
"gt_size_list",
"overlaps",
"gt_class_list",
"gt_size_list_AE_area1",
"gt_class_list_AE_area1",
"gt_size_list_AE_area2",
"gt_class_list_AE_area2",
"gt_size_list_AE_area3",
"gt_class_list_AE_area3",
]:
print(value)
experiment[value] = stringlist_to_list(experiment[value])
for prediction in range(len(experiment["gt_class_list"])):
new_row = {
"rescaled": experiment["rescaled"],
"distortion_correction": experiment["distortion_correction"],
"minimum_loss": experiment["minimum_loss"],
"number": experiment["number"],
"minimum_val_loss": experiment["minimum_val_loss"],
"elastic_distortions": experiment["elastic_distortions"],
"colour_correction_type": experiment["colour_correction_type"],
"separate_channel_ops": experiment["separate_channel_ops"],
"combo": experiment["colour_correction_type"]
+ str(experiment["distortion_correction"])
+ str(experiment["rescaled"]),
"gt_size":experiment["gt_size_list"][prediction],
"gt_class":experiment["gt_class_list"][prediction],
"overlap":experiment["overlaps"][prediction],
}
ts_experiments.append(new_row)
for prediction in range(len(experiment["gt_class_list_AE_area1"])):
new_row = {
"rescaled": experiment["rescaled"],
"distortion_correction": experiment["distortion_correction"],
"minimum_loss": experiment["minimum_loss"],
"number": experiment["number"],
"minimum_val_loss": experiment["minimum_val_loss"],
"elastic_distortions": experiment["elastic_distortions"],
"colour_correction_type": experiment["colour_correction_type"],
"separate_channel_ops": experiment["separate_channel_ops"],
"combo": experiment["colour_correction_type"]
+ str(experiment["distortion_correction"])
+ str(experiment["rescaled"]),
"gt_size":experiment["gt_size_list_AE_area1"][prediction],
"gt_class":experiment["gt_class_list_AE_area1"][prediction],
"overlap":experiment["overlaps_AE_area1"][prediction],
"dataset":"ae_dive1"
}
ae_dive1_experiments.append(new_row)
for prediction in range(len(experiment["gt_class_list_AE_area2"])):
new_row = {
"rescaled": experiment["rescaled"],
"distortion_correction": experiment["distortion_correction"],
"minimum_loss": experiment["minimum_loss"],
"number": experiment["number"],
"minimum_val_loss": experiment["minimum_val_loss"],
"elastic_distortions": experiment["elastic_distortions"],
"colour_correction_type": experiment["colour_correction_type"],
"separate_channel_ops": experiment["separate_channel_ops"],
"combo": experiment["colour_correction_type"]
+ str(experiment["distortion_correction"])
+ str(experiment["rescaled"]),
"gt_size":experiment["gt_size_list_AE_area2"][prediction],
"gt_class":experiment["gt_class_list_AE_area2"][prediction],
"overlap":experiment["overlaps_AE_area2"][prediction],
"dataset":"ae_dive2"
}
ae_dive2_experiments.append(new_row)
for prediction in range(len(experiment["gt_class_list_AE_area3"])):
new_row = {
"rescaled": experiment["rescaled"],
"distortion_correction": experiment["distortion_correction"],
"minimum_loss": experiment["minimum_loss"],
"number": experiment["number"],
"minimum_val_loss": experiment["minimum_val_loss"],
"elastic_distortions": experiment["elastic_distortions"],
"colour_correction_type": experiment["colour_correction_type"],
"separate_channel_ops": experiment["separate_channel_ops"],
"combo": experiment["colour_correction_type"]
+ str(experiment["distortion_correction"])
+ str(experiment["rescaled"]),
"gt_size":experiment["gt_size_list_AE_area3"][prediction],
"gt_class":experiment["gt_class_list_AE_area3"][prediction],
"overlap":experiment["overlaps_AE_area3"][prediction],
"dataset":"ae_dive3"
}
ae_dive3_experiments.append(new_row)
ts_experiments = pd.DataFrame(ts_experiments)
ae_dive1_experiments = pd.DataFrame(ae_dive1_experiments)
ae_dive2_experiments = pd.DataFrame(ae_dive2_experiments)
ae_dive3_experiments = pd.DataFrame(ae_dive3_experiments)
sns.set()
axes = plt.gca()
axes.set_ylim([-0.01, 1])
axes.set_xscale('log')
axes.set_xlim([1, 30000])
axes.set_ylabel("IOU scores")
axes.set_xlabel("Size of groundtruth segment in pixels")
plt.legend()
plt.title("IOU to size of groundtruth scatterplot - all data points")
splot=sns.scatterplot(x="gt_size", y="overlap", hue="gt_class", data=ts_experiments, s=5, marker='.', linewidth=0, palette=sns.color_palette("muted", n_colors=len(set(ts_experiments["gt_class"]))))
# splot.set(xscale="log")
plt.savefig("ts_all_points_scatter_plot", bbox_inches='tight')
plt.clf()
# axes = plt.gca()
# # axes.set_xlim([0, 30000])
# axes.set_ylim([-0.01, 1])
# axes.set_ylabel("IOU scores")
# axes.set_xscale('log')
# axes.set_xlim([1, 3000000])
# axes.set_xlabel("Size of groundtruth segment in pixels")
# plt.legend()
# plt.title("IOU to size of groundtruth scatterplot - all data points")
# splot=sns.scatterplot(x="gt_size", y="overlap", hue="gt_class", data=ae_dive1_experiments, s=5, marker='.', linewidth=0, palette=sns.color_palette("muted", n_colors=len(set(ae_dive1_experiments["gt_class"]))))
# # splot.set(xscale="log")
# plt.savefig("ae_dive1_all_points_scatter_plot", bbox_inches='tight')
# plt.clf()
# axes = plt.gca()
# # axes.set_xlim([0, 30000])
# axes.set_xscale('log')
# axes.set_xlim([1, 3000000])
# axes.set_ylim([-0.01, 1])
# axes.set_ylabel("IOU scores")
# axes.set_xlabel("Size of groundtruth segment in pixels")
# plt.legend()
# plt.title("IOU to size of groundtruth scatterplot - all data points")
# splot=sns.scatterplot(x="gt_size", y="overlap", hue="gt_class", data=ae_dive2_experiments, s=5, marker='.', linewidth=0, palette=sns.color_palette("muted", n_colors=len(set(ae_dive2_experiments["gt_class"]))))
# # splot.set(xscale="log")
# plt.savefig("ae_dive2_all_points_scatter_plot", bbox_inches='tight')
# plt.clf()
# axes = plt.gca()
# axes.set_xscale('log')
# axes.set_xlim([1, 3000000])
# axes.set_ylim([-0.01, 1])
# axes.set_ylabel("IOU scores")
# axes.set_xlabel("Size of groundtruth segment in pixels")
# plt.legend()
# plt.title("IOU to size of groundtruth scatterplot - all data points")
# splot=sns.scatterplot(x="gt_size", y="overlap", hue="gt_class", data=ae_dive3_experiments, s=5, marker='.', linewidth=0, palette=sns.color_palette("muted", n_colors=len(set(ae_dive3_experiments["gt_class"]))))
# # splot.set(xscale="log")
# plt.savefig("ae_dive3_all_points_scatter_plot", bbox_inches='tight')
# plt.clf()
for r in set(ts_experiments["rescaled"]):
rescaled_set = ts_experiments[ts_experiments['rescaled']==r]
axes = plt.gca()
axes.set_xscale('log')
axes.set_xlim([1, 30000])
axes.set_ylim([-0.01, 1])
axes.set_ylabel("IOU scores")
axes.set_xlabel("Size of groundtruth segment in pixels")
plt.legend()
plt.title("IOU to size of groundtruth scatterplot - all rescaling")
splot=sns.scatterplot(x="gt_size", y="overlap", hue="gt_class", data=rescaled_set, s=5, marker='.', linewidth=0, palette=sns.color_palette("muted", n_colors=len(set(rescaled_set["gt_class"]))))
# splot.set(xscale="log")
plt.savefig("ts_rescaled_scatter_plot_"+str(r), bbox_inches='tight')
plt.clf()
# for r in set(ae_dive1_experiments["rescaled"]):
# rescaled_set = ae_dive1_experiments[ae_dive1_experiments['rescaled']==r]
# axes = plt.gca()
# axes.set_xscale('log')
# axes.set_xlim([1, 30000])
# axes.set_ylim([-0.01, 1])
# axes.set_ylabel("IOU scores")
# axes.set_xlabel("Size of groundtruth segment in pixels")
# plt.legend()
# plt.title("IOU to size of groundtruth scatterplot - all rescaling")
# splot=sns.scatterplot(x="size", y="overlap", hue="class", data=rescaled_set, s=5, marker='.', linewidth=0, palette=sns.color_palette("muted", n_colors=len(set(rescaled_set["class"]))))
# # splot.set(xscale="log")
# plt.savefig("ae_dive1_rescaled_scatter_plot_"+str(r), bbox_inches='tight')
# plt.clf()
# for r in set(ae_dive2_experiments["rescaled"]):
# rescaled_set = ae_dive2_experiments[ae_dive2_experiments['rescaled']==r]
# axes = plt.gca()
# axes.set_xscale('log')
# axes.set_xlim([1, 30000])
# axes.set_ylim([-0.01, 1])
# axes.set_ylabel("IOU scores")
# axes.set_xlabel("Size of groundtruth segment in pixels")
# plt.legend()
# plt.title("IOU to size of groundtruth scatterplot - all rescaling")
# splot=sns.scatterplot(x="size", y="overlap", hue="class", data=rescaled_set, s=5, marker='.', linewidth=0, palette=sns.color_palette("muted", n_colors=len(set(rescaled_set["class"]))))
# # splot.set(xscale="log")
# plt.savefig("ae_dive2_rescaled_scatter_plot_"+str(r), bbox_inches='tight')
# plt.clf()
# for r in set(ae_dive3_experiments["rescaled"]):
# rescaled_set = ae_dive3_experiments[ae_dive3_experiments['rescaled']==r]
# axes = plt.gca()
# axes.set_xscale('log')
# axes.set_xlim([1, 30000])
# axes.set_ylim([-0.01, 1])
# axes.set_ylabel("IOU scores")
# axes.set_xlabel("Size of groundtruth segment in pixels")
# plt.legend()
# plt.title("IOU to size of groundtruth scatterplot - all rescaling")
# splot=sns.scatterplot(x="size", y="overlap", hue="class", data=rescaled_set, s=5, marker='.', linewidth=0, palette=sns.color_palette("muted", n_colors=len(set(rescaled_set["class"]))))
# # splot.set(xscale="log")
# plt.savefig("ae_dive3_rescaled_scatter_plot_"+str(r), bbox_inches='tight')
# plt.clf()
for c in set(ts_experiments["gt_class"]):
class_set = ts_experiments[ts_experiments['gt_class']==c]
axes = plt.gca()
axes.set_xscale('log')
axes.set_xlim([1, 30000])
axes.set_ylim([-0.01, 1])
axes.set_ylabel("IOU scores")
axes.set_xlabel("Size of groundtruth segment in pixels")
plt.legend()
plt.title("IOU to size of groundtruth scatterplot - all rescaling")
splot=sns.scatterplot(x="gt_size", y="overlap", hue="rescaled", data=class_set, s=5, marker='.', linewidth=0, palette=sns.color_palette("muted", n_colors=len(set(class_set["rescaled"]))))
# splot.set(xscale="log")
plt.savefig("ts_class_scatter_plot_"+str(c).split('.')[0], bbox_inches='tight')
plt.clf()
# for c in set(ae_dive1_experiments["class"]):
# class_set = ae_dive1_experiments[ae_dive1_experiments['class']==c]
# axes = plt.gca()
# axes.set_xscale('log')
# axes.set_xlim([1, 3000000])
# axes.set_ylim([-0.01, 1])
# axes.set_ylabel("IOU scores")
# axes.set_xlabel("Size of groundtruth segment in pixels")
# plt.legend()
# plt.title("IOU to size of groundtruth scatterplot - all rescaling")
# splot=sns.scatterplot(x="size", y="overlap", hue="rescaled", data=class_set, s=5, marker='.', linewidth=0, palette=sns.color_palette("muted", n_colors=len(set(class_set["rescaled"]))))
# # splot.set(xscale="log")
# plt.savefig("ae_dive1_class_scatter_plot_"+str(c).split('.')[0], bbox_inches='tight')
# plt.clf()
# for c in set(ae_dive2_experiments["class"]):
# class_set = ae_dive2_experiments[ae_dive2_experiments['class']==c]
# axes = plt.gca()
# axes.set_xscale('log')
# axes.set_xlim([1, 3000000])
# axes.set_ylim([-0.01, 1])
# axes.set_ylabel("IOU scores")
# axes.set_xlabel("Size of groundtruth segment in pixels")
# plt.legend()
# plt.title("IOU to size of groundtruth scatterplot - all rescaling")
# splot=sns.scatterplot(x="size", y="overlap", hue="rescaled", data=class_set, s=5, marker='.', linewidth=0, palette=sns.color_palette("muted", n_colors=len(set(class_set["rescaled"]))))
# # splot.set(xscale="log")
# plt.savefig("ae_dive2_class_scatter_plot_"+str(c).split('.')[0], bbox_inches='tight')
# plt.clf()
# # axes.set_xlim([0, 30000])
# for c in set(ae_dive3_experiments["class"]):
# class_set = ae_dive3_experiments[ae_dive3_experiments['class']==c]
# axes = plt.gca()
# axes.set_xscale('log')
# axes.set_xlim([1, 3000000])
# axes.set_ylim([-0.01, 1])
# axes.set_ylabel("IOU scores")
# axes.set_xlabel("Size of groundtruth segment in pixels")
# plt.legend()
# plt.title("IOU to size of groundtruth scatterplot - all rescaling")
# splot=sns.scatterplot(x="size", y="overlap", hue="rescaled", data=class_set, s=5, marker='.', linewidth=0, palette=sns.color_palette("muted", n_colors=len(set(class_set["rescaled"]))))
# # splot.set(xscale="log")
# plt.savefig("ae_dive3_class_scatter_plot_"+str(c).split('.')[0], bbox_inches='tight')
# plt.clf()
def plot_colour_correction_stackedarea(experiments, name):
plt.clf()
axes = plt.gca()
axes.set_ylim([0, 4.6])
fig, ax = plt.subplots()
new_experiments = []
print(experiments.keys())
for index, experiment in experiments.iterrows():
print(experiment)
for value in [
"val_mrcnn_class_loss",
"val_mrcnn_bbox_loss",
"val_mrcnn_mask_loss",
"mrcnn_bbox_loss",
"val_rpn_bbox_loss",
"mrcnn_mask_loss",
"rpn_class_loss",
"rpn_bbox_loss",
"val_loss",
"mrcnn_class_loss",
"val_rpn_class_loss",
"loss",
"overlaps"
]:
experiment[value] = stringlist_to_list(experiment[value])
new_experiments.append(experiment)
experiments_dataframe = pd.DataFrame(new_experiments)
print(type(experiments_dataframe["val_mrcnn_class_loss"]))
experiments_dataframe = group_experiments_by_epoch(experiments_dataframe, "rescaled")
print(type(experiments_dataframe["val_mrcnn_class_loss"]))
sns.set(rc={"lines.linewidth": 0.3})
plt.clf()
axes = plt.gca()
axes.set_ylim([0, 4.6])
fig, ax = plt.subplots()
for i, experiment in experiments_dataframe.iterrows():
y = [
experiment["rpn_bbox_loss"],
experiment["rpn_class_loss"],
experiment["mrcnn_bbox_loss"],
experiment["mrcnn_mask_loss"],
experiment["mrcnn_class_loss"],
]
labels = [
"rpn_bbox_loss",
"rpn_class_loss",
"mrcnn_bbox_loss",
"mrcnn_mask_loss",
"mrcnn_class_loss",
]
x = range(1, 101)
axes = plt.gca()
axes.set_ylim([0, 4.6])
axes.set_ylabel("Training Loss")
axes.set_xlabel("Training Epoch")
print(x)
print(y)
plt.stackplot(x, y, labels=labels)
plt.legend()
# plt.title(experiment["rescaled"])
print(name)
plt.savefig(str(name) + str(experiment["rescaled"]) + "_stacked_loss", bbox_inches='tight')
plt.clf()
axes = plt.gca()
axes.set_ylim([0, 4.6])
y = [
experiment["val_rpn_bbox_loss"],
experiment["val_rpn_class_loss"],
experiment["val_mrcnn_bbox_loss"],
experiment["val_mrcnn_mask_loss"],
experiment["val_mrcnn_class_loss"],
]
labels = [
"val_rpn_bbox_loss",
"val_rpn_class_loss",
"val_mrcnn_bbox_loss",
"val_mrcnn_mask_loss",
"val_mrcnn_class_loss",
]
plt.stackplot(x, y, labels=labels)
axes.set_ylabel("Validation Loss")
axes.set_xlabel("Training Epoch")
plt.legend()
# plt.title(experiment["rescaled"])
plt.savefig(str(name) + str(experiment["rescaled"]) + "_stacked_val_loss", bbox_inches='tight')
plt.clf()
def compile_dataframes():
all_filenames=os.listdir("./maskandclassloss")
#combine all files in the list
    combined_csv = pd.concat([pd.read_csv("./maskandclassloss/"+f) for f in all_filenames])
#!/usr/bin/env python3
"""
LINCS REST API client
New (2019) iLINCS:
http://www.ilincs.org/ilincs/APIinfo
http://www.ilincs.org/ilincs/APIdocumentation
(http://lincsportal.ccs.miami.edu/dcic/api/ DEPRECATED?)
"""
###
import sys,os,re,json,logging
import urllib,urllib.parse
import pandas as pd
#
from ..util import rest
#
API_HOST="www.ilincs.org"
API_BASE_PATH="/api"
BASE_URL='https://'+API_HOST+API_BASE_PATH
#
#############################################################################
def GetGene(ids, base_url=BASE_URL, fout=None):
tags=None; df=pd.DataFrame();
for id_this in ids:
url = base_url+'/GeneInfos/'+id_this
rval = rest.Utils.GetURL(url, parse_json=True)
logging.debug(json.dumps(rval, indent=2))
if not tags:
tags = [tag for tag in rval.keys() if type(rval[tag]) not in (list, dict)]
gene = rval
df = pd.concat([df, pd.DataFrame({tags[j]:[gene[tags[j]]] for j in range(len(tags))})])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"IDs: {len(ids)}")
return df
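# Example usage (sketch; the ID below is a placeholder, not a verified iLINCS gene identifier):
#   import sys
#   GetGene(["100"], fout=sys.stdout)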
#############################################################################
def GetDataset(ids, base_url=BASE_URL, fout=None):
tags=None; df=pd.DataFrame();
for id_this in ids:
url = base_url+'/PublicDatasets/'+id_this
rval = rest.Utils.GetURL(url, parse_json=True)
logging.debug(json.dumps(rval, indent=2))
if not tags:
tags = [tag for tag in rval.keys() if type(rval[tag]) not in (list, dict)]
dset = rval
df = pd.concat([df, pd.DataFrame({tags[j]:[dset[tags[j]]] for j in range(len(tags))})])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"IDs: {len(ids)}")
return df
#############################################################################
def GetCompound(ids, base_url=BASE_URL, fout=None):
tags=None; df=pd.DataFrame();
for id_this in ids:
url = base_url+'/Compounds/%s'%id_this
rval = rest.Utils.GetURL(url, parse_json=True)
logging.debug(json.dumps(rval, indent=2))
if not tags:
tags = [tag for tag in rval.keys() if type(rval[tag]) not in (list, dict)]
cpd = rval
df = pd.concat([df, pd.DataFrame({tags[j]:[cpd[tags[j]]] for j in range(len(tags))})])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"IDs: {len(ids)}; n_cpd: {df.shape[0]}")
return df
#############################################################################
def ListCompounds(base_url=BASE_URL, fout=None):
n_out=0; tags=None; df=pd.DataFrame();
skip=0; nchunk=100;
while True:
filter_arg = """%7B"skip"%3A"""+str(skip)+"""%2C"limit"%3A"""+str(nchunk)+"""%7D"""
#url = f"{base_url}/Compounds?filter={urllib.parse.quote(filter_arg)}"
url = f"{base_url}/Compounds?filter={filter_arg}"
rval = rest.Utils.GetURL(url, parse_json=True)
if not rval: break
compounds = rval
for compound in compounds:
logging.debug(json.dumps(compound, indent=2))
if not tags:
tags = [tag for tag in compound.keys() if type(compound[tag]) not in (list, dict)]
df_this = pd.DataFrame({tags[j]:[compound[tags[j]]] for j in range(len(tags))})
if fout is None:
df = pd.concat([df, df_this])
else:
df_this.to_csv(fout, "\t", index=False, header=bool(n_out==0))
n_out += df_this.shape[0]
skip += nchunk
logging.info(f"n_out: {n_out}")
if fout is None: return df
#############################################################################
def SearchDataset(searchTerm, lincs, base_url=BASE_URL, fout=None):
tags=None; df=pd.DataFrame();
url = base_url+'/PublicDatasets/findTermMeta'
d = {'term':searchTerm}
if lincs: d['lincs'] = True
rval = rest.Utils.PostURL(url, data=d, parse_json=True)
logging.debug(json.dumps(rval, indent=2))
dsets = rval['data'] if 'data' in rval else []
for dset in dsets:
logging.debug(json.dumps(dset, indent=2))
if not tags:
tags = [tag for tag in dset.keys() if type(dset[tag]) not in (list, dict)]
df = pd.concat([df, pd.DataFrame({tags[j]:[dset[tags[j]]] for j in range(len(tags))})])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"Datasets: {df.shape[0]}")
return df
#############################################################################
def SearchSignature(ids, lincs, base_url=BASE_URL, fout=None):
#SignatureMeta?filter={"where":{"lincspertid":"LSM-2121"},"limit":10}
tags=None; df=pd.DataFrame();
for id_this in ids:
url = base_url+'/SignatureMeta?filter={"where":{"lincspertid":"'+id_this+'"}}'
rval = rest.Utils.GetURL(url, parse_json=True)
logging.debug(json.dumps(rval, indent=2))
sigs = rval
for sig in sigs:
logging.debug(json.dumps(sig, indent=2))
if not tags:
tags = [tag for tag in sig.keys() if type(sig[tag]) not in (list, dict)]
df = pd.concat([df, pd.DataFrame({tags[j]:[sig[tags[j]]] for j in range(len(tags))})])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"IDs: {len(ids)}; n_sig: {df.shape[0]}")
return df
#############################################################################
def GetSignature(ids, ngene, base_url=BASE_URL, fout=None):
  tags=None; df=pd.DataFrame();
from typing import Any
import pandas as pd
from sklearn.model_selection import train_test_split
from error_consistency.consistency import (
ErrorConsistencyKFoldHoldout,
ErrorConsistencyKFoldInternal,
)
from error_consistency.testing.loading import CLASSIFIERS, DATA, OUTDIR
def test_classifiers_holdout(capsys: Any) -> None:
with capsys.disabled():
for dataset_name, (x, y) in DATA.items():
print(f"Preparing {dataset_name} data...")
x, x_test, y, y_test = train_test_split(x, y, test_size=0.2)
            df_data = pd.DataFrame()
"""
GIS For Electrification (GISEle)
Developed by the Energy Department of Politecnico di Milano
Initialization Code
Code for importing input GIS files, perform the weighting strategy and creating
the initial Point geodataframe.
"""
import os
import math
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
from gisele.functions import s
import osmnx as ox
def roads_import(geo_df,crs):
'''
Download road layers from OpenStreetMaps, and simplify geometries to use
them for the grid routing algorithms
    :param geo_df: study-area GeoDataFrame, used to get the bounding box
    :param crs: EPSG code of the target projected coordinate system
    :return:
'''
geo_df.to_crs(epsg=4326,inplace=True)
bounds = geo_df.geometry.total_bounds
print('Downloading roads, from OpenStreetMap..')
graph = ox.graph_from_bbox(bounds[1], bounds[3],
bounds[0], bounds[2], network_type='drive_service')
ox.save_graph_shapefile(graph,
filepath='Output/Datasets/Roads') # crs is 4326
#simplify geometry
roads = gpd.read_file('Output/Datasets/Roads/edges.shp')
roads_simple = roads.geometry.simplify(tolerance=0.0005)
roads_simple = roads_simple.to_crs(epsg=int(crs))
roads_simple.to_file('Output/Datasets/Roads/roads.shp')
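# Example usage (sketch; assumes `geo_df` is the study-area GeoDataFrame and `crs`
# is a metric EPSG code appropriate for the area, e.g. 32632):
#   roads_import(geo_df, crs=32632)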
def weighting(df, resolution, landcover_option):
"""
Assign weights to all points of a dataframe according to the terrain
characteristics and the distance to the nearest road.
    :param df: Point dataframe with the terrain attributes of the study area
    :param resolution: resolution of the dataframe df
    :param landcover_option: landcover dataset used to assign the land-cover weights
    :return df_weighted: Point dataframe with weight attributes assigned
"""
df_weighted = df.dropna(subset=['Elevation'])
    df_weighted.reset_index(drop=True, inplace=True)
df_weighted.Slope.fillna(value=0, inplace=True)
df_weighted.Land_cover.fillna(method='bfill', inplace=True)
df_weighted['Land_cover'] = df_weighted['Land_cover'].round(0)
df_weighted.Population.fillna(value=0, inplace=True)
df_weighted['Weight'] = 0
print('Weighting the Dataframe..')
os.chdir(r'general_input//')
    landcover_csv = pd.read_csv('Landcover.csv')
import streamlit as st
from PIL import Image
import cv2
import numpy as np
from matplotlib import pyplot as plt
from skimage import data
from skimage.color import rgb2gray
from skimage.feature import corner_harris, corner_subpix, corner_peaks, hessian_matrix_det
from skimage.filters import difference_of_gaussians
import pandas as pd
def main():
selected_box = st.sidebar.selectbox(
'Choose one of the following',
('Keypoints/Descriptors', 'Harris Detector', 'Hessian Detector', 'Difference of Gaussian', 'Scale-Invariant Descriptors')
)
if selected_box == 'Keypoints/Descriptors':
keypoints_descriptors()
if selected_box == 'Harris Detector':
harris_detector()
if selected_box == 'Hessian Detector':
Hessian_detector()
if selected_box == 'Difference of Gaussian':
DoG()
if selected_box == 'Scale-Invariant Descriptors':
Scale_Invar()
def welcome():
st.title('Feature Detection using Streamlit')
st.subheader('A simple app that shows different image processing algorithms. You can choose the options'
+ ' from the left. I have implemented only a few to show how it works on Streamlit. ' +
'You are free to add stuff to this app.')
st.image('Library.jpg',use_column_width=True)
def load_image(filename):
image = cv2.imread(filename)
return image
def keypoints_descriptors():
st.header('Keypoints and Descriptors')
st.latex(r'''
\text{\underline{Keypoints} are specific locations of interest in an image:}
''')
st.latex(r'''
\text{eyes, mouth, nose, mountains, buildings, or corners.}
''')
st.latex(r'''
\text{A \underline{keypoint descriptor} or \underline{patch} is a vector that describes the appearance of the area surrounding a keypoint.}
''')
st.image('pandafeatures.png', use_column_width=True,clamp = True)
## gives more information about the local region surrounding the keypoint
##example image here
st.latex(r'''
\text{Keypoints and keypoint descriptors are useful for object detection, or facial recognition.}
''')
st.latex(r'''
\text{As well as for image stitching, 3D reconstruction, or camera pose estimation.}
''')
## things that require matching between various images to reach an end goal. panorama
st.latex(r'''
\text{...}
''')
st.header('Method: Detection, Description and Matching')
st.latex(r'''
\text{1.) Find keypoints.}
''')
st.latex(r'''
\text{2.) Take patches surrounding the keypoints. i.e. keypoint descriptors}
''')
st.latex(r'''
\text{3.) Match patches between images.}
''')
st.latex(r'''
\text{...}
''')
st.subheader('Basic Intuition for Keypoint Selection')
st.latex(r'''
\color{green}\text{Good}\color{black}\text{ keypoints should be unique and allow for easy recognition and matching across various images.}
''')
st.latex(r'''
\color{red}\text{Bad}\color{black}\text{ keypoints are things such as flat regions, or regions with little deviation across x and y.}
''')
st.image('houseexample.png', use_column_width=True,clamp = True)
st.latex(r'''
\text{...}
''')
st.subheader('Additional Desireable Properties')
st.latex(r'''
\text{We need a certain quantity of patches, to successfully match between images.}
''')
st.latex(r'''
\text{Invariant to translation, rotation, and scale.}
''')
st.latex(r'''
\text{Resistant to affine transformations.}
''')
st.latex(r'''
\text{Resistant to lighting, color, or noise variations.}
''')
st.latex(r'''
\text{...}
''')
st.subheader('Now we will see some various detectors...')
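# A minimal sketch (not wired into the app) of the detect -> describe -> match
# pipeline outlined above, using ORB keypoints and descriptors from OpenCV.
# The default image filenames are placeholders taken from this repo's images.
def orb_match_example(path1='Library.jpg', path2='Building.jpeg', n_matches=20):
    img1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)
    orb = cv2.ORB_create()
    # 1.) find keypoints and 2.) compute descriptors (patches) around them
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # 3.) match descriptors between the two images (Hamming distance suits ORB's binary descriptors)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)
    return cv2.drawMatches(img1, kp1, img2, kp2, matches[:n_matches], None)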
def harris_detector():
st.header("Harris Detector")
st.latex(r'''
\text{The basic idea behind the harris detector is that}
''')
st.image('harris_example.png',use_column_width=True)
st.latex(r'''
\color{red}\text{a flat region:} \color{black}\text{ no change in all directions.}
''')
st.latex(r'''
\color{red}\text{ an edge:}\color{black}\text{ no change along the edge direction.}
''')
st.latex(r'''
\color{green}\text{ a corner:}\color{black}\text{ significant changes in all directions.}
''')
st.latex(r'''...''')
st.latex(r'''
E(u,v) = \sum_{x,y}\overbrace{w(x,y)}^{\text{window function}}\, [\, \underbrace{I(x+u,y+v)}_{\text{shifted intensity}}
- \underbrace{I(x,y)}_{\text{intensity}}\, ]^2
''')
st.latex(r'''...''')
st.latex(r'''
\text{ If we look at the second term,}
''')
st.latex(r'''
\text{for flat regions,}\, [I(x+u,y+v) -I(x,y)]^2 \approx 0
''')
st.latex(r'''
\text{ and for distinct regions,}\, [I(x+u,y+v) -I(x,y)]^2 \approx large
''')
st.latex(r'''
\text{For corner detection we wish to } \color{red}\text{maximize}\,\color{black} E(u,v)
''')
st.latex(r'''\downarrow''')
st.latex(r'''math''')
st.latex(r'''\downarrow''')
st.latex(r'''
E(u,v) \approx \begin{bmatrix}
u & v\\
\end{bmatrix}
M
\begin{bmatrix}
u\\
v
\end{bmatrix}
''')
st.latex(r'''
M= \sum_{x,y}w(x,y)
\begin{bmatrix}
I_x I_x & I_x I_y\\
I_y I_x & I_y I_y
\end{bmatrix}
''')
st.latex(r'''
\text{Where } Ix \text{ and } Iy \text{ are image derivatives in x and y directions.}
''')
st.latex(r'''
\text{These can be found using the sobel kernel.}
''')
st.latex(r'''
G_x=
\begin{bmatrix}
-1 & 0 & 1\\
-2 & 0 & 2\\
-1 & 0 & 1
\end{bmatrix},\quad
\,\,\,G_y=
\begin{bmatrix}
1 & 2 & 1\\
0 & 0 & 0\\
-1 & -2 & -1
\end{bmatrix}
''')
st.latex(r'''...''')
st.latex(r'''
\text{A scoring function R is created, which determines if a corner is captured in a window}
''')
st.latex(r'''
R = det\,M-k(\,\,Tr[M]\,\,)^2
''')
st.latex(r''' \quad det\,M = \lambda_1 \lambda_2 \quad \textrm{\&} \quad Tr[M] = \lambda_1 + \lambda_2
''')
st.latex(r'''...''')
st.latex(r'''
\text{Thresholding to R:}''')
st.image('eigenvalues.png', use_column_width=True,clamp = True)
st.latex(r'''
\text{R}\approx \text{small} \implies \color{red}\text{flat region}
''')
st.latex(r'''
\text{R}< 0 \implies \color{red}\text{edge}
''')
st.latex(r'''
\text{R}\approx{large}\implies \color{green}\text{corner}
''')
filename = st.selectbox(
'Which image do you want to process?',
('UCSB_Henley_gate.jpg', 'Building.jpeg', 'checkerboard.png','Library.jpg'))
# sliders ------------------------------------------------------------------------------------------
thresh = st.slider('Change Threshold', min_value=0.0000, max_value=.5000,step=0.0001, format='%f')
block_size = st.slider('Change Block Size', min_value=2, max_value=10)
aperture_size = st.slider('Change Aperture', min_value=1, max_value=31,step=2)
k = st.slider('Harris Detector Free Variable', min_value=0.0000, max_value=.1000,step=0.0001,value=0.04, format='%f')
iteration_count = st.slider('Change Dilation', min_value=1, max_value=100, value=2)
# harris detector processing ------------------------------------------------------------------------
img = cv2.imread(filename)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray, block_size, aperture_size, k)
# dilation of the points
dst = cv2.dilate(dst, None, iterations=iteration_count)
# Thresholding
img[dst > thresh * dst.max()] = [0,0,255]
st.image(img, use_column_width=True,channels="BGR")
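# Illustrative sketch (not called by the app): computing the Harris response
# R = det(M) - k * trace(M)^2 directly from Sobel gradients, mirroring the
# formulas shown above; a Gaussian acts as the window function w(x, y).
def harris_response(gray, k=0.04, ksize=3, sigma=1.0):
    gray = np.float32(gray)
    Ix = cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=ksize)
    Iy = cv2.Sobel(gray, cv2.CV_32F, 0, 1, ksize=ksize)
    # Entries of the structure tensor M, smoothed by the window function
    Ixx = cv2.GaussianBlur(Ix * Ix, (0, 0), sigma)
    Iyy = cv2.GaussianBlur(Iy * Iy, (0, 0), sigma)
    Ixy = cv2.GaussianBlur(Ix * Iy, (0, 0), sigma)
    det_M = Ixx * Iyy - Ixy * Ixy
    trace_M = Ixx + Iyy
    return det_M - k * trace_M ** 2  # large positive values indicate corners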
def Hessian_detector():
#<NAME>
st.header("Feature Detection with Hessian Detector")
st.subheader("How it works:")
st.write("1. The Hessian of the image corresponds to the curvature of the image based on its pixel values.")
st.latex(r'''
H(I) = \begin{bmatrix}
I_{xx} & I_{xy} \\
I_{xy} & I_{yy}
\end{bmatrix}
''')
st.write("2. When we perform the eigenvalue decomposition of H(I)(x,y), the eigenvectors correspond to the direction of greatest and lowest curvature and their respective eigenvalues correspond to the magnitude of curvature")
st.latex(r'''
eig(H(I)) = \left\{
\begin{array}{ll}
\underline{e_1} , \lambda_1 \text{=> Greatest curvature}\\
\underline{e_2} , \lambda_2 \text{=>Lowest curvature}
\end{array}
\right.
''')
st.write("3. Since we are only interested in the strength of curvature we can simply take the determinant of H to yield the overall curvature strength for all x,y coordinates")
st.latex(r'''
det(H) => \lambda_1 * \lambda_2
''')
st.write("4. Threshold the determinant \"image\" to yield our coordinate features!")
st.subheader("Hessian Detector Demo")
image_file = st.file_uploader("Upload Image", type=["png","jpg","jpeg"])
if image_file is not None:
image = Image.open(image_file)
img = np.array(image)
img_rgb = img
else:
img = load_image('Banff.jpg')
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
x,y = img_gray.shape
rad = int(0.0065 * x)
max_dis= 10*int( 0.004 *x)
thres = st.slider('Change Threshold value',min_value = 0.01,max_value = 0.5, value=0.05)
min_dis = st.slider('Change Minimum Distance',min_value = 1,max_value = max_dis)
#st.image(img_gray, use_column_width=True)
dets = hessian_matrix_det(img_gray)
#st.image(-dets, clamp = True, channels = 'gray')
coords_hessian = corner_peaks(hessian_matrix_det(img_gray), min_distance=min_dis, threshold_rel=thres)
st.text("Hessian Features Detected")
HesImg = img_rgb
for (y,x) in coords_hessian:
HesImg = cv2.circle(HesImg, (x,y), radius=rad, color=(255,0,0), thickness=-1)
st.image(HesImg, use_column_width=True,clamp = True)
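# Sketch (not used by the app): the determinant-of-Hessian response can be assembled
# directly from the Hessian components; hessian_matrix_det used above computes a fast
# approximation of the same quantity. `sigma` is an assumed smoothing scale.
def det_of_hessian(gray, sigma=3.0):
    from skimage.feature import hessian_matrix
    Hrr, Hrc, Hcc = hessian_matrix(gray, sigma=sigma, order='rc')
    # det(H) = Ixx * Iyy - Ixy^2 -> strong response at blob/corner-like structures
    return Hrr * Hcc - Hrc ** 2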
def sigmoid(x,s):
#<NAME>
if (s == 0):
l = len(x)
s = np.zeros(l)
hf= l//2
s[hf:l] = 1
sig = s
else:
z = np.exp(-x/s)
sig = 1 / (1 + z)
return sig
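# Sketch (not wired into the UI): applying an actual difference-of-Gaussians
# band-pass filter to an image with skimage; DoG() below illustrates the same
# idea conceptually with sigmoids. The sigma values here are arbitrary examples.
def dog_filter_example(gray, low_sigma=1.0, high_sigma=10.0):
    # Subtracting a heavily blurred copy from a lightly blurred one keeps only
    # structures between the two scales (a band-pass response).
    return difference_of_gaussians(gray, low_sigma, high_sigma)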
def DoG():
## <NAME>
st.header("Difference of Gaussian Detector")
st.subheader("How it works:")
st.write("1. We take two blurred versions of the image w.r.t two sigmas")
    sig0 = st.slider('Select two sigmas', 0.0, 10.0, (0.0, 0.0))
    st.write("2. We subtract the two blurred images and yield a bandpass filtered image")
x = np.arange(-5,5,0.01, dtype = float)
s0 = sigmoid(x,0)
s1 = sigmoid(x,sig0[0])
s2 = sigmoid(x,sig0[1])
s3 = s2-s1
s = np.stack((s0,s1,s2,s3),axis=1)
    df = pd.DataFrame(s, columns=['Edge','s1','s2',"s2-s1"])
import unittest
import pandas as pd
import pytest
import riptable as rt
# N.B. TL;DR We have to import the actual implementation module to override the module global
# variable "tm.N" and "tm.K".
# In pandas 1.0 they move the code from pandas/util/testing.py to pandas/_testing.py.
# The "import pandas.util.testing" still works but because it doesn't contain the actual code
# our attempt to override the "tm.N" and "tm.K" will not change the actual value for
# makeTimeDataFrame, which will produce data with different shape and make the test
# "test_accum_table" fail. Maybe we want to reconsider using the pandas internal testing utils.
try:
import pandas._testing as tm
except ImportError:
import pandas.util.testing as tm
from riptable import *
from numpy.testing import (
assert_array_equal,
assert_almost_equal,
assert_array_almost_equal,
)
from riptable.rt_numpy import arange
# To create AccumTable test data
from riptable.Utils.pandas_utils import dataset_from_pandas_df
from riptable.rt_datetime import DateTimeNano
tm.N = 3
tm.K = 5
class Accum2_Test(unittest.TestCase):
'''
TODO: add more tests for different types
'''
def test_accum2(self):
c = cut(arange(10), 3)
self.assertTrue(sum(c._np - FA([1, 1, 1, 1, 2, 2, 2, 3, 3, 3])) == 0)
c = cut(arange(10.0), 3)
self.assertTrue(sum(c._np - FA([1, 1, 1, 1, 2, 2, 2, 3, 3, 3])) == 0)
c = cut(arange(11), 3)
self.assertTrue(sum(c._np - FA([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3])) == 0)
c = cut(FA([2, 4, 6, 8, 10]), FA([0, 2, 4, 6, 8, 10]))
self.assertTrue(sum(c._np - FA([1, 2, 3, 4, 5])) == 0)
c = cut(
FA([2, 4, 6, 8, 10]),
FA([0, 2, 4, 6, 8, 10]),
labels=['a', 'b', 'c', 'd', 'e'],
)
self.assertTrue(sum(c._np - FA([1, 2, 3, 4, 5])) == 0)
def test_qcut(self):
c = qcut(arange(10), 3)
self.assertTrue(sum(c._np - FA([2, 2, 2, 2, 3, 3, 3, 4, 4, 4])) == 0)
c = qcut(arange(11), 3)
self.assertTrue(sum(c._np - FA([2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4])) == 0)
c = qcut(range(5), 3, labels=["good", "medium", "bad"])
self.assertTrue(sum(c._np - FA([2, 2, 3, 4, 4])) == 0)
c = cut(
FA([2, 4, 6, 8, 10]),
FA([0, 2, 4, 6, 8, 10]),
labels=['a', 'b', 'c', 'd', 'e'],
)
def test_cut_errors(self):
with self.assertRaises(ValueError):
c = cut(
FA([2, 4, 6, 8, 10]),
FA([0, 2, 4, 6, 8, 10]),
labels=['a', 'b', 'c', 'd', 'e', 'f'],
)
def test_simple_cats(self):
data = arange(1, 6) * 10
colnames = FastArray(['a', 'b', 'c', 'd', 'e'])
c1 = Categorical(colnames)
c2 = Categorical(arange(5))
# no filter
ac = Accum2(c2, c1)
result = ac.sum(data)
self.assertEqual(result._ncols, 7)
for i, colname in enumerate(colnames):
arr = result[colname]
self.assertEqual(arr[i], data[i])
def test_simple_cats_filter_accum(self):
data = arange(1, 6) * 10
colnames = FastArray(['a', 'b', 'c', 'd', 'e'])
c1 = Categorical(colnames)
c2 = Categorical(arange(5))
# filtered accum object
ac = Accum2(c2, c1, showfilter=True)
result = ac.sum(data)
self.assertEqual(result._ncols, 8)
for i, colname in enumerate(colnames):
arr = result[colname]
self.assertEqual(arr[i + 1], data[i])
def test_simple_cats_filter_operation(self):
data = arange(1, 6) * 10
colnames = FastArray(['a', 'b', 'c', 'd', 'e'])
c1 = Categorical(colnames)
c2 = Categorical(arange(5))
# filtered operation
ac = Accum2(c2, c1)
result = ac.sum(data, showfilter=True)
self.assertEqual(result._ncols, 8)
for i, colname in enumerate(colnames):
arr = result[colname]
self.assertEqual(arr[i + 1], data[i])
def test_multikey_cats(self):
unsorted_str = FastArray(['c', 'e', 'b', 'd', 'a'])
ints = arange(1, 6) * 10
data = np.random.rand(5) * 10
# unsorted no filter
c1 = Categorical([unsorted_str, ints])
c2 = Categorical([unsorted_str, ints])
ac = Accum2(c2, c1)
result = ac.sum(data)
self.assertEqual(result._ncols, 8)
for i, key1 in enumerate(unsorted_str):
k1 = bytes.decode(key1)
k2 = ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i], data[i])
# sorted no filter
sortidx = np.argsort(unsorted_str)
sorted_str = unsorted_str[sortidx]
sorted_ints = ints[sortidx]
sorted_data = data[sortidx]
c1 = Categorical([unsorted_str, ints], ordered=True)
c2 = Categorical([unsorted_str, ints], ordered=True)
ac = Accum2(c2, c1)
result = ac.sum(data)
self.assertEqual(result._ncols, 8)
for i, key1 in enumerate(sorted_str):
k1 = bytes.decode(key1)
k2 = sorted_ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i], sorted_data[i])
@pytest.mark.xfail(reason='20200416 This test was previously overridden by a later test in the file with the same name. Need to revisit and get back in a working state.')
def test_multikey_cats_filter_accum_sorted(self):
unsorted_str = FastArray(['c', 'e', 'b', 'd', 'a'])
ints = arange(1, 6) * 10
data = np.random.rand(5) * 10
# unsorted filter accum object
c1 = Categorical([unsorted_str, ints])
c2 = Categorical([unsorted_str, ints])
ac = Accum2(c2, c1, showfilter=True)
result = ac.sum(data)
self.assertEqual(result._ncols, 9)
for i, key1 in enumerate(unsorted_str):
k1 = bytes.decode(key1)
k2 = ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i + 1], data[i])
# sorted filter accum object
sortidx = np.argsort(unsorted_str)
sorted_str = unsorted_str[sortidx]
sorted_ints = ints[sortidx]
sorted_data = data[sortidx]
c1 = Categorical([unsorted_str, ints], sort_gb=True)
c2 = Categorical([unsorted_str, ints], sort_gb=True)
ac = Accum2(c2, c1, showfilter=True)
result = ac.sum(data)
self.assertEqual(result._ncols, 9)
for i, key1 in enumerate(sorted_str):
k1 = bytes.decode(key1)
k2 = sorted_ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
# TODO fix this regression that was masked due to duplicate test names
# self.assertAlmostEqual(arr[i + 1], sorted_data[i])
def test_multikey_cats_filter_accum_ordered(self):
unsorted_str = FastArray(['c', 'e', 'b', 'd', 'a'])
ints = arange(1, 6) * 10
data = np.random.rand(5) * 10
# unsorted filter accum object
c1 = Categorical([unsorted_str, ints])
c2 = Categorical([unsorted_str, ints])
ac = Accum2(c2, c1)
result = ac.sum(data, showfilter=True)
self.assertEqual(result._ncols, 9)
for i, key1 in enumerate(unsorted_str):
k1 = bytes.decode(key1)
k2 = ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i + 1], data[i])
# sorted filter accum object
sortidx = np.argsort(unsorted_str)
sorted_str = unsorted_str[sortidx]
sorted_ints = ints[sortidx]
sorted_data = data[sortidx]
c1 = Categorical([unsorted_str, ints], ordered=True)
c2 = Categorical([unsorted_str, ints], ordered=True)
ac = Accum2(c2, c1)
result = ac.sum(data, showfilter=True)
self.assertEqual(result._ncols, 9)
for i, key1 in enumerate(sorted_str):
k1 = bytes.decode(key1)
k2 = sorted_ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i + 1], sorted_data[i])
def test_dataset_accum2(self):
# test from accum2 off dataset and with a filter
ds = Dataset({'test': arange(10), 'data': arange(10) // 2})
x = ds.accum2('data', 'test').sum(ds.test, filter=ds.data == 3)
totalcol = x.summary_get_names()[0]
self.assertEqual(x[totalcol][3], 13)
def test_accum2_mean(self):
ds = Dataset({'time': arange(200.0)})
ds.data = np.random.randint(7, size=200)
ds.data2 = np.random.randint(7, size=200)
symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']
ds.symbol = Cat(1 + arange(200) % 5, symbols)
ac = Accum2(ds.data, ds.symbol).mean(ds.time)
totalcol = ac[ac.summary_get_names()[0]]
footer = ac.footer_get_values()['Mean']
for i in range(len(symbols)):
s_mean = ds[ds.symbol == symbols[i], :].time.mean()
self.assertEqual(footer[i + 1], s_mean)
for i in range(7):
s_mean = ds[ds.data == i, :].time.mean()
self.assertEqual(totalcol[i], s_mean)
def test_accum2_median(self):
ds = Dataset({'time': arange(200.0)})
ds.data = np.random.randint(7, size=200)
ds.data2 = np.random.randint(7, size=200)
symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']
ds.symbol = Cat(1 + arange(200) % 5, symbols)
ac = Accum2(ds.data, ds.symbol).median(ds.time)
totalcol = ac[ac.summary_get_names()[0]]
footer = ac.footer_get_values()['Median']
for i in range(len(symbols)):
s_median = ds[ds.symbol == symbols[i], :].time.median()
self.assertEqual(footer[i + 1], s_median)
for i in range(7):
s_median = ds[ds.data == i, :].time.median()
self.assertEqual(totalcol[i], s_median)
def test_accum2_nanmedian_with_filter(self):
ds = Dataset({'time': arange(200.0)})
ds.data = np.random.randint(7, size=200)
ds.data2 = np.random.randint(7, size=200)
symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']
# N.B. make a copy here for testing
symbol_categorical = Cat(1 + arange(200) % 5, symbols)
# N.B. Categorical.copy and Categorical constructor doesn't do deep copy?!
ds.symbol = Cat(1 + arange(200) % 5, symbols)
chosen_symbols = ['AMZN', 'AAPL']
filt = symbol_categorical.isin(chosen_symbols)
ac = Accum2(ds.data, ds.symbol)
stat1 = ac.nanmedian(ds.time, filter=filt)
totalcol = stat1[stat1.summary_get_names()[0]]
footer = stat1.footer_get_values()['Median']
# Make sure we don't change the input data
self.assertTrue(not rt.any(ds.symbol._fa == 0))
for sym in chosen_symbols:
s_median = rt.nanmedian(ds[symbol_categorical == sym, :].time)
i = rt.where(symbol_categorical.category_array == sym)[0].item()
self.assertEqual(footer[i + 1], s_median)
for i in range(7):
s_median = rt.nanmedian(ds[(ds.data == i) & filt, :].time)
self.assertEqual(totalcol[i], s_median)
chosen_symbols = ['IBM', 'FB']
filt = symbol_categorical.isin(chosen_symbols)
stat2 = ac.nanmedian(ds.time, filter=filt)
totalcol = stat2[stat2.summary_get_names()[0]]
footer = stat2.footer_get_values()['Median']
# Make sure we don't change the input data
self.assertTrue(not rt.any(ds.symbol._fa == 0))
for sym in chosen_symbols:
s_median = rt.nanmedian(ds[symbol_categorical == sym, :].time)
i = rt.where(symbol_categorical.category_array == sym)[0].item()
self.assertEqual(footer[i + 1], s_median)
for i in range(7):
s_median = rt.nanmedian(ds[(ds.data == i) & filt, :].time)
self.assertEqual(totalcol[i], s_median)
def test_showfilter_label_subclass(self):
d = Date.range('20190201', '20190210')
c = Categorical(d)
c2 = Categorical(arange(10))
ac = Accum2(c, c2)
result = ac.count(showfilter=True)
self.assertTrue(isinstance(result.YLabel, Date))
self.assertTrue(result.YLabel.isnan()[0])
d = DateTimeNano.random(10)
c = Categorical(d)
c2 = Categorical(arange(10))
ac = Accum2(c, c2)
result = ac.count(showfilter=True)
self.assertTrue(isinstance(result.YLabel, DateTimeNano))
self.assertTrue(result.YLabel.isnan()[0])
d = DateSpan(arange(10, 20))
c = Categorical(d)
c2 = Categorical(arange(10))
ac = Accum2(c, c2)
result = ac.count(showfilter=True)
self.assertTrue(isinstance(result.YLabel, DateSpan))
self.assertTrue(result.YLabel.isnan()[0])
d = TimeSpan(np.random.rand(10) * 10_000_000_000)
c = Categorical(d)
c2 = Categorical(arange(10))
ac = Accum2(c, c2)
result = ac.count(showfilter=True)
self.assertTrue(isinstance(result.YLabel, TimeSpan))
self.assertTrue(result.YLabel.isnan()[0])
def test_apply(self):
arrsize = 200
numrows = 7
ds = Dataset({'time': arange(arrsize * 1.0)})
ds.data = np.random.randint(numrows, size=arrsize)
ds.data2 = np.random.randint(numrows, size=arrsize)
symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']
ds.symbol = Cat(1 + arange(arrsize) % len(symbols), symbols)
ds.accum2('symbol', 'data').sum(ds.data2)
ds.accum2('symbol', 'data').sum(ds.data2, showfilter=True)
ds.accum2('symbol', 'data').median(ds.data2, showfilter=True)
ds.accum2('symbol', 'data').median(ds.data2, showfilter=False)
ds.accum2('symbol', 'data').apply_reduce(np.median, ds.data2, showfilter=True)
ds.accum2('symbol', 'data').apply_reduce(np.median, ds.data2, showfilter=False)
f = logical(arange(200) % 2)
ds.accum2('symbol', 'data').apply_reduce(np.median, ds.data2, filter=f)
ds.accum2('symbol', 'data').apply_reduce(
np.median, ds.data2, filter=f, showfilter=True
)
ds.accum2('symbol', 'data').median(ds.data2, filter=f, showfilter=True)
def test_apply_nonreduce(self):
arrsize = 200
numrows = 7
ds = rt.Dataset({'time': rt.arange(arrsize * 1.0)})
ds.data = arange(arrsize) % numrows
ds.data2 = (arange(arrsize) + 3) % numrows
symbols = [
'AAPL',
'AMZN',
'FB',
'GOOG',
'IBM',
'6',
'7',
'8',
'9',
'10',
'11',
'12',
'13',
'14',
'15',
'16',
'17',
'18',
]
ds.symbol = rt.Cat(1 + rt.arange(arrsize) % len(symbols), symbols)
result = ds.symbol.apply_reduce(
lambda x, y: np.sum(np.minimum(x, y)), (ds.data, ds.data)
)
ac = ds.accum2('symbol', 'data')
newds = ac.apply_nonreduce(np.cumsum)
ds2 = ac.apply_reduce(
lambda x, y: np.sum(np.maximum(x, y)), (newds.data, newds.data2)
)
x = np.maximum(newds.data, newds.data2)
y = ac.apply_nonreduce(
lambda x, y: np.maximum(x, y), (newds.data, newds.data2)
)[0]
self.assertTrue(np.all(x == y))
class AccumTable_Test(unittest.TestCase):
@pytest.mark.skip(reason="Test needs to be re-written to remove the np.random.seed usage -- it's not stable across numpy versions.")
def test_accum_table(self):
# Create the test data
def unpivot(frame):
N, K = frame.shape
data = {
'value': frame.values.ravel('F'),
'variable': np.asarray(frame.columns).repeat(N),
'date': np.tile(np.asarray(frame.index), K),
}
return pd.DataFrame(data, columns=['date', 'variable', 'value'])
np.random.seed(1234)
        df = unpivot(pd.concat([tm.makeTimeDataFrame(), tm.makeTimeDataFrame()]))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Code that calls the Wind wset function.
Approaches to downloading the data:
1. Bisecting over time downloads the least data, but once part of it has been downloaded,
   a one-position shift of the dates while backfilling forces everything to be downloaded again.
2. For files: take them in groups of three; if all three are identical, delete the middle one,
   and repeat until nothing more can be deleted, then stop.
"""
import os
import pandas as pd
from .utils import asDateTime
def download_sectorconstituent(w, date, sector, windcode, field='wind_code'):
"""
    Sector constituents.
    CITIC Securities level-1 industry indices: the date seems unnecessary, since calendar days are returned as well.
    Risk-warning stocks: the date is simply the query date and the stock names are the latest ones, so it is of little use.
w.wset("sectorconstituent","date=2017-03-03;sectorid=a001010100000000;field=wind_code")
w.wset("sectorconstituent","date=2017-03-03;sectorid=a001010100000000")
w.wset("sectorconstituent","date=2017-03-03;windcode=000300.SH")
:param w:
:param sector:
:param date:
:return:
"""
param = 'date=%s' % date
if sector:
param += ';sector=%s' % sector
if windcode:
param += ';windcode=%s' % windcode
if field:
param += ';field=%s' % field
w.asDateTime = asDateTime
w_wset_data = w.wset("sectorconstituent", param)
df = pd.DataFrame(w_wset_data.Data)
df = df.T
df.columns = w_wset_data.Fields
try:
df['date'] = pd.to_datetime(df['date'])
except KeyError:
pass
return df
def download_indexconstituent(w, date, windcode, field='wind_code,i_weight'):
"""
    Index constituent weights.
    If the specified date is not a trading day, information for the previous trading day is returned.
:param w:
:param windcode:
:param date:
:return:
"""
param = 'date=%s' % date
if windcode:
param += ';windcode=%s' % windcode
if field:
param += ';field=%s' % field
w.asDateTime = asDateTime
w_wset_data = w.wset("indexconstituent", param)
df = pd.DataFrame(w_wset_data.Data)
df = df.T
df.columns = w_wset_data.Fields
return df
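# Example usage (sketch; assumes `w` is a started WindPy session):
#   df = download_indexconstituent(w, '2017-03-03', '000300.SH')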
def download_optioncontractbasicinfo(w, exchange='sse', windcode='510050.SH', status='trading',
field='wind_code,trade_code,sec_name,contract_unit,listed_date,expire_date,reference_price'):
"""
    Basic information for listed option contracts on the given exchange and underlying.
:param w:
:param windcode:
:param date:
:return:
"""
param = 'exchange=%s' % exchange
param += ';windcode=%s' % windcode
param += ';status=%s' % status
if field:
param += ';field=%s' % field
w.asDateTime = asDateTime
w_wset_data = w.wset("optioncontractbasicinfo", param)
df = pd.DataFrame(w_wset_data.Data)
df = df.T
df.columns = w_wset_data.Fields
return df
def download_optionchain(w, date='2017-11-28', us_code='510050.SH',
field='option_code,option_name,strike_price,multiplier'):
"""
    Download the option chain for the specified date and underlying.
w_wset_data = vba_wset("optionchain","date=2017-11-28;us_code=510050.SH;option_var=全部;call_put=全部;field=option_code,option_name,strike_price,multiplier",)
:param w:
:param windcode:
:param date:
:return:
"""
param = 'date=%s' % date
param += ';us_code=%s' % us_code
if field:
param += ';field=%s' % field
w.asDateTime = asDateTime
w_wset_data = w.wset("optionchain", param)
df = pd.DataFrame(w_wset_data.Data)
df = df.T
df.columns = w_wset_data.Fields
return df
def read_constituent(path):
"""
    Read a constituent csv file.
:param path:
:return:
"""
try:
df = pd.read_csv(path, encoding='utf-8-sig', parse_dates=True)
except Exception as e:
return None
try:
df['date'] = pd.to_datetime(df['date'])
except KeyError:
pass
return df
def read_sectorconstituent_from_dir(path, key_field='wind_code'):
"""
    Read every file in a directory and concatenate the results.
:param path:
:param key_field:
:return:
"""
last_set = None
df = None
for parent, dirnames, filenames in os.walk(path):
for filename in filenames:
filepath = os.path.join(parent, filename)
curr_df = read_constituent(filepath)
            # Consecutive files are often identical; keep only the first of each
            # identical run to speed up processing.
curr_set = set(curr_df[key_field])
if last_set == curr_set:
last_set = curr_set
continue
last_set = curr_set
data_date_str = filename[:-4]
curr_df['_datetime_'] = pd.to_datetime(data_date_str)
if df is None:
df = curr_df
else:
df = pd.concat([df, curr_df])
return df
def write_constituent(path, df):
df.to_csv(path, encoding='utf-8-sig', date_format='%Y-%m-%d', index=False)
def read_indexconstituent_from_dir(path):
"""
    Because the weights change every day, the files can only be downloaded for the dates the user specifies.
:param path:
:return:
"""
last_set = None
df = None
for parent, dirnames, filenames in os.walk(path):
for filename in filenames:
filepath = os.path.join(parent, filename)
curr_df = read_constituent(filepath)
            # On 2016-12-12 new constituents were added whose weights were NaN.
curr_df.fillna(0, inplace=True)
data_date_str = filename[:-4]
curr_df['_datetime_'] = pd.to_datetime(data_date_str)
if df is None:
df = curr_df
else:
df = pd.concat([df, curr_df])
return df
def download_corporationaction(w, startdate, enddate, windcode):
"""
    Dividends, bonus shares and share conversions.
    To get the dividend events of a single day, set startdate and enddate to the same day
    and leave windcode unset.
    In practice, querying a single code such as 600000.SH shows that very old records may be missing.
> w.wset('CorporationAction','startdate=20150605;enddate=20150605')
:param w:
:param windcode:
:param date:
:return:
"""
param = 'startdate=%s' % startdate
if enddate:
param += ';enddate=%s' % enddate
if windcode:
param += ';windcode=%s' % windcode
w.asDateTime = asDateTime
w_wset_data = w.wset("corporationaction", param)
df = | pd.DataFrame(w_wset_data.Data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import prep_for_model_runs as prep
import build_models as build
import modify_contact as mod
import model_params_class as mp
import calc_summary_stat as summ
"""Build model for policy intervention 1
This function is the same as build_model in build_models.py with the exception
of the additional parameter, group_size_p1. This is the group_df_p1 data.frame
from generate_matrix.py that reduces the prison release population a certain
number of days after the SIP_DATE.
"""
def build_model_p1(group_size_data, TIME, SIP_DATE, contact_matrix1, contact_matrix2,
transmission_rate, prison_peak_rate, prison_peak_date, group_size_p1,
jail_release_date):
Group_Names, Group_Size, initial_sizes, recovery_rates = build.build_initial_values(group_size_data)
susceptible_rows = []
infected_rows = []
recovered_rows = []
lambda_matrix = contact_matrix1 * transmission_rate
S_t, I_t, R_t = initial_sizes
susceptible_rows.append(S_t)
infected_rows.append(I_t)
recovered_rows.append(R_t)
white_prison_i = np.where(Group_Names == 'White_Prison')
black_prison_i = np.where(Group_Names == 'Black_Prison')
k1, k2 = prison_rate_build(
Group_Size, prison_peak_date, white_prison_i, black_prison_i, prison_peak_rate)
#represents new infections per day
delta_i = [R_t]
for i in range(0, TIME):
if i == SIP_DATE - 1:
lambda_matrix = contact_matrix2 * transmission_rate
if i == jail_release_date - 1:
            Group_Size = group_size_p1['Group_Size'].values
            # NOTE: assumes group_size_p1 carries the reduced sizes in a
            # 'Group_Size' column, matching the format of group_size_data.
k1, k2 = prison_rate_build(Group_Size, prison_peak_date, white_prison_i,
black_prison_i, prison_peak_rate)
# multiplying k*k contact matrix * k*1 vetor of proportion of group infected
#l is a vector with length k
l = np.squeeze(np.asarray(np.dot(lambda_matrix, I_t/Group_Size)))
#this is the number of new infections
contacts = l * S_t #force of infection * number Susceptible by group
delta_i.append(contacts)
I_14 = R_t[0]
if i >= 14:
I_14 = delta_i[i-14]
dSdt = - contacts
dIdt = contacts - recovery_rates * I_14
dRdt = recovery_rates * I_14
S_t = S_t + dSdt
I_t = I_t + dIdt
R_t = R_t + dRdt
if i <= prison_peak_date:
I_t[white_prison_i] = np.exp(i*k1)
I_t[black_prison_i] = np.exp(i*k2)
S_t[white_prison_i] = Group_Size[white_prison_i] - np.exp(i*k1)
            S_t[black_prison_i] = Group_Size[black_prison_i] - np.exp(i*k2)
susceptible_rows.append(S_t)
infected_rows.append(I_t)
recovered_rows.append(R_t)
s = pd.DataFrame(susceptible_rows, columns=Group_Names)
i = pd.DataFrame(infected_rows, columns=Group_Names)
r = | pd.DataFrame(recovered_rows, columns=Group_Names) | pandas.DataFrame |
# %% Imports
import pandas as pd
import re
def flatten_columns(df):
    df.columns = ["_".join(col) for col in df.columns.ravel()]
df.columns = [re.sub(r'_$', '', col) for col in df.columns]
return df
def rename_cols_3m(df):
df.columns = [f"{col}_3m" if col not in INDEX else col for col in df.columns]
return df
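# Hedged, self-contained illustration (not part of the original pipeline) of
# what flatten_columns does after a multi-statistic aggregation: MultiIndex
# columns such as ('value', 'mean') become 'value_mean'.
def _demo_flatten_columns():
    demo = pd.DataFrame({'region': ['A', 'A', 'B'], 'value': [1.0, 2.0, 3.0]})
    agg = demo.groupby('region').agg({'value': ['mean', 'sum']}).reset_index()
    return flatten_columns(agg)  # columns -> ['region', 'value_mean', 'value_sum']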
df_full = pd.read_csv('../data/split.csv')
df_activity = pd.read_csv('../data/data_raw/activity.csv')
df_rte_raw = pd.read_csv('../data/data_raw/rtes.csv')
hcps = pd.read_csv('../data/data_raw/hcps.csv')
INDEX = ['month', 'region', 'brand']
# %%
df_rte = (
df_rte_raw
.merge(hcps.loc[:, ["hcp", "tier"]], how='left', on='hcp')
.assign(inverse_tier_1=lambda x: 4 - x.tier)
)
# %%
df_rte['date_last_opened'] = pd.to_datetime(df_rte.time_last_opened).dt.date
df_rte['date_sent'] = pd.to_datetime(df_rte.time_sent).dt.date
# %%
df_rte['month_last_opened'] = | pd.to_datetime(df_rte.date_last_opened) | pandas.to_datetime |
import pandas as pd
def fix_datasets():
dati = pd.read_csv("dati_regioni.csv")
regioni = pd.read_csv("regioni.csv")
    ## Merge the Trentino data (P.A. Bolzano + P.A. Trento)
dati.drop(columns = ["casi_da_sospetto_diagnostico", "casi_da_screening"], axis = 1, inplace = True)
df_r = dati.loc[(dati['denominazione_regione'] == "P.A. Bolzano") | (dati['denominazione_regione'] == "P.A. Trento")]
df_trentino = df_r.groupby("data").sum()
df_trentino['denominazione_regione'] = "Trentino Alto Adige"
df_trentino['lat'] = 46.068935
df_trentino['long'] = 11.121231
df_trentino = df_trentino.reset_index()
dati = dati.loc[(dati['denominazione_regione'] != "P.A. Trento") & (dati['denominazione_regione'] != "P.A. Bolzano")]
dati_fix = | pd.concat([dati, df_trentino], sort=False) | pandas.concat |
import datetime
import pandas as pd
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
def plot_team(team):
years = [2012,2013,2014,2015,2016,2017]
g = pd.read_csv("audl_elo.csv")
dates = pd.to_datetime(g[(g["team_id"] == team)]["date"])
elo = g[(g["team_id"] == team)]["elo_n"]
plt.plot(dates,elo)
plt.show()
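# Hedged usage sketch ('Toronto Rush' is only an illustrative guess; the value
# must match a 'team_id' in audl_elo.csv):
#
#   plot_team('Toronto Rush')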
def plot_team_b(team):
years = [2012,2013,2014,2015,2016,2017]
g = pd.read_csv("audl_elo.csv")
fig, axs = plt.subplots(1,len(years),sharey=True)
for i in range(len(axs)):
#Plotting
dates = pd.to_datetime(g[(g["team_id"] == team) & (g["year_id"] == years[i])]["date"])
elo = g[(g["team_id"] == team) & (g["year_id"] == years[i])]["elo_n"]
axs[i].plot(dates,elo)
#Formatting
axs[i].xaxis.set_ticks_position('none')
axs[i].set_xlabel(str(years[i]))
axs[i].tick_params('x',labelbottom=False)
axs[i].set_ylim(1050,1950)
if i == 0:
axs[i].yaxis.tick_left()
axs[i].set_yticks(range(1100,2000,100))
if i != len(axs)-1:
axs[i].spines['right'].set_visible(False)
if i != 0:
axs[i].yaxis.set_ticks_position('none')
axs[i].spines['left'].set_visible(False)
plt.show()
def plot_teams(teams):
years = [2012,2013,2014,2015,2016,2017]
g = pd.read_csv("audl_elo.csv")
#plt.style.use('fivethirtyeight')
fig, axs = plt.subplots(1,len(years),sharey=True)
for i in range(len(axs)):
season_start = pd.to_datetime(g[(g["year_id"] == years[i])]["date"]).min() - datetime.timedelta(7)
season_end= pd.to_datetime(g[(g["year_id"] == years[i])]["date"]).max()
#Plotting
colors = ['b','g','r','c','m','y','k']
for j,team in enumerate(teams):
dates = pd.to_datetime(g[(g["team_id"] == team) & (g["year_id"] == years[i])]["date"])
if dates.shape[0] > 0:
dates = pd.Series(season_start).append(dates)
elo = g[(g["team_id"] == team) & (g["year_id"] == years[i])]["elo_n"]
if elo.shape[0] > 0:
start_elo = g[(g["team_id"] == team) & (g["year_id"] == years[i])]["elo_i"].iloc[0]
elo = pd.Series(start_elo).append(elo)
axs[i].plot(dates,elo,color = colors[j])
#Formatting
axs[i].xaxis.set_ticks_position('none')
axs[i].set_xlabel(str(years[i]))
axs[i].tick_params('x',labelbottom=False)
axs[i].set_ylim(1050,1950)
axs[i].set_xlim(season_start,season_end)
axs[i].grid(True)
if i == 0:
axs[i].yaxis.tick_left()
axs[i].set_yticks(range(1100,2000,100))
if i != len(axs)-1:
axs[i].spines['right'].set_visible(False)
if i != 0:
axs[i].yaxis.set_ticks_position('none')
axs[i].spines['left'].set_visible(False)
if i == len(axs)-1:
axs[i].legend(teams)
plt.show()
def better_plot(team):
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
yearsFmt = mdates.DateFormatter('%Y')
    # Load the Elo history for the team; the 'date' column is parsed into
    # datetime so it can drive the x axis.
g = pd.read_csv("audl_elo.csv")
dates = pd.to_datetime(g[(g["team_id"] == team)]["date"])
elo = g[(g["team_id"] == team)]["elo_n"]
fig, ax = plt.subplots()
ax.plot(dates, elo)
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
datemin = datetime.date(dates.min().year, 1, 1)
datemax = datetime.date(dates.max().year + 1, 1, 1)
ax.set_xlim(datemin, datemax)
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
#ax.format_ydata = price
ax.grid(True)
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
plt.show()
def plot_teams_one(target_team):
years = [2012,2013,2014,2015,2016,2017]
g = | pd.read_csv("audl_elo.csv") | pandas.read_csv |
import praw
import pandas as pd
from praw.models import MoreComments
import datetime
reddit = praw.Reddit(client_id='pm9diOFYiSsXHw',
client_secret='<KEY>',
user_agent='webscraper',
username='yash3277',
password='<PASSWORD>')
posts = []
subreddit = reddit.subreddit('Coronavirus')
for post in subreddit.hot(limit=1000):
posts.append([post.title, post.score, post.id, post.subreddit, post.url, post.num_comments, post.selftext, post.created, datetime.datetime.fromtimestamp(post.created)])
posts = pd.DataFrame(posts,columns=['title', 'score', 'id', 'subreddit', 'url', 'num_comments', 'body', 'created', 'date'])
posts.to_csv('Coronavirus.csv', index=False)
posts = pd.read_csv('Coronavirus.csv')
id_list = posts['id'].to_list()
comments=[]
for id in id_list:
submission = reddit.submission(id=id)
for comment in submission.comments.list():
if isinstance(comment, MoreComments):
continue
comments.append([comment.body,id,datetime.datetime.fromtimestamp(comment.created)])
comments = pd.DataFrame(comments, columns=['comments','id', 'date'])
comments.to_csv('CoronavirusComments.csv', index=False)
posts = []
subreddit = reddit.subreddit('COVID19')
for post in subreddit.hot(limit=1000):
posts.append([post.title, post.score, post.id, post.subreddit, post.url, post.num_comments, post.selftext, post.created, datetime.datetime.fromtimestamp(post.created)])
posts = | pd.DataFrame(posts,columns=['title', 'score', 'id', 'subreddit', 'url', 'num_comments', 'body', 'created', 'date']) | pandas.DataFrame |
import requests
import zipfile
import io
import pandas as pd
from datetime import datetime, timedelta
pd.set_option('display.width', None)
class DBManager:
"""Constructs and manages a sqlite database for accessing historical inputs for NEM spot market dispatch.
Constructs a database if none exists, otherwise connects to an existing database. Specific datasets can be added
to the database from AEMO nemweb portal and inputs can be retrieved on a 5 min dispatch interval basis.
Examples
--------
Create the database or connect to an existing one.
>>> import sqlite3
>>> con = sqlite3.connect('historical.db')
Create the database manager.
>>> historical = DBManager(con)
Create a set of default table in the database.
>>> historical.create_tables()
Add data from AEMO nemweb data portal. In this case we are adding data from the table DISPATCHREGIONSUM which contains
a dispatch summary by region, the data comes in monthly chunks.
>>> historical.DISPATCHREGIONSUM.add_data(year=2020, month=1)
>>> historical.DISPATCHREGIONSUM.add_data(year=2020, month=2)
This table has an add_data method indicating that data provided by AEMO comes in monthly files that do not overlap.
If you need data for multiple months then multiple add_data calls can be made.
Data for a specific 5 min dispatch interval can then be retrieved.
>>> print(historical.DISPATCHREGIONSUM.get_data('2020/01/10 12:35:00').head())
SETTLEMENTDATE REGIONID TOTALDEMAND DEMANDFORECAST INITIALSUPPLY
0 2020/01/10 12:35:00 NSW1 9938.01 34.23926 9902.79199
1 2020/01/10 12:35:00 QLD1 6918.63 26.47852 6899.76270
2 2020/01/10 12:35:00 SA1 1568.04 4.79657 1567.85864
3 2020/01/10 12:35:00 TAS1 1124.05 -3.43994 1109.36963
4 2020/01/10 12:35:00 VIC1 6633.45 37.05273 6570.15527
Some tables will have a set_data method instead of an add_data method, indicating that the most recent data file
provided by AEMO contains all historical data for this table. In this case if multiple calls to the set_data method
are made the new data replaces the old.
>>> historical.DUDETAILSUMMARY.set_data(year=2020, month=2)
Data for a specific 5 min dispatch interval can then be retrieved.
>>> print(historical.DUDETAILSUMMARY.get_data('2020/01/10 12:35:00').head())
DUID START_DATE END_DATE DISPATCHTYPE CONNECTIONPOINTID REGIONID TRANSMISSIONLOSSFACTOR DISTRIBUTIONLOSSFACTOR SCHEDULE_TYPE
0 AGLHAL 2019/07/01 00:00:00 2020/01/20 00:00:00 GENERATOR SHPS1 SA1 0.9748 1.0000 SCHEDULED
1 AGLNOW1 2019/07/01 00:00:00 2999/12/31 00:00:00 GENERATOR NDT12 NSW1 0.9929 1.0000 NON-SCHEDULED
2 AGLSITA1 2019/07/01 00:00:00 2999/12/31 00:00:00 GENERATOR NLP13K NSW1 1.0009 1.0000 NON-SCHEDULED
3 AGLSOM 2019/07/01 00:00:00 2999/12/31 00:00:00 GENERATOR VTTS1 VIC1 0.9915 0.9891 SCHEDULED
4 ANGAST1 2019/07/01 00:00:00 2999/12/31 00:00:00 GENERATOR SDRN1 SA1 0.9517 0.9890 SCHEDULED
Parameters
----------
con : sqlite3.connection
Attributes
----------
BIDPEROFFER_D : InputsByIntervalDateTime
Unit volume bids by 5 min dispatch intervals.
BIDDAYOFFER_D : InputsByDay
Unit price bids by market day.
DISPATCHREGIONSUM : InputsBySettlementDate
Regional demand terms by 5 min dispatch intervals.
DISPATCHLOAD : InputsBySettlementDate
Unit operating conditions by 5 min dispatch intervals.
DUDETAILSUMMARY : InputsStartAndEnd
Unit information by the start and end times of when the information is applicable.
DISPATCHCONSTRAINT : InputsBySettlementDate
The generic constraints that were used in each 5 min interval dispatch.
GENCONDATA : InputsByMatchDispatchConstraints
The generic constraints information, their applicability to a particular dispatch interval is determined by
reference to DISPATCHCONSTRAINT.
SPDREGIONCONSTRAINT : InputsByMatchDispatchConstraints
The regional lhs terms in generic constraints, their applicability to a particular dispatch interval is
determined by reference to DISPATCHCONSTRAINT.
SPDCONNECTIONPOINTCONSTRAINT : InputsByMatchDispatchConstraints
The connection point lhs terms in generic constraints, their applicability to a particular dispatch interval is
determined by reference to DISPATCHCONSTRAINT.
SPDINTERCONNECTORCONSTRAINT : InputsByMatchDispatchConstraints
The interconnector lhs terms in generic constraints, their applicability to a particular dispatch interval is
determined by reference to DISPATCHCONSTRAINT.
INTERCONNECTOR : InputsNoFilter
        The regions that each interconnector links.
INTERCONNECTORCONSTRAINT : InputsByEffectiveDateVersionNoAndDispatchInterconnector
Interconnector properties FROMREGIONLOSSSHARE, LOSSCONSTANT, LOSSFLOWCOEFFICIENT, MAXMWIN, MAXMWOUT by
EFFECTIVEDATE and VERSIONNO.
LOSSMODEL : InputsByEffectiveDateVersionNoAndDispatchInterconnector
        Break points used in linearly interpolating interconnector loss functions by EFFECTIVEDATE and VERSIONNO.
LOSSFACTORMODEL : InputsByEffectiveDateVersionNoAndDispatchInterconnector
Coefficients of demand terms in interconnector loss functions.
DISPATCHINTERCONNECTORRES : InputsBySettlementDate
        Record of which interconnectors were used in a particular dispatch interval.
"""
def __init__(self, connection):
self.con = connection
self.DISPATCHREGIONSUM = InputsBySettlementDate(
table_name='DISPATCHREGIONSUM', table_columns=['SETTLEMENTDATE', 'REGIONID', 'TOTALDEMAND',
'DEMANDFORECAST', 'INITIALSUPPLY'],
table_primary_keys=['SETTLEMENTDATE', 'REGIONID'], con=self.con)
self.DISPATCHLOAD = InputsBySettlementDate(
table_name='DISPATCHLOAD', table_columns=['SETTLEMENTDATE', 'DUID', 'DISPATCHMODE', 'AGCSTATUS',
'INITIALMW', 'TOTALCLEARED', 'RAMPDOWNRATE', 'RAMPUPRATE',
'AVAILABILITY', 'RAISEREGENABLEMENTMAX', 'RAISEREGENABLEMENTMIN',
'LOWERREGENABLEMENTMAX', 'LOWERREGENABLEMENTMIN',
'SEMIDISPATCHCAP', 'LOWER5MIN', 'LOWER60SEC', 'LOWER6SEC',
'RAISE5MIN', 'RAISE60SEC', 'RAISE6SEC', 'LOWERREG', 'RAISEREG',
'RAISEREGAVAILABILITY', 'RAISE6SECACTUALAVAILABILITY',
'RAISE60SECACTUALAVAILABILITY', 'RAISE5MINACTUALAVAILABILITY',
'RAISEREGACTUALAVAILABILITY', 'LOWER6SECACTUALAVAILABILITY',
'LOWER60SECACTUALAVAILABILITY', 'LOWER5MINACTUALAVAILABILITY',
'LOWERREGACTUALAVAILABILITY'],
table_primary_keys=['SETTLEMENTDATE', 'DUID'], con=self.con)
self.DISPATCHPRICE = InputsBySettlementDate(
table_name='DISPATCHPRICE', table_columns=['SETTLEMENTDATE', 'REGIONID', 'ROP', 'RAISE6SECROP',
'RAISE60SECROP', 'RAISE5MINROP', 'RAISEREGROP',
'LOWER6SECROP', 'LOWER60SECROP', 'LOWER5MINROP',
'LOWERREGROP'],
table_primary_keys=['SETTLEMENTDATE', 'REGIONID'], con=self.con)
self.DUDETAILSUMMARY = InputsStartAndEnd(
table_name='DUDETAILSUMMARY', table_columns=['DUID', 'START_DATE', 'END_DATE', 'DISPATCHTYPE',
'CONNECTIONPOINTID', 'REGIONID', 'TRANSMISSIONLOSSFACTOR',
'DISTRIBUTIONLOSSFACTOR', 'SCHEDULE_TYPE'],
table_primary_keys=['START_DATE', 'DUID'], con=self.con)
self.DUDETAIL = InputsByEffectiveDateVersionNo(
table_name='DUDETAIL', table_columns=['DUID', 'EFFECTIVEDATE', 'VERSIONNO', 'REGISTEREDCAPACITY'],
table_primary_keys=['DUID', 'EFFECTIVEDATE', 'VERSIONNO'], con=self.con)
self.DISPATCHCONSTRAINT = InputsBySettlementDate(
table_name='DISPATCHCONSTRAINT', table_columns=['SETTLEMENTDATE', 'CONSTRAINTID', 'RHS',
'GENCONID_EFFECTIVEDATE', 'GENCONID_VERSIONNO',
'LHS', 'VIOLATIONDEGREE', 'MARGINALVALUE'],
table_primary_keys=['SETTLEMENTDATE', 'CONSTRAINTID'], con=self.con)
self.GENCONDATA = InputsByMatchDispatchConstraints(
table_name='GENCONDATA', table_columns=['GENCONID', 'EFFECTIVEDATE', 'VERSIONNO', 'CONSTRAINTTYPE',
'GENERICCONSTRAINTWEIGHT'],
table_primary_keys=['GENCONID', 'EFFECTIVEDATE', 'VERSIONNO'], con=self.con)
self.SPDREGIONCONSTRAINT = InputsByMatchDispatchConstraints(
table_name='SPDREGIONCONSTRAINT', table_columns=['REGIONID', 'EFFECTIVEDATE', 'VERSIONNO', 'GENCONID',
'BIDTYPE', 'FACTOR'],
table_primary_keys=['REGIONID', 'GENCONID', 'EFFECTIVEDATE', 'VERSIONNO', 'BIDTYPE'], con=self.con)
self.SPDCONNECTIONPOINTCONSTRAINT = InputsByMatchDispatchConstraints(
table_name='SPDCONNECTIONPOINTCONSTRAINT', table_columns=['CONNECTIONPOINTID', 'EFFECTIVEDATE', 'VERSIONNO',
'GENCONID', 'BIDTYPE', 'FACTOR'],
table_primary_keys=['CONNECTIONPOINTID', 'GENCONID', 'EFFECTIVEDATE', 'VERSIONNO', 'BIDTYPE'], con=self.con)
self.SPDINTERCONNECTORCONSTRAINT = InputsByMatchDispatchConstraints(
table_name='SPDINTERCONNECTORCONSTRAINT', table_columns=['INTERCONNECTORID', 'EFFECTIVEDATE', 'VERSIONNO',
'GENCONID', 'FACTOR'],
table_primary_keys=['INTERCONNECTORID', 'GENCONID', 'EFFECTIVEDATE', 'VERSIONNO'], con=self.con)
self.INTERCONNECTOR = InputsNoFilter(
table_name='INTERCONNECTOR', table_columns=['INTERCONNECTORID', 'REGIONFROM', 'REGIONTO'],
table_primary_keys=['INTERCONNECTORID'], con=self.con)
self.INTERCONNECTORCONSTRAINT = InputsByEffectiveDateVersionNoAndDispatchInterconnector(
table_name='INTERCONNECTORCONSTRAINT', table_columns=['INTERCONNECTORID', 'EFFECTIVEDATE', 'VERSIONNO',
'FROMREGIONLOSSSHARE', 'LOSSCONSTANT', 'ICTYPE',
'LOSSFLOWCOEFFICIENT', 'IMPORTLIMIT', 'EXPORTLIMIT'],
table_primary_keys=['INTERCONNECTORID', 'EFFECTIVEDATE', 'VERSIONNO'], con=self.con)
self.LOSSMODEL = InputsByEffectiveDateVersionNoAndDispatchInterconnector(
table_name='LOSSMODEL', table_columns=['INTERCONNECTORID', 'EFFECTIVEDATE', 'VERSIONNO', 'LOSSSEGMENT',
'MWBREAKPOINT'],
table_primary_keys=['INTERCONNECTORID', 'EFFECTIVEDATE', 'VERSIONNO'], con=self.con)
self.LOSSFACTORMODEL = InputsByEffectiveDateVersionNoAndDispatchInterconnector(
table_name='LOSSFACTORMODEL', table_columns=['INTERCONNECTORID', 'EFFECTIVEDATE', 'VERSIONNO', 'REGIONID',
'DEMANDCOEFFICIENT'],
table_primary_keys=['INTERCONNECTORID', 'EFFECTIVEDATE', 'VERSIONNO'], con=self.con)
self.DISPATCHINTERCONNECTORRES = InputsBySettlementDate(
table_name='DISPATCHINTERCONNECTORRES', table_columns=['INTERCONNECTORID', 'SETTLEMENTDATE', 'MWFLOW',
'MWLOSSES'],
table_primary_keys=['INTERCONNECTORID', 'SETTLEMENTDATE'], con=self.con)
self.MNSP_INTERCONNECTOR = InputsByEffectiveDateVersionNo(
table_name='MNSP_INTERCONNECTOR', table_columns=['INTERCONNECTORID', 'LINKID', 'EFFECTIVEDATE', 'VERSIONNO',
'FROMREGION', 'TOREGION', 'FROM_REGION_TLF',
'TO_REGION_TLF', 'LHSFACTOR', 'MAXCAPACITY'],
table_primary_keys=['INTERCONNECTORID', 'LINKID', 'EFFECTIVEDATE', 'VERSIONNO'], con=self.con)
    def create_tables(self):
        """Drops any existing default tables and creates new ones; generally called when setting up a new database.
Examples
--------
Create the database or connect to an existing one.
>>> import sqlite3
>>> con = sqlite3.connect('historical.db')
Create the database manager.
>>> historical = DBManager(con)
Create a set of default table in the database.
>>> historical.create_tables()
Default tables will now exist, but will be empty.
>>> print(pd.read_sql("Select * from DISPATCHREGIONSUM", con=con))
Empty DataFrame
Columns: [SETTLEMENTDATE, REGIONID, TOTALDEMAND, DEMANDFORECAST, INITIALSUPPLY]
Index: []
        If you add data and then call create_tables again, any previously added data will be deleted.
>>> historical.DISPATCHREGIONSUM.add_data(year=2020, month=1)
>>> print(pd.read_sql("Select * from DISPATCHREGIONSUM limit 3", con=con))
SETTLEMENTDATE REGIONID TOTALDEMAND DEMANDFORECAST INITIALSUPPLY
0 2020/01/01 00:05:00 NSW1 7245.31 -26.35352 7284.32178
1 2020/01/01 00:05:00 QLD1 6095.75 -24.29639 6129.36279
2 2020/01/01 00:05:00 SA1 1466.53 1.47190 1452.25647
>>> historical.create_tables()
>>> print(pd.read_sql("Select * from DISPATCHREGIONSUM", con=con))
Empty DataFrame
Columns: [SETTLEMENTDATE, REGIONID, TOTALDEMAND, DEMANDFORECAST, INITIALSUPPLY]
Index: []
Returns
-------
None
"""
for name, attribute in self.__dict__.items():
if hasattr(attribute, 'create_table_in_sqlite_db'):
attribute.create_table_in_sqlite_db()
def _create_sample_database(self, date_time):
for name, attribute in self.__dict__.items():
if hasattr(attribute, '_create_sample_table'):
attribute._create_sample_table(date_time)
def populate(self, start_year, start_month, end_year, end_month, verbose=True):
self.create_tables()
if start_month == 1:
start_year -= 1
start_month = 12
else:
start_month -= 1
        # Download data where inputs are needed on a monthly basis.
finished = False
for year in range(start_year, end_year + 1):
for month in range(start_month, 13):
if year == end_year and month == end_month + 1:
finished = True
break
if verbose:
print('Downloading MMS table for year={} month={}'.format(year, month))
self.DISPATCHINTERCONNECTORRES.add_data(year=year, month=month)
self.DISPATCHREGIONSUM.add_data(year=year, month=month)
self.DISPATCHLOAD.add_data(year=year, month=month)
self.DISPATCHCONSTRAINT.add_data(year=year, month=month)
self.DISPATCHPRICE.add_data(year=year, month=month)
if finished:
break
start_month = 1
# Download data where inputs are just needed from the latest month.
self.INTERCONNECTOR.set_data(year=end_year, month=end_month)
self.LOSSFACTORMODEL.set_data(year=end_year, month=end_month)
self.LOSSMODEL.set_data(year=end_year, month=end_month)
self.DUDETAILSUMMARY.create_table_in_sqlite_db()
self.DUDETAILSUMMARY.set_data(year=end_year, month=end_month)
self.DUDETAIL.set_data(year=end_year, month=end_month)
self.INTERCONNECTORCONSTRAINT.set_data(year=end_year, month=end_month)
self.GENCONDATA.set_data(year=end_year, month=end_month)
self.SPDCONNECTIONPOINTCONSTRAINT.set_data(year=end_year, month=end_month)
self.SPDREGIONCONSTRAINT.set_data(year=end_year, month=end_month)
self.SPDINTERCONNECTORCONSTRAINT.set_data(year=end_year, month=end_month)
self.INTERCONNECTOR.set_data(year=end_year, month=end_month)
self.MNSP_INTERCONNECTOR.create_table_in_sqlite_db()
self.MNSP_INTERCONNECTOR.set_data(year=end_year, month=end_month)
self.DUDETAIL.create_table_in_sqlite_db()
self.DUDETAIL.set_data(year=end_year, month=end_month)
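# Hedged usage sketch for DBManager.populate (requires internet access to the
# nemweb archive; the database file name is just an example):
#
#   import sqlite3
#   con = sqlite3.connect('historical.db')
#   DBManager(con).populate(start_year=2020, start_month=1,
#                           end_year=2020, end_month=2)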
def _download_to_df(url, table_name, year, month):
"""Downloads a zipped csv file and converts it to a pandas DataFrame, returns the DataFrame.
Examples
--------
This will only work if you are connected to the internet.
>>> url = ('http://nemweb.com.au/Data_Archive/Wholesale_Electricity/MMSDM/{year}/MMSDM_{year}_{month}/' +
... 'MMSDM_Historical_Data_SQLLoader/DATA/PUBLIC_DVD_{table}_{year}{month}010000.zip')
>>> table_name = 'DISPATCHREGIONSUM'
>>> df = _download_to_df(url, table_name='DISPATCHREGIONSUM', year=2020, month=1)
>>> print(df)
I DISPATCH ... SEMISCHEDULE_CLEAREDMW SEMISCHEDULE_COMPLIANCEMW
0 D DISPATCH ... 549.30600 0.00000
1 D DISPATCH ... 102.00700 0.00000
2 D DISPATCH ... 387.40700 0.00000
3 D DISPATCH ... 145.43200 0.00000
4 D DISPATCH ... 136.85200 0.00000
... .. ... ... ... ...
45380 D DISPATCH ... 757.47600 0.00000
45381 D DISPATCH ... 142.71600 0.00000
45382 D DISPATCH ... 310.28903 0.36103
45383 D DISPATCH ... 83.94100 0.00000
45384 D DISPATCH ... 196.69610 0.69010
<BLANKLINE>
[45385 rows x 109 columns]
Parameters
----------
url : str
A url of the format 'PUBLIC_DVD_{table}_{year}{month}010000.zip', typically this will be a location on AEMO's
nemweb portal where data is stored in monthly archives.
table_name : str
The name of the table you want to download from nemweb.
year : int
The year the table is from.
month : int
The month the table is form.
Returns
-------
pd.DataFrame
Raises
------
MissingData
If internet connection is down, nemweb is down or data requested is not on nemweb.
"""
# Insert the table_name, year and month into the url.
url = url.format(table=table_name, year=year, month=str(month).zfill(2))
# Download the file.
r = requests.get(url)
if r.status_code != 200:
raise _MissingData(("""Requested data for table: {}, year: {}, month: {}
not downloaded. Please check your internet connection. Also check
http://nemweb.com.au/#mms-data-model, to see if your requested
data is uploaded.""").format(table_name, year, month))
# Convert the contents of the response into a zipfile object.
zf = zipfile.ZipFile(io.BytesIO(r.content))
# Get the name of the file inside the zip object, assuming only one file is zipped inside.
file_name = zf.namelist()[0]
# Read the file into a DataFrame.
data = pd.read_csv(zf.open(file_name), skiprows=1)
# Discard last row of DataFrame
data = data[:-1]
return data
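# Connection-free illustration (not part of the original module) of the URL
# assembly done inside _download_to_df; the table, year and month are examples.
def _example_archive_url(table_name='DISPATCHREGIONSUM', year=2020, month=1):
    url = ('http://nemweb.com.au/Data_Archive/Wholesale_Electricity/MMSDM/{year}/MMSDM_{year}_{month}/' +
           'MMSDM_Historical_Data_SQLLoader/DATA/PUBLIC_DVD_{table}_{year}{month}010000.zip')
    return url.format(table=table_name, year=year, month=str(month).zfill(2))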
class _MissingData(Exception):
"""Raise for nemweb not returning status 200 for file request."""
class _MMSTable:
"""Manages Market Management System (MMS) tables stored in an sqlite database.
This class creates the table in the data base when the object is instantiated. Methods for adding adding and
retrieving data are added by sub classing.
"""
def __init__(self, table_name, table_columns, table_primary_keys, con):
"""Creates a table in sqlite database that the connection is provided for.
Examples
--------
>>> import sqlite3
>>> import os
Set up a database or connect to an existing one.
>>> con = sqlite3.connect('historical.db')
Create the table object.
>>> table = _MMSTable(table_name='a_table', table_columns=['col_1', 'col_2'], table_primary_keys=['col_1'],
... con=con)
Clean up by deleting database created.
>>> con.close()
>>> os.remove('historical.db')
Parameters
----------
table_name : str
Name of the table.
table_columns : list(str)
List of table column names.
table_primary_keys : list(str)
Table columns to use as primary keys.
con : sqlite3.Connection
Connection to an existing database.
"""
self.con = con
self.table_name = table_name
self.table_columns = table_columns
self.table_primary_keys = table_primary_keys
# url that sub classes will use to pull MMS tables from nemweb.
self.url = 'http://nemweb.com.au/Data_Archive/Wholesale_Electricity/MMSDM/{year}/MMSDM_{year}_{month}/' + \
'MMSDM_Historical_Data_SQLLoader/DATA/PUBLIC_DVD_{table}_{year}{month}010000.zip'
self.columns_types = {
'INTERVAL_DATETIME': 'TEXT', 'DUID': 'TEXT', 'BIDTYPE': 'TEXT', 'BANDAVAIL1': 'REAL', 'BANDAVAIL2': 'REAL',
'BANDAVAIL3': 'REAL', 'BANDAVAIL4': 'REAL', 'BANDAVAIL5': 'REAL', 'BANDAVAIL6': 'REAL',
'BANDAVAIL7': 'REAL', 'BANDAVAIL8': 'REAL', 'BANDAVAIL9': 'REAL', 'BANDAVAIL10': 'REAL', 'MAXAVAIL': 'REAL',
'ENABLEMENTMIN': 'REAL', 'ENABLEMENTMAX': 'REAL', 'LOWBREAKPOINT': 'REAL', 'HIGHBREAKPOINT': 'REAL',
'SETTLEMENTDATE': 'TEXT', 'PRICEBAND1': 'REAL', 'PRICEBAND2': 'REAL', 'PRICEBAND3': 'REAL',
'PRICEBAND4': 'REAL', 'PRICEBAND5': 'REAL', 'PRICEBAND6': 'REAL', 'PRICEBAND7': 'REAL',
'PRICEBAND8': 'REAL', 'PRICEBAND9': 'REAL', 'PRICEBAND10': 'REAL', 'T1': 'REAL', 'T2': 'REAL',
'T3': 'REAL', 'T4': 'REAL', 'REGIONID': 'TEXT', 'TOTALDEMAND': 'REAL', 'DEMANDFORECAST': 'REAL',
'INITIALSUPPLY': 'REAL', 'DISPATCHMODE': 'TEXT', 'AGCSTATUS': 'TEXT', 'INITIALMW': 'REAL',
'TOTALCLEARED': 'REAL', 'RAMPDOWNRATE': 'REAL', 'RAMPUPRATE': 'REAL', 'AVAILABILITY': 'REAL',
'RAISEREGENABLEMENTMAX': 'REAL', 'RAISEREGENABLEMENTMIN': 'REAL', 'LOWERREGENABLEMENTMAX': 'REAL',
'LOWERREGENABLEMENTMIN': 'REAL', 'START_DATE': 'TEXT', 'END_DATE': 'TEXT', 'DISPATCHTYPE': 'TEXT',
'CONNECTIONPOINTID': 'TEXT', 'TRANSMISSIONLOSSFACTOR': 'REAL', 'DISTRIBUTIONLOSSFACTOR': 'REAL',
'CONSTRAINTID': 'TEXT', 'RHS': 'REAL', 'GENCONID_EFFECTIVEDATE': 'TEXT', 'GENCONID_VERSIONNO': 'TEXT',
'GENCONID': 'TEXT', 'EFFECTIVEDATE': 'TEXT', 'VERSIONNO': 'TEXT', 'CONSTRAINTTYPE': 'TEXT',
'GENERICCONSTRAINTWEIGHT': 'REAL', 'FACTOR': 'REAL', 'FROMREGIONLOSSSHARE': 'REAL', 'LOSSCONSTANT': 'REAL',
'LOSSFLOWCOEFFICIENT': 'REAL', 'IMPORTLIMIT': 'REAL', 'EXPORTLIMIT': 'REAL', 'LOSSSEGMENT': 'TEXT',
'MWBREAKPOINT': 'REAL', 'DEMANDCOEFFICIENT': 'REAL', 'INTERCONNECTORID': 'TEXT', 'REGIONFROM': 'TEXT',
'REGIONTO': 'TEXT', 'MWFLOW': 'REAL', 'MWLOSSES': 'REAL', 'MINIMUMLOAD': 'REAL', 'MAXCAPACITY': 'REAL',
'SEMIDISPATCHCAP': 'REAL', 'RRP': 'REAL', 'SCHEDULE_TYPE': 'TEXT', 'LOWER5MIN': 'REAL',
'LOWER60SEC': 'REAL', 'LOWER6SEC': 'REAL', 'RAISE5MIN': 'REAL', 'RAISE60SEC': 'REAL', 'RAISE6SEC': 'REAL',
'LOWERREG': 'REAL', 'RAISEREG': 'REAL', 'RAISEREGAVAILABILITY': 'REAL',
'RAISE6SECACTUALAVAILABILITY': 'REAL', 'RAISE60SECACTUALAVAILABILITY': 'REAL',
'RAISE5MINACTUALAVAILABILITY': 'REAL', 'RAISEREGACTUALAVAILABILITY': 'REAL',
'LOWER6SECACTUALAVAILABILITY': 'REAL', 'LOWER60SECACTUALAVAILABILITY': 'REAL',
'LOWER5MINACTUALAVAILABILITY': 'REAL', 'LOWERREGACTUALAVAILABILITY': 'REAL', 'LHS': 'REAL',
'VIOLATIONDEGREE': 'REAL', 'MARGINALVALUE': 'REAL', 'RAISE6SECROP': 'REAL',
'RAISE60SECROP': 'REAL', 'RAISE5MINROP': 'REAL', 'RAISEREGROP': 'REAL', 'LOWER6SECROP': 'REAL',
'LOWER60SECROP': 'REAL', 'LOWER5MINROP': 'REAL', 'LOWERREGROP': 'REAL', 'FROM_REGION_TLF': 'REAL',
'TO_REGION_TLF': 'REAL', 'ICTYPE': 'TEXT', 'LINKID': 'TEXT', 'FROMREGION': 'TEXT', 'TOREGION': 'TEXT',
'REGISTEREDCAPACITY': 'REAL', 'LHSFACTOR': 'FACTOR', 'ROP': 'REAL'
}
def create_table_in_sqlite_db(self):
"""Creates a table in the sqlite database that the object has a connection to.
Note
----
This method and its documentation is inherited from the _MMSTable class.
Examples
--------
>>> import sqlite3
>>> import os
Set up a database or connect to an existing one.
>>> con = sqlite3.connect('historical.db')
Create the table object.
>>> table = _MMSTable(table_name='EXAMPLE', table_columns=['DUID', 'BIDTYPE'], table_primary_keys=['DUID'],
... con=con)
Create the corresponding table in the sqlite database, note this step many not be needed if you have connected
to an existing database.
>>> table.create_table_in_sqlite_db()
Now a table exists in the database, but its empty.
>>> print(pd.read_sql("Select * from example", con=con))
Empty DataFrame
Columns: [DUID, BIDTYPE]
Index: []
Clean up by closing the database and deleting if its no longer needed.
>>> con.close()
>>> os.remove('historical.db')
"""
with self.con:
cur = self.con.cursor()
cur.execute("""DROP TABLE IF EXISTS {};""".format(self.table_name))
base_create_query = """CREATE TABLE {}({}, PRIMARY KEY ({}));"""
columns = ','.join(['{} {}'.format(col, self.columns_types[col]) for col in self.table_columns])
primary_keys = ','.join(['{}'.format(col) for col in self.table_primary_keys])
create_query = base_create_query.format(self.table_name, columns, primary_keys)
cur.execute(create_query)
self.con.commit()
def _create_sample_table(self, date_time):
print(self.table_name)
try:
interval_data = self.get_data(date_time)
except:
interval_data = self.get_data()
with self.con:
interval_data.to_sql(self.table_name, con=self.con, if_exists='replace', index=False)
self.con.commit()
class _SingleDataSource(_MMSTable):
    """Manages downloading data from nemweb for tables where all relevant data is stored in the latest data file."""
def __init__(self, table_name, table_columns, table_primary_keys, con):
_MMSTable.__init__(self, table_name, table_columns, table_primary_keys, con)
    def set_data(self, year, month):
        """Download data for the given table and time, replacing any existing data.
Note
----
This method and its documentation is inherited from the _SingleDataSource class.
Examples
--------
>>> import sqlite3
>>> import os
Set up a database or connect to an existing one.
>>> con = sqlite3.connect('historical.db')
Create the table object.
>>> table = _SingleDataSource(table_name='DUDETAILSUMMARY',
... table_columns=['DUID', 'START_DATE', 'CONNECTIONPOINTID', 'REGIONID'],
... table_primary_keys=['START_DATE', 'DUID'], con=con)
Create the table in the database.
>>> table.create_table_in_sqlite_db()
Downloading data from http://nemweb.com.au/#mms-data-model into the table.
>>> table.set_data(year=2020, month=1)
        Now the database should contain data for this table that is up to date as of the end of January.
>>> query = "Select * from DUDETAILSUMMARY order by START_DATE DESC limit 1;"
>>> print(pd.read_sql_query(query, con=con))
DUID START_DATE CONNECTIONPOINTID REGIONID
0 URANQ11 2020/02/04 00:00:00 NURQ1U NSW1
However if we subsequently set data from a previous date then any existing data will be replaced. Note the
change in the most recent record in the data set below.
>>> table.set_data(year=2019, month=1)
>>> print(pd.read_sql_query(query, con=con))
DUID START_DATE CONNECTIONPOINTID REGIONID
0 WEMENSF1 2019/03/04 00:00:00 VWES2W VIC1
Clean up by closing the database and deleting if its no longer needed.
>>> con.close()
>>> os.remove('historical.db')
Parameters
----------
year : int
The year to download data for.
month : int
The month to download data for.
Return
------
None
"""
data = _download_to_df(self.url, self.table_name, year, month)
data = data.loc[:, self.table_columns]
with self.con:
data.to_sql(self.table_name, con=self.con, if_exists='replace', index=False)
self.con.commit()
class _MultiDataSource(_MMSTable):
    """Manages downloading data from nemweb for tables where data may be stored across multiple monthly files."""
def __init__(self, table_name, table_columns, table_primary_keys, con):
_MMSTable.__init__(self, table_name, table_columns, table_primary_keys, con)
    def add_data(self, year, month):
        """Download data for the given table and time, appending to any existing data.
Note
----
This method and its documentation is inherited from the _MultiDataSource class.
Examples
--------
>>> import sqlite3
>>> import os
Set up a database or connect to an existing one.
>>> con = sqlite3.connect('historical.db')
Create the table object.
>>> table = _MultiDataSource(table_name='DISPATCHREGIONSUM',
... table_columns=['SETTLEMENTDATE', 'REGIONID', 'TOTALDEMAND',
... 'DEMANDFORECAST', 'INITIALSUPPLY'],
... table_primary_keys=['SETTLEMENTDATE', 'REGIONID'], con=con)
Create the table in the database.
>>> table.create_table_in_sqlite_db()
Downloading data from http://nemweb.com.au/#mms-data-model into the table.
>>> table.add_data(year=2020, month=1)
        Now the database should contain data for this table that is up to date as of the end of January.
>>> query = "Select * from DISPATCHREGIONSUM order by SETTLEMENTDATE DESC limit 1;"
>>> print(pd.read_sql_query(query, con=con))
SETTLEMENTDATE REGIONID TOTALDEMAND DEMANDFORECAST INITIALSUPPLY
0 2020/02/01 00:00:00 VIC1 5935.1 -15.9751 5961.77002
If we subsequently add data from an earlier month the old data remains in the table, in addition to the new
data.
>>> table.add_data(year=2019, month=1)
>>> print(pd.read_sql_query(query, con=con))
SETTLEMENTDATE REGIONID TOTALDEMAND DEMANDFORECAST INITIALSUPPLY
0 2020/02/01 00:00:00 VIC1 5935.1 -15.9751 5961.77002
Clean up by closing the database and deleting if its no longer needed.
>>> con.close()
>>> os.remove('historical.db')
Parameters
----------
year : int
The year to download data for.
month : int
The month to download data for.
Return
------
None
"""
data = _download_to_df(self.url, self.table_name, year, month)
if 'INTERVENTION' in data.columns:
data = data[data['INTERVENTION'] == 0]
data = data.loc[:, self.table_columns]
data = data.drop_duplicates(subset=self.table_primary_keys)
with self.con:
data.to_sql(self.table_name, con=self.con, if_exists='append', index=False)
self.con.commit()
class _AllHistDataSource(_MMSTable):
"""Manages downloading data from nemweb for tables where relevant data could be stored in any previous monthly file.
"""
def __init__(self, table_name, table_columns, table_primary_keys, con):
_MMSTable.__init__(self, table_name, table_columns, table_primary_keys, con)
    def set_data(self, year, month):
        """Download data for the given table and time, replacing any existing data.
Note
----
This method and its documentation is inherited from the _SingleDataSource class.
Examples
--------
>>> import sqlite3
>>> import os
Set up a database or connect to an existing one.
>>> con = sqlite3.connect('historical.db')
Create the table object.
>>> table = _SingleDataSource(table_name='DUDETAILSUMMARY',
... table_columns=['DUID', 'START_DATE', 'CONNECTIONPOINTID', 'REGIONID'],
... table_primary_keys=['START_DATE', 'DUID'], con=con)
Create the table in the database.
>>> table.create_table_in_sqlite_db()
Downloading data from http://nemweb.com.au/#mms-data-model into the table.
>>> table.set_data(year=2020, month=1)
        Now the database should contain data for this table that is up to date as of the end of January.
>>> query = "Select * from DUDETAILSUMMARY order by START_DATE DESC limit 1;"
>>> print(pd.read_sql_query(query, con=con))
DUID START_DATE CONNECTIONPOINTID REGIONID
0 URANQ11 2020/02/04 00:00:00 NURQ1U NSW1
However if we subsequently set data from a previous date then any existing data will be replaced. Note the
change in the most recent record in the data set below.
>>> table.set_data(year=2019, month=1)
>>> print(pd.read_sql_query(query, con=con))
DUID START_DATE CONNECTIONPOINTID REGIONID
0 WEMENSF1 2019/03/04 00:00:00 VWES2W VIC1
Clean up by closing the database and deleting if its no longer needed.
>>> con.close()
>>> os.remove('historical.db')
Parameters
----------
year : int
The year to download data for.
month : int
The month to download data for.
Return
------
None
"""
cumulative_data = pd.DataFrame()
for y in range(year, 2009, -1):
for m in range(12, 0, -1):
if y == year and m > month:
continue
try:
data = _download_to_df(self.url, self.table_name, y, m)
if not set(self.table_columns) < set(data.columns):
continue
data = data.loc[:, self.table_columns]
with self.con:
if cumulative_data.empty:
data.to_sql(self.table_name, con=self.con, if_exists='replace', index=False)
cumulative_data = data.loc[:, self.table_primary_keys]
else:
# Filter data to only include rows unique to the new data and not in data
# previously downloaded.
data = pd.merge(data, cumulative_data, 'outer', on=self.table_primary_keys, indicator=True)
data = data[data['_merge'] == 'left_only'].drop('_merge', axis=1)
# Insert data.
data.to_sql(self.table_name, con=self.con, if_exists='append', index=False)
cumulative_data = pd.concat([cumulative_data, data.loc[:, self.table_primary_keys]])
self.con.commit()
except _MissingData:
pass
class InputsBySettlementDate(_MultiDataSource):
"""Manages retrieving dispatch inputs by SETTLEMENTDATE."""
def __init__(self, table_name, table_columns, table_primary_keys, con):
_MMSTable.__init__(self, table_name, table_columns, table_primary_keys, con)
    def get_data(self, date_time):
        """Retrieves data for the specified date_time e.g. 2019/01/01 11:55:00.
Examples
--------
>>> import sqlite3
>>> import os
Set up a database or connect to an existing one.
>>> con = sqlite3.connect('historical.db')
Create the table object.
>>> table = InputsBySettlementDate(table_name='EXAMPLE', table_columns=['SETTLEMENTDATE', 'INITIALMW'],
... table_primary_keys=['SETTLEMENTDATE'], con=con)
Create the table in the database.
>>> table.create_table_in_sqlite_db()
Normally you would use the add_data method to add historical data, but here we will add data directly to the
database so some simple example data can be added.
>>> data = pd.DataFrame({
... 'SETTLEMENTDATE': ['2019/01/01 11:55:00', '2019/01/01 12:00:00'],
... 'INITIALMW': [1.0, 2.0]})
>>> _ = data.to_sql('EXAMPLE', con=con, if_exists='append', index=False)
When we call get_data the output is filtered by SETTLEMENTDATE.
>>> print(table.get_data(date_time='2019/01/01 12:00:00'))
SETTLEMENTDATE INITIALMW
0 2019/01/01 12:00:00 2.0
Clean up by closing the database and deleting if its no longer needed.
>>> con.close()
>>> os.remove('historical.db')
Parameters
----------
date_time : str
Should be of format '%Y/%m/%d %H:%M:%S', and always a round 5 min interval e.g. 2019/01/01 11:55:00.
Returns
-------
pd.DataFrame
"""
query = "Select * from {table} where SETTLEMENTDATE == '{datetime}'"
query = query.format(table=self.table_name, datetime=date_time)
return pd.read_sql_query(query, con=self.con)
class InputsByIntervalDateTime(_MultiDataSource):
"""Manages retrieving dispatch inputs by INTERVAL_DATETIME."""
def __init__(self, table_name, table_columns, table_primary_keys, con):
_MMSTable.__init__(self, table_name, table_columns, table_primary_keys, con)
    def get_data(self, date_time):
        """Retrieves data for the specified date_time e.g. 2019/01/01 11:55:00.
Examples
--------
>>> import sqlite3
>>> import os
Set up a database or connect to an existing one.
>>> con = sqlite3.connect('historical.db')
Create the table object.
>>> table = InputsByIntervalDateTime(table_name='EXAMPLE', table_columns=['INTERVAL_DATETIME', 'INITIALMW'],
... table_primary_keys=['INTERVAL_DATETIME'], con=con)
Create the table in the database.
>>> table.create_table_in_sqlite_db()
Normally you would use the add_data method to add historical data, but here we will add data directly to the
database so some simple example data can be added.
>>> data = pd.DataFrame({
... 'INTERVAL_DATETIME': ['2019/01/01 11:55:00', '2019/01/01 12:00:00'],
... 'INITIALMW': [1.0, 2.0]})
>>> _ = data.to_sql('EXAMPLE', con=con, if_exists='append', index=False)
When we call get_data the output is filtered by INTERVAL_DATETIME.
>>> print(table.get_data(date_time='2019/01/01 12:00:00'))
INTERVAL_DATETIME INITIALMW
0 2019/01/01 12:00:00 2.0
Clean up by closing the database and deleting if its no longer needed.
>>> con.close()
>>> os.remove('historical.db')
Parameters
----------
date_time : str
Should be of format '%Y/%m/%d %H:%M:%S', and always a round 5 min interval e.g. 2019/01/01 11:55:00.
Returns
-------
pd.DataFrame
"""
query = "Select * from {table} where INTERVAL_DATETIME == '{datetime}'"
query = query.format(table=self.table_name, datetime=date_time)
return pd.read_sql_query(query, con=self.con)
class InputsByDay(_MultiDataSource):
"""Manages retrieving dispatch inputs by SETTLEMENTDATE, where inputs are stored on a daily basis."""
def __init__(self, table_name, table_columns, table_primary_keys, con):
_MMSTable.__init__(self, table_name, table_columns, table_primary_keys, con)
    def get_data(self, date_time):
        """Retrieves data for the specified date_time e.g. 2019/01/01 11:55:00, where inputs are stored on a daily basis.
        Note that a market day begins with the first 5 min interval at 04:05:00. Therefore, if an input date_time of
        2019/01/01 04:05:00 is given, inputs where the SETTLEMENTDATE is 2019/01/01 00:00:00 will be retrieved, and if
        a date_time of 2019/01/01 04:00:00 or earlier is given, then inputs where the SETTLEMENTDATE is
        2018/12/31 00:00:00 will be retrieved.
Examples
--------
>>> import sqlite3
>>> import os
Set up a database or connect to an existing one.
>>> con = sqlite3.connect('historical.db')
Create the table object.
>>> table = InputsByDay(table_name='EXAMPLE', table_columns=['SETTLEMENTDATE', 'INITIALMW'],
... table_primary_keys=['SETTLEMENTDATE'], con=con)
Create the table in the database.
>>> table.create_table_in_sqlite_db()
Normally you would use the add_data method to add historical data, but here we will add data directly to the
database so some simple example data can be added.
>>> data = pd.DataFrame({
... 'SETTLEMENTDATE': ['2019/01/01 00:00:00', '2019/01/02 00:00:00'],
... 'INITIALMW': [1.0, 2.0]})
>>> _ = data.to_sql('EXAMPLE', con=con, if_exists='append', index=False)
When we call get_data the output is filtered by SETTLEMENTDATE and the results from the appropriate market
day starting at 04:05:00 are retrieved. In the results below note when the output changes
>>> print(table.get_data(date_time='2019/01/01 12:00:00'))
SETTLEMENTDATE INITIALMW
0 2019/01/01 00:00:00 1.0
>>> print(table.get_data(date_time='2019/01/02 04:00:00'))
SETTLEMENTDATE INITIALMW
0 2019/01/01 00:00:00 1.0
>>> print(table.get_data(date_time='2019/01/02 04:05:00'))
SETTLEMENTDATE INITIALMW
0 2019/01/02 00:00:00 2.0
>>> print(table.get_data(date_time='2019/01/02 12:00:00'))
SETTLEMENTDATE INITIALMW
0 2019/01/02 00:00:00 2.0
Clean up by closing the database and deleting if its no longer needed.
>>> con.close()
>>> os.remove('historical.db')
Parameters
----------
date_time : str
Should be of format '%Y/%m/%d %H:%M:%S', and always a round 5 min interval e.g. 2019/01/01 11:55:00.
Returns
-------
pd.DataFrame
"""
# Convert to datetime object
date_time = datetime.strptime(date_time, '%Y/%m/%d %H:%M:%S')
# Change date_time provided so any time less than 04:05:00 will have the previous days date.
date_time = date_time - timedelta(hours=4, seconds=1)
# Convert back to string.
date_time = datetime.isoformat(date_time).replace('-', '/').replace('T', ' ')
# Remove the time component.
date_time = date_time[:10]
date_padding = ' 00:00:00'
date_time = date_time + date_padding
query = "Select * from {table} where SETTLEMENTDATE == '{datetime}'"
query = query.format(table=self.table_name, datetime=date_time)
return pd.read_sql_query(query, con=self.con)
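# Self-contained sketch (not part of the original module) of the market-day
# arithmetic used by InputsByDay.get_data above: any time at or before 04:00:00
# maps to the previous day's SETTLEMENTDATE, because a market day starts with
# the 04:05:00 interval. _example_market_day('2019/01/02 04:00:00') returns
# '2019/01/01 00:00:00'.
def _example_market_day(date_time):
    dt = datetime.strptime(date_time, '%Y/%m/%d %H:%M:%S') - timedelta(hours=4, seconds=1)
    dt = datetime.isoformat(dt).replace('-', '/').replace('T', ' ')
    return dt[:10] + ' 00:00:00'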
class InputsStartAndEnd(_SingleDataSource):
"""Manages retrieving dispatch inputs by START_DATE and END_DATE."""
def __init__(self, table_name, table_columns, table_primary_keys, con):
_MMSTable.__init__(self, table_name, table_columns, table_primary_keys, con)
def get_data(self, date_time):
"""Retrieves data for the specified date_time by START_DATE and END_DATE.
        Records with a START_DATE before or equal to the date_time and an END_DATE after the date_time will be
returned.
Examples
--------
>>> import sqlite3
>>> import os
Set up a database or connect to an existing one.
>>> con = sqlite3.connect('historical.db')
Create the table object.
>>> table = InputsStartAndEnd(table_name='EXAMPLE', table_columns=['START_DATE', 'END_DATE', 'INITIALMW'],
... table_primary_keys=['START_DATE'], con=con)
Create the table in the database.
>>> table.create_table_in_sqlite_db()
Normally you would use the add_data method to add historical data, but here we will add data directly to the
database so some simple example data can be added.
>>> data = pd.DataFrame({
... 'START_DATE': ['2019/01/01 00:00:00', '2019/01/02 00:00:00'],
... 'END_DATE': ['2019/01/02 00:00:00', '2019/01/03 00:00:00'],
... 'INITIALMW': [1.0, 2.0]})
>>> _ = data.to_sql('EXAMPLE', con=con, if_exists='append', index=False)
When we call get_data the output is filtered by START_DATE and END_DATE.
>>> print(table.get_data(date_time='2019/01/01 00:00:00'))
START_DATE END_DATE INITIALMW
0 2019/01/01 00:00:00 2019/01/02 00:00:00 1.0
>>> print(table.get_data(date_time='2019/01/01 12:00:00'))
START_DATE END_DATE INITIALMW
0 2019/01/01 00:00:00 2019/01/02 00:00:00 1.0
>>> print(table.get_data(date_time='2019/01/02 00:00:00'))
START_DATE END_DATE INITIALMW
0 2019/01/02 00:00:00 2019/01/03 00:00:00 2.0
>>> print(table.get_data(date_time='2019/01/02 00:12:00'))
START_DATE END_DATE INITIALMW
0 2019/01/02 00:00:00 2019/01/03 00:00:00 2.0
Clean up by closing the database and deleting if its no longer needed.
>>> con.close()
>>> os.remove('historical.db')
Parameters
----------
date_time : str
Should be of format '%Y/%m/%d %H:%M:%S', and always a round 5 min interval e.g. 2019/01/01 11:55:00.
Returns
-------
pd.DataFrame
"""
query = "Select * from {table} where START_DATE <= '{datetime}' and END_DATE > '{datetime}'"
query = query.format(table=self.table_name, datetime=date_time)
return pd.read_sql_query(query, con=self.con)
class InputsByMatchDispatchConstraints(_AllHistDataSource):
"""Manages retrieving dispatch inputs by matching against the DISPATCHCONSTRAINTS table"""
def __init__(self, table_name, table_columns, table_primary_keys, con):
_MMSTable.__init__(self, table_name, table_columns, table_primary_keys, con)
def get_data(self, date_time):
"""Retrieves data for the specified date_time by matching against the DISPATCHCONSTRAINT table.
        First the DISPATCHCONSTRAINT table is filtered by SETTLEMENTDATE and then the contents of the class's table
is matched against that.
Examples
--------
>>> import sqlite3
>>> import os
Set up a database or connect to an existing one.
>>> con = sqlite3.connect('historical.db')
Create the table object.
>>> table = InputsByMatchDispatchConstraints(table_name='EXAMPLE',
... table_columns=['GENCONID', 'EFFECTIVEDATE', 'VERSIONNO', 'RHS'],
... table_primary_keys=['GENCONID', 'EFFECTIVEDATE', 'VERSIONNO'], con=con)
Create the table in the database.
>>> table.create_table_in_sqlite_db()
Normally you would use the set_data method to add historical data, but here we will add data directly to the
database so some simple example data can be added.
>>> data = pd.DataFrame({
... 'GENCONID': ['X', 'X', 'Y', 'Y'],
... 'EFFECTIVEDATE': ['2019/01/02 00:00:00', '2019/01/03 00:00:00', '2019/01/01 00:00:00',
... '2019/01/03 00:00:00'],
... 'VERSIONNO': [1, 2, 2, 3],
... 'RHS': [1.0, 2.0, 2.0, 3.0]})
>>> _ = data.to_sql('EXAMPLE', con=con, if_exists='append', index=False)
>>> data = pd.DataFrame({
... 'SETTLEMENTDATE' : ['2019/01/02 00:00:00', '2019/01/02 00:00:00', '2019/01/03 00:00:00',
... '2019/01/03 00:00:00'],
... 'CONSTRAINTID': ['X', 'Y', 'X', 'Y'],
... 'GENCONID_EFFECTIVEDATE': ['2019/01/02 00:00:00', '2019/01/01 00:00:00', '2019/01/03 00:00:00',
... '2019/01/03 00:00:00'],
... 'GENCONID_VERSIONNO': [1, 2, 2, 3]})
>>> _ = data.to_sql('DISPATCHCONSTRAINT', con=con, if_exists='append', index=False)
When we call get_data the output is filtered by the contents of DISPATCHCONSTRAINT.
>>> print(table.get_data(date_time='2019/01/02 00:00:00'))
GENCONID EFFECTIVEDATE VERSIONNO RHS
0 X 2019/01/02 00:00:00 1 1.0
1 Y 2019/01/01 00:00:00 2 2.0
>>> print(table.get_data(date_time='2019/01/03 00:00:00'))
GENCONID EFFECTIVEDATE VERSIONNO RHS
0 X 2019/01/03 00:00:00 2 2.0
1 Y 2019/01/03 00:00:00 3 3.0
Clean up by closing the database and deleting if its no longer needed.
>>> con.close()
>>> os.remove('historical.db')
Parameters
----------
date_time : str
Should be of format '%Y/%m/%d %H:%M:%S', and always a round 5 min interval e.g. 2019/01/01 11:55:00.
Returns
-------
pd.DataFrame
"""
columns = ','.join(['{}'.format(col) for col in self.table_columns])
query = """Select {columns} from (
{table}
inner join
(Select * from DISPATCHCONSTRAINT where SETTLEMENTDATE == '{datetime}')
on GENCONID == CONSTRAINTID
and EFFECTIVEDATE == GENCONID_EFFECTIVEDATE
and VERSIONNO == GENCONID_VERSIONNO);"""
query = query.format(columns=columns, table=self.table_name, datetime=date_time)
return pd.read_sql_query(query, con=self.con)
class InputsByEffectiveDateVersionNoAndDispatchInterconnector(_SingleDataSource):
    """Manages retrieving dispatch inputs by EFFECTIVEDATE and VERSIONNO."""
def __init__(self, table_name, table_columns, table_primary_keys, con):
_MMSTable.__init__(self, table_name, table_columns, table_primary_keys, con)
    def get_data(self, date_time):
        """Retrieves data for the specified date_time by EFFECTIVEDATE and VERSIONNO.
        For each unique record (by the remaining primary keys, not including EFFECTIVEDATE and VERSIONNO) the record
        with the most recent EFFECTIVEDATE, and the highest VERSIONNO within that date, is returned.
Examples
--------
>>> import sqlite3
>>> import os
Set up a database or connect to an existing one.
>>> con = sqlite3.connect('historical_inputs.db')
Create the table object.
>>> table = InputsByEffectiveDateVersionNoAndDispatchInterconnector(table_name='EXAMPLE',
... table_columns=['INTERCONNECTORID', 'EFFECTIVEDATE', 'VERSIONNO', 'INITIALMW'],
... table_primary_keys=['INTERCONNECTORID', 'EFFECTIVEDATE', 'VERSIONNO'], con=con)
Create the table in the database.
>>> table.create_table_in_sqlite_db()
        Normally you would use the set_data method to add historical data, but here we will add data directly to the
database so some simple example data can be added.
>>> data = pd.DataFrame({
... 'INTERCONNECTORID': ['X', 'X', 'Y', 'Y'],
... 'EFFECTIVEDATE': ['2019/01/02 00:00:00', '2019/01/03 00:00:00', '2019/01/01 00:00:00',
... '2019/01/03 00:00:00'],
... 'VERSIONNO': [1, 2, 2, 3],
... 'INITIALMW': [1.0, 2.0, 2.0, 3.0]})
>>> _ = data.to_sql('EXAMPLE', con=con, if_exists='append', index=False)
We also need to add data to DISPATCHINTERCONNECTORRES because the results of the get_data method are filtered
against this table
>>> data = pd.DataFrame({
... 'INTERCONNECTORID': ['X', 'X', 'Y'],
... 'SETTLEMENTDATE': ['2019/01/02 00:00:00', '2019/01/03 00:00:00', '2019/01/02 00:00:00']})
>>> _ = data.to_sql('DISPATCHINTERCONNECTORRES', con=con, if_exists='append', index=False)
        When we call get_data the output is filtered by the contents of DISPATCHINTERCONNECTORRES.
>>> print(table.get_data(date_time='2019/01/02 00:00:00'))
INTERCONNECTORID EFFECTIVEDATE VERSIONNO INITIALMW
0 X 2019/01/02 00:00:00 1 1.0
1 Y 2019/01/01 00:00:00 2 2.0
In the next interval interconnector Y is not present in DISPATCHINTERCONNECTORRES.
>>> print(table.get_data(date_time='2019/01/03 00:00:00'))
INTERCONNECTORID EFFECTIVEDATE VERSIONNO INITIALMW
0 X 2019/01/03 00:00:00 2 2.0
Clean up by closing the database and deleting if its no longer needed.
>>> con.close()
>>> os.remove('historical_inputs.db')
Parameters
----------
date_time : str
Should be of format '%Y/%m/%d %H:%M:%S', and always a round 5 min interval e.g. 2019/01/01 11:55:00.
Returns
-------
pd.DataFrame
"""
id_columns = ','.join([col for col in self.table_primary_keys if col not in ['EFFECTIVEDATE', 'VERSIONNO']])
return_columns = ','.join(self.table_columns)
with self.con:
cur = self.con.cursor()
cur.execute("DROP TABLE IF EXISTS temp;")
cur.execute("DROP TABLE IF EXISTS temp2;")
cur.execute("DROP TABLE IF EXISTS temp3;")
cur.execute("DROP TABLE IF EXISTS temp4;")
            # Store just the unique sets of ids that came into effect before the datetime in a temporary table.
query = """CREATE TEMPORARY TABLE temp AS
SELECT *
FROM {table}
WHERE EFFECTIVEDATE <= '{datetime}';"""
cur.execute(query.format(table=self.table_name, datetime=date_time))
            # For each unique set of ids and effective date, get the latest versionno and store it in a temporary table.
query = """CREATE TEMPORARY TABLE temp2 AS
SELECT {id}, EFFECTIVEDATE, MAX(VERSIONNO) AS VERSIONNO
FROM temp
GROUP BY {id}, EFFECTIVEDATE;"""
cur.execute(query.format(id=id_columns))
# For each unique set of ids get the record with the most recent effective date.
query = """CREATE TEMPORARY TABLE temp3 as
SELECT {id}, VERSIONNO, max(EFFECTIVEDATE) as EFFECTIVEDATE
FROM temp2
GROUP BY {id};"""
cur.execute(query.format(id=id_columns))
# Inner join the original table to the set of most recent effective dates and version no.
query = """CREATE TEMPORARY TABLE temp4 AS
SELECT *
FROM {table}
INNER JOIN temp3
USING ({id}, VERSIONNO, EFFECTIVEDATE);"""
cur.execute(query.format(table=self.table_name, id=id_columns))
# Inner join the most recent data with the interconnectors used in the actual interval of interest.
query = """SELECT {cols}
FROM temp4
INNER JOIN (SELECT *
FROM DISPATCHINTERCONNECTORRES
WHERE SETTLEMENTDATE == '{datetime}')
USING (INTERCONNECTORID);"""
query = query.format(datetime=date_time, id=id_columns, cols=return_columns)
data = | pd.read_sql_query(query, con=self.con) | pandas.read_sql_query |
import datetime
import dill
import tqdm.auto
import pathlib
import zipfile
import numpy as np
import pandas as pd
def parse_interaction_events(data_path, first_day_date, from_date_incl, to_date_excl, num_timesteps=48, bidirectional=True):
dfs = []
for filename in tqdm.auto.tqdm(sorted(list(pathlib.Path(data_path).glob('**/*.zip')))):
index = int(filename.name.split('.')[0].split('_')[1])
day, _ = divmod(index, num_timesteps)
date = first_day_date + datetime.timedelta(days=day)
if (date < from_date_incl) or (date >= to_date_excl):
continue
day = (date - from_date_incl).days
with zipfile.ZipFile(filename) as resultsfile:
with resultsfile.open('results.dill') as results:
t = dill.loads(results.read())
dfs.append(pd.DataFrame(t, columns=['timestamp', 'bee_id_0', 'bee_id_1']))
df = pd.concat(dfs)
df.drop_duplicates(inplace=True)
if bidirectional:
df_swapped = df.copy()
df_swapped.loc[:, ['bee_id_0','bee_id_1']] = df_swapped.loc[:, ['bee_id_1','bee_id_0']].values
df = pd.concat((df, df_swapped))
df.sort_values('timestamp', inplace=True)
df.reset_index(inplace=True, drop=True)
return df
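# Illustrative usage sketch (not part of the original module): the data path and the
# recording start date below are assumptions, chosen only to show the call signature.
def _example_parse_interaction_events():
    first_day = datetime.date(2019, 8, 1)  # assumed first day of the recording
    return parse_interaction_events(
        data_path='data/interaction_results',  # assumed directory containing *.zip result files
        first_day_date=first_day,
        from_date_incl=first_day,
        to_date_excl=first_day + datetime.timedelta(days=7),  # one week of events
    )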
def load_alive_df(alive_path):
alive_df = | pd.read_csv(alive_path, parse_dates=['annotated_tagged_date', 'inferred_death_date']) | pandas.read_csv |
import matplotlib.pyplot as plt # type: ignore
import numpy as np # type: ignore
import pandas as pd # type: ignore
class CalculationsMixin(object):
__perf_charts = False # TODO move
def _constructDf(self, dfs):
# join along time axis
if dfs:
df = pd.concat(dfs, sort=True)
df.sort_index(inplace=True)
df = df.groupby(df.index).last()
df.drop_duplicates(inplace=True)
df.fillna(method='ffill', inplace=True)
else:
df = pd.DataFrame()
return df
def _getInstruments(self):
instruments = []
for position in self.positions():
instrument = position.instrument
instruments.append(instrument)
return instruments
def _getPrice(self):
portfolio = []
price_cols = []
for instrument, price_history in self.priceHistory().items():
#########
# Price #
#########
price_col = instrument.name
price_cols.append(price_col)
price_history.set_index('when', inplace=True)
portfolio.append(price_history)
return self._constructDf(portfolio)
def _getAssetPrice(self):
portfolio = []
price_cols = []
for position in self.positions():
instrument = position.instrument
#########
# Price #
#########
price_col = instrument.name
price_cols.append(price_col)
price_history = pd.DataFrame(position.instrumentPriceHistory, columns=[price_col, 'when'])
price_history.set_index('when', inplace=True)
portfolio.append(price_history)
return self._constructDf(portfolio)
def _getPnl(self):
portfolio = []
pnl_cols = []
total_pnl_cols = []
for position in self.positions():
instrument = position.instrument
#######
# Pnl #
#######
total_pnl_col = 'pnl:{}'.format(instrument.name)
unrealized_pnl_col = 'ur:{}'.format(instrument.name)
pnl_cols.append(unrealized_pnl_col)
unrealized_pnl_history = pd.DataFrame(position.unrealizedPnlHistory, columns=[unrealized_pnl_col, 'when'])
unrealized_pnl_history.set_index('when', inplace=True)
realized_pnl_col = 'r:{}'.format(instrument.name)
pnl_cols.append(realized_pnl_col)
realized_pnl_history = pd.DataFrame(position.pnlHistory, columns=[realized_pnl_col, 'when'])
realized_pnl_history.set_index('when', inplace=True)
unrealized_pnl_history[realized_pnl_col] = realized_pnl_history[realized_pnl_col]
unrealized_pnl_history[total_pnl_col] = unrealized_pnl_history.sum(axis=1)
total_pnl_cols.append(total_pnl_col)
portfolio.append(unrealized_pnl_history)
df_pnl = self._constructDf(portfolio)
################
# calculations #
################
# calculate total pnl
df_pnl['alpha'] = df_pnl.sum(axis=1)
return df_pnl
def _getSize(self):
portfolio = []
size_cols = []
for position in self.positions():
instrument = position.instrument
#################
# Position Size #
#################
size_col = 's:{}'.format(instrument.name)
size_cols.append(size_col)
size_history = pd.DataFrame(position.sizeHistory, columns=[size_col, 'when'])
size_history.set_index('when', inplace=True)
portfolio.append(size_history)
price_col = instrument.name
price_history = pd.DataFrame(position.instrumentPriceHistory, columns=[price_col, 'when'])
price_history.set_index('when', inplace=True)
portfolio.append(price_history)
return self._constructDf(portfolio)[size_cols]
def _getNotional(self):
portfolio = []
notional_cols = []
for position in self.positions():
instrument = position.instrument
#################
# Position Size #
#################
notional_col = 'n:{}'.format(instrument.name)
notional_cols.append(notional_col)
notional_history = pd.DataFrame(position.notionalHistory, columns=[notional_col, 'when'])
notional_history.set_index('when', inplace=True)
portfolio.append(notional_history)
price_col = instrument.name
price_history = | pd.DataFrame(position.instrumentPriceHistory, columns=[price_col, 'when']) | pandas.DataFrame |
"""
"""
"""
>>> # ---
>>> # SETUP
>>> # ---
>>> import os
>>> import logging
>>> logger = logging.getLogger('PT3S.Rm')
>>> # ---
>>> # path
>>> # ---
>>> if __name__ == "__main__":
... try:
... dummy=__file__
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ','path = os.path.dirname(__file__)'," ."))
... path = os.path.dirname(__file__)
... except NameError:
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ',"path = '.' because __file__ not defined and: "," from Rm import Rm"))
... path = '.'
... from Rm import Rm
... else:
... path = '.'
... logger.debug("{0:s}{1:s}".format('Not __main__ Context: ',"path = '.' ."))
>>> try:
... from PT3S import Mx
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Mx: ImportError: ","trying import Mx instead ... maybe pip install -e . is active ..."))
... import Mx
>>> try:
... from PT3S import Xm
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Xm: ImportError: ","trying import Xm instead ... maybe pip install -e . is active ..."))
... import Xm
>>> # ---
>>> # testDir
>>> # ---
>>> # globs={'testDir':'testdata'}
>>> try:
... dummy= testDir
... except NameError:
... testDir='testdata'
>>> # ---
>>> # dotResolution
>>> # ---
>>> # globs={'dotResolution':''}
>>> try:
... dummy= dotResolution
... except NameError:
... dotResolution=''
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> pd.set_option('display.max_columns',None)
>>> pd.set_option('display.width',666666666)
>>> # ---
>>> # LocalHeatingNetwork SETUP
>>> # ---
>>> xmlFile=os.path.join(os.path.join(path,testDir),'LocalHeatingNetwork.XML')
>>> xm=Xm.Xm(xmlFile=xmlFile)
>>> mx1File=os.path.join(path,os.path.join(testDir,'WDLocalHeatingNetwork\B1\V0\BZ1\M-1-0-1'+dotResolution+'.MX1'))
>>> mx=Mx.Mx(mx1File=mx1File,NoH5Read=True,NoMxsRead=True)
>>> mx.setResultsToMxsFile(NewH5Vec=True)
5
>>> xm.MxSync(mx=mx)
>>> rm=Rm(xm=xm,mx=mx)
>>> # ---
>>> # Plot 3Classes False
>>> # ---
>>> plt.close('all')
>>> ppi=72 # matplotlib default
>>> dpi_screen=2*ppi
>>> fig=plt.figure(dpi=dpi_screen,linewidth=1.)
>>> timeDeltaToT=mx.df.index[2]-mx.df.index[0]
>>> # 3Classes and FixedLimits are False by default; RefPerc is True by default
>>> # MCategory is always assigned according to FixedLimitsHigh/Low ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66,pFWVBGCategory=['BLNZ1u5u7'],pVICsDf=pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']}))
>>> # ---
>>> # Check pFWVB Return
>>> # ---
>>> f=lambda x: "{0:8.5f}".format(x)
>>> print(pFWVB[['Measure','MCategory','GCategory','VIC']].round(2).to_string(formatters={'Measure':f}))
Measure MCategory GCategory VIC
0 0.81000 Top BLNZ1u5u7 NaN
1 0.67000 Middle NaN
2 0.66000 Middle BLNZ1u5u7 NaN
3 0.66000 Bottom BLNZ1u5u7 VIC1
4 0.69000 Middle NaN
>>> # ---
>>> # Print
>>> # ---
>>> (wD,fileName)=os.path.split(xm.xmlFile)
>>> (base,ext)=os.path.splitext(fileName)
>>> plotFileName=wD+os.path.sep+base+'.'+'pdf'
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
>>> plt.savefig(plotFileName,dpi=2*dpi_screen)
>>> os.path.exists(plotFileName)
True
>>> # ---
>>> # Plot 3Classes True
>>> # ---
>>> plt.close('all')
>>> # FixedLimits is automatically set to True when 3Classes is True ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasure3Classes=True,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66)
>>> # ---
>>> # LocalHeatingNetwork Clean Up
>>> # ---
>>> if os.path.exists(mx.h5File):
... os.remove(mx.h5File)
>>> if os.path.exists(mx.mxsZipFile):
... os.remove(mx.mxsZipFile)
>>> if os.path.exists(mx.h5FileVecs):
... os.remove(mx.h5FileVecs)
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
"""
__version__='172.16.58.3.dev1'
import warnings # 3.6
#...\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
# from ._conv import register_converters as _register_converters
warnings.simplefilter(action='ignore', category=FutureWarning)
#C:\Users\Wolters\Anaconda3\lib\site-packages\matplotlib\cbook\deprecation.py:107: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
# warnings.warn(message, mplDeprecation, stacklevel=1)
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
import os
import sys
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import pandas as pd
import h5py
from collections import namedtuple
from operator import attrgetter
import subprocess
import warnings
import tables
import math
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.colorbar import make_axes
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
from matplotlib import markers
from matplotlib.path import Path
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import scipy
import networkx as nx
from itertools import chain
from copy import deepcopy
from scipy.signal import savgol_filter
import logging
# ---
# --- PT3S Imports
# ---
logger = logging.getLogger('PT3S')
if __name__ == "__main__":
logger.debug("{0:s}{1:s}".format('in MODULEFILE: __main__ Context','.'))
else:
logger.debug("{0:s}{1:s}{2:s}{3:s}".format('in MODULEFILE: Not __main__ Context: ','__name__: ',__name__," ."))
try:
from PT3S import Mx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Mx - trying import Mx instead ... maybe pip install -e . is active ...'))
import Mx
try:
from PT3S import Xm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Xm - trying import Xm instead ... maybe pip install -e . is active ...'))
import Xm
try:
from PT3S import Am
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Am - trying import Am instead ... maybe pip install -e . is active ...'))
import Am
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
# ---
# --- main Imports
# ---
import argparse
import unittest
import doctest
import math
from itertools import tee
# --- Parameter Allgemein
# -----------------------
DINA6 = (4.13 , 5.83)
DINA5 = (5.83 , 8.27)
DINA4 = (8.27 , 11.69)
DINA3 = (11.69 , 16.54)
DINA2 = (16.54 , 23.39)
DINA1 = (23.39 , 33.11)
DINA0 = (33.11 , 46.81)
DINA6q = ( 5.83, 4.13)
DINA5q = ( 8.27, 5.83)
DINA4q = ( 11.69, 8.27)
DINA3q = ( 16.54,11.69)
DINA2q = ( 23.39,16.54)
DINA1q = ( 33.11,23.39)
DINA0q = ( 46.81,33.11)
dpiSize=72
DINA4_x=8.2677165354
DINA4_y=11.6929133858
DINA3_x=DINA4_x*math.sqrt(2)
DINA3_y=DINA4_y*math.sqrt(2)
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
ylimpD=(-5,70)
ylimpDmlc=(600,1350) #(300,1050)
ylimQD=(-75,300)
ylim3rdD=(0,3)
yticks3rdD=[0,1,2,3]
yGridStepsD=30
yticksALD=[0,3,4,10,20,30,40]
ylimALD=(yticksALD[0],yticksALD[-1])
yticksRD=[0,2,4,10,15,30,45]
ylimRD=(-yticksRD[-1],yticksRD[-1])
ylimACD=(-5,5)
yticksACD=[-5,0,5]
yticksTVD=[0,100,135,180,200,300]
ylimTVD=(yticksTVD[0],yticksTVD[-1])
plotTVAmLabelD='TIMER u. AM [Sek. u. (N)m3*100]'
def getDerivative(df,col,shiftSize=1,windowSize=60,fct=None,savgol_polyorder=None):
"""
returns a df
df: the df
col: the col of df to be derived
shiftsize: the Difference between 2 indices for dValue and dt
windowSize: size for rolling mean or window_length of savgol_filter; choosen filtertechnique is applied after fct
windowsSize must be an even number
for savgol_filter windowsSize-1 is used
fct: function to be applied on dValue/dt
savgol_polyorder: if not None savgol_filter is applied; pandas' rolling.mean() is applied otherwise
new cols:
dt (with shiftSize)
dValue (from col)
dValueDt (from col); fct applied
dValueDtFiltered; choosen filtertechnique is applied
"""
mDf=df.dropna().copy(deep=True)
try:
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
if savgol_polyorder == None:
mDf['dValueDtFiltered']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
else:
mDf['dValueDtFiltered']=savgol_filter(mDf['dValueDt'].values,windowSize-1, savgol_polyorder)
            mDf=mDf.iloc[windowSize//2+1+savgol_polyorder-1:]  # integer division: iloc requires an int position
#mDf=mDf.iloc[windowSize-1:]
except Exception as e:
raise e
finally:
return mDf
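# Illustrative usage sketch (not part of the original module): the synthetic series and the
# column name 'value' are assumptions; it shows a smoothed numerical derivative of a 10s-sampled signal.
def _exampleGetDerivative():
    idx = pd.date_range('2021-01-01', periods=600, freq='10S')
    df = pd.DataFrame({'value': np.sin(np.linspace(0., 6., 600))}, index=idx)
    # default filtering: 60-sample rolling mean of dValue/dt
    return getDerivative(df, 'value', shiftSize=1, windowSize=60)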
def fCVDNodesFromName(x):
Nodes=x.replace('°','~')
Nodes=Nodes.split('~')
Nodes =[Node.lstrip().rstrip() for Node in Nodes if len(Node)>0]
return Nodes
def fgetMaxpMinFromName(CVDName,dfSegsNodesNDataDpkt):
"""
    returns the max. pMin over all NODEs in CVDName
"""
nodeLst=fCVDNodesFromName(CVDName)
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['NODEsName'].isin(nodeLst)][['pMin','pMinMlc']]
s=df.max()
return s.pMin
# --- Funktionen Allgemein
# -----------------------
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def genTimespans(timeStart
,timeEnd
,timeSpan=pd.Timedelta('12 Minutes')
,timeOverlap=pd.Timedelta('0 Seconds')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
):
# generates timeSpan-Sections
# if timeStart is
# an int, it is considered as the number of desired Sections before timeEnd; timeEnd must be a time
# a time, it is considered as timeStart
# if timeEnd is
# an int, it is considered as the number of desired Sections after timeStart; timeStart must be a time
# a time, it is considered as timeEnd
# if timeSpan is
# an int, it is considered as the number of desired Sections
# a time, it is considered as timeSpan
# returns an array of tuples
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
xlims=[]
try:
if type(timeStart) == int:
numOfDesiredSections=timeStart
timeStartEff=timeEnd+timeEndPostfix-numOfDesiredSections*timeSpan+(numOfDesiredSections-1)*timeOverlap-timeStartPraefix
else:
timeStartEff=timeStart-timeStartPraefix
logger.debug("{0:s}timeStartEff: {1:s}".format(logStr,str(timeStartEff)))
if type(timeEnd) == int:
numOfDesiredSections=timeEnd
timeEndEff=timeStart-timeStartPraefix+numOfDesiredSections*timeSpan-(numOfDesiredSections-1)*timeOverlap+timeEndPostfix
else:
timeEndEff=timeEnd+timeEndPostfix
logger.debug("{0:s}timeEndEff: {1:s}".format(logStr,str(timeEndEff)))
if type(timeSpan) == int:
numOfDesiredSections=timeSpan
dt=timeEndEff-timeStartEff
timeSpanEff=dt/numOfDesiredSections+(numOfDesiredSections-1)*timeOverlap
else:
timeSpanEff=timeSpan
logger.debug("{0:s}timeSpanEff: {1:s}".format(logStr,str(timeSpanEff)))
logger.debug("{0:s}timeOverlap: {1:s}".format(logStr,str(timeOverlap)))
timeStartAct = timeStartEff
while timeStartAct < timeEndEff:
logger.debug("{0:s}timeStartAct: {1:s}".format(logStr,str(timeStartAct)))
timeEndAct=timeStartAct+timeSpanEff
xlim=(timeStartAct,timeEndAct)
xlims.append(xlim)
timeStartAct = timeEndAct - timeOverlap
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
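# Illustrative usage sketch (not part of the original module): passing an int as timeStart is
# interpreted as the desired number of sections counted backwards from timeEnd.
def _exampleGenTimespans():
    # yields [(11:24, 11:36), (11:36, 11:48), (11:48, 12:00)]
    return genTimespans(
        timeStart=3,
        timeEnd=pd.Timestamp('2021-01-01 12:00:00'),
        timeSpan=pd.Timedelta('12 Minutes'),
    )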
def gen2Timespans(
timeStart # Anfang eines "Prozesses"
,timeEnd # Ende eines "Prozesses"
,timeSpan=pd.Timedelta('12 Minutes')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
,roundStr=None # i.e. '5min': timeStart.round(roundStr) und timeEnd dito
):
"""
    creates 2 time ranges of equal length
    1 around timeStart
    1 around timeEnd
"""
#print("timeStartPraefix: {:s}".format(str(timeStartPraefix)))
#print("timeEndPostfix: {:s}".format(str(timeEndPostfix)))
xlims=[]
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if roundStr != None:
timeStart=timeStart.round(roundStr)
timeEnd=timeEnd.round(roundStr)
xlims.append((timeStart-timeStartPraefix,timeStart-timeStartPraefix+timeSpan))
xlims.append((timeEnd+timeEndPostfix-timeSpan,timeEnd+timeEndPostfix))
return xlims
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
def fTotalTimeFromPairs(
x
,denominator=None # i.e. pd.Timedelta('1 minute') for totalTime in Minutes
,roundToInt=True # round to and return as int if denominator is specified; else td is rounded by 2
):
tdTotal= | pd.Timedelta('0 seconds') | pandas.Timedelta |
import re
import pandas as pd
import numpy as np
from glob import glob
import os
from tqdm import tqdm
import sys
from itertools import combinations
from p_tqdm import p_map, p_umap
from scipy import sparse
from src.utils import UniqueIdAssigner
class SmaliApp():
LINE_PATTERN = re.compile('^(\.method.*)|^(\.end method)|^[ ]{4}(invoke-.*)', flags=re.M)
INVOKE_PATTERN = re.compile(
"(invoke-\w+)(?:\/range)? {.*}, " # invoke
+ "(\[*[ZBSCFIJD]|\[*L[\w\/$-]+;)->" # package
+ "([\w$]+|<init>).+" # method
)
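    # Illustrative example (assumed smali line, not taken from a real app) of what
    # INVOKE_PATTERN captures as (invocation, library, method_name):
    #   'invoke-virtual {v0, v1}, Landroid/telephony/TelephonyManager;->getDeviceId()Ljava/lang/String;'
    #   -> ('invoke-virtual', 'Landroid/telephony/TelephonyManager;', 'getDeviceId')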
def __init__(self, app_dir):
self.app_dir = app_dir
self.package = app_dir.split('/')[-2]
self.smali_fn_ls = sorted(glob(
os.path.join(app_dir, 'smali*/**/*.smali'), recursive=True
))
        if len(self.smali_fn_ls) == 0:
            # No smali files found: warn and skip this app rather than raising.
            print('Skipping invalid app dir:', self.app_dir, file=sys.stdout)
            return
self.info = self.extract_info()
def _extract_line_file(self, fn):
with open(fn) as f:
data = SmaliApp.LINE_PATTERN.findall(f.read())
if len(data) == 0: return None
data = np.array(data)
assert data.shape[1] == 3 # 'start', 'end', 'call'
relpath = os.path.relpath(fn, start=self.app_dir)
data = np.hstack((data, np.full(data.shape[0], relpath).reshape(-1, 1)))
return data
def _assign_code_block(df):
df['code_block_id'] = (df.start.str.len() != 0).cumsum()
return df
def _assign_package_invoke_method(df):
res = (
df.call.str.extract(SmaliApp.INVOKE_PATTERN)
.rename(columns={0: 'invocation', 1: 'library', 2: 'method_name'})
)
return pd.concat([df, res], axis=1)
def extract_info(self):
agg = [self._extract_line_file(f) for f in self.smali_fn_ls]
df = pd.DataFrame(
np.vstack([i for i in agg if i is not None]),
columns=['start', 'end', 'call', 'relpath']
)
df = SmaliApp._assign_code_block(df)
df = SmaliApp._assign_package_invoke_method(df)
# clean
assert (df.start.str.len() > 0).sum() == (df.end.str.len() > 0).sum(), f'Number of start and end are not equal in {self.app_dir}'
df = (
df[df.call.str.len() > 0]
.drop(columns=['start', 'end']).reset_index(drop=True)
)
# verify no nans
extract_nans = df.isna().sum(axis=1)
assert (extract_nans == 0).all(), f'nan in {extract_nans.values.nonzero()} for {self.app_dir}'
# self.info.loc[self.info.isna().sum(axis=1) != 0, :]
return df
class HINProcess():
def __init__(self, csvs, out_dir, nproc=4):
self.csvs = csvs
self.out_dir = out_dir
self.nproc = nproc
self.packages = [os.path.basename(csv)[:-4] for csv in csvs]
print('Processing CSVs')
self.infos = p_map(HINProcess.csv_proc, csvs, num_cpus=nproc)
self.prep_ids()
def prep_ids(self):
print('Processing APIs', file=sys.stdout)
self.API_uid = UniqueIdAssigner()
for info in tqdm(self.infos):
info['api_id'] = self.API_uid.add(*info.api)
self.APP_uid = UniqueIdAssigner()
for package in self.packages:
self.APP_uid.add(package)
def csv_proc(csv):
df = pd.read_csv(
csv, dtype={'method_name': str}, keep_default_na=False
)
df['api'] = df.library + '->' + df.method_name
return df
def construct_graph_A(self):
unique_APIs_app = [set(info.api_id) for info in self.infos]
unique_APIs_all = set.union(*unique_APIs_app)
A_cols = []
for unique in unique_APIs_all:
bag_of_API = [
1 if unique in app_set else 0
for app_set in unique_APIs_app
]
A_cols.append(bag_of_API)
A_mat = np.array(A_cols).T # shape: (# of apps, # of unique APIs)
A_mat = sparse.csr_matrix(A_mat)
return A_mat
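    # Illustrative example (not executed): with two apps whose unique API ids are {0, 2} and {1},
    # construct_graph_A yields one row per app over one column per unique API (column order
    # follows set iteration order), i.e. A = [[1, 0, 1],
    #                                         [0, 1, 0]]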
def _prep_graph_B(info):
func_pairs = lambda d: list(combinations(d.api_id.unique(), 2))
edges = pd.DataFrame(
info.groupby('code_block_id').apply(func_pairs).explode()
.reset_index(drop=True).drop_duplicates().dropna()
.values.tolist()
).values.T.astype('uint32')
return edges
def _prep_graph_P(info):
func_pairs = lambda d: list(combinations(d.api_id.unique(), 2))
edges = pd.DataFrame(
info.groupby('library').apply(func_pairs).explode()
.reset_index(drop=True).drop_duplicates().dropna()
.values.tolist()
).values.T.astype('uint32')
return edges
def _save_interim_BP(Bs, Ps, csvs, nproc):
print('Saving B and P', file=sys.stdout)
p_umap(
lambda arr, file: np.save(file, arr),
Bs + Ps,
[f[:-4] + '.B' for f in csvs] + [f[:-4] + '.P' for f in csvs],
num_cpus=nproc
)
def prep_graph_BP(self, out=True):
print('Preparing B', file=sys.stdout)
Bs = p_map(HINProcess._prep_graph_B, self.infos, num_cpus=self.nproc)
print('Preparing P', file=sys.stdout)
Ps = p_map(HINProcess._prep_graph_P, self.infos, num_cpus=self.nproc)
if out:
HINProcess._save_interim_BP(Bs, Ps, self.csvs, self.nproc)
return Bs, Ps
def _build_coo(arr_ls, shape):
arr = np.hstack([a for a in arr_ls if a.shape[0] == 2])
arr = np.hstack([arr, arr[::-1, :]])
arr = np.unique(arr, axis=1) # drop dupl pairs
values = np.full(shape=arr.shape[1], fill_value=1, dtype='i1')
sparse_arr = sparse.coo_matrix(
(values, (arr[0], arr[1])), shape=shape
)
sparse_arr.setdiag(1)
return sparse_arr
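    # Illustrative example (not executed): a single edge array [[0, 1], [2, 2]] encodes the
    # undirected pairs (0, 2) and (1, 2); with shape=(3, 3), _build_coo returns the symmetric
    # matrix with a unit diagonal:
    #   [[1, 0, 1],
    #    [0, 1, 1],
    #    [1, 1, 1]]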
def construct_graph_BP(self, Bs, Ps):
shape = (len(self.API_uid), len(self.API_uid))
print('Constructing B', file=sys.stdout)
B_mat = HINProcess._build_coo(Bs, shape).tocsc()
print('Constructing P', file=sys.stdout)
P_mat = HINProcess._build_coo(Ps, shape).tocsc()
return B_mat, P_mat
def save_matrices(self):
print('Saving matrices', file=sys.stdout)
path = self.out_dir
sparse.save_npz(os.path.join(path, 'A'), self.A_mat)
sparse.save_npz(os.path.join(path, 'B'), self.B_mat)
sparse.save_npz(os.path.join(path, 'P'), self.P_mat)
def save_info(self):
print('Saving infos', file=sys.stdout)
path = self.out_dir
s_API = | pd.Series(self.API_uid.value_by_id, name='api') | pandas.Series |
# -*- coding: utf-8 -*-
# @Author: liuyulin
# @Date: 2018-10-08 15:33:11
# @Last Modified by: liuyulin
# @Last Modified time: 2018-10-08 15:37:06
import numpy as np
import pandas as pd
def generate_testing_set(actual_track_datapath = '../../DATA/DeepTP/processed_flight_tracks.csv',
flight_plan_datapath = '../../DATA/DeepTP/processed_flight_plans.csv',
flight_plan_utilize_datapath = '../../DATA/DeepTP/IAH_BOS_Act_Flt_Trk_20130101_1231.CSV',
testing_fid = [20130118900394, 20130426357386, 20130713836889, 20130810273857, 20131109716864],
num_feed_pnt = 20,
testing_track_dir = '../../DATA/DeepTP/test_flight_tracks.csv',
testing_fp_dir = '../../DATA/DeepTP/test_flight_plans.csv',
):
act_track_data = pd.read_csv(actual_track_datapath, header = 0)
FP_track = | pd.read_csv(flight_plan_datapath) | pandas.read_csv |
import pandas as pd
import numpy as np
## this function gives detailed info on NaN values of input df
from data_clean import perc_null
#these functionas add a date column (x2) and correct mp season format
from data_fix_dates import game_add_mp_date, bet_add_mp_date, fix_mp_season
#these functions assign nhl_names eg 'NYR' to bet, mp, and game;
# functions use simple dictionaries
from data_fix_team_names import bet_to_nhl, mp_to_nhl, game_to_nhl
##these are two different functions for assigning game_id to df_betting, based on team, date, H/A
##one uses df_game as look up table ... other uses df_mp_teams as look up table
from data_bet_add_game_id import mp_to_bet_add_game_id, game_to_bet_add_game_id
##import all the files
##file paths
Kaggle_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Kaggle_Data_Ellis/"
mp_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Money_Puck_Data/"
betting_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Betting_Data/"
#Kaggle_path = "/Users/joejohns/data_bootcamp/Final_Project_NHL_prediction/Data/Kaggle_Data_Ellis/"
#mp_path = "/Users/joejohns/data_bootcamp/Final_Project_NHL_prediction/Data/Money_Puck_Data/"
#betting_path = "/Users/joejohns/data_bootcamp/Final_Project_NHL_prediction/Data/Betting_Data/"
##Kaggle files
df_game = pd.read_csv(Kaggle_path+'game.csv')
df_game_team_stats = pd.read_csv(Kaggle_path+'game_teams_stats.csv')
df_game_skater_stats = pd.read_csv(Kaggle_path+'game_skater_stats.csv')
df_game_goalie_stats = pd.read_csv(Kaggle_path+'game_goalie_stats.csv')
##more subtle Kaggle features:
df_game_scratches = pd.read_csv(Kaggle_path+'game_scratches.csv')
df_game_officials = pd.read_csv(Kaggle_path+'game_officials.csv')
df_team_info = pd.read_csv(Kaggle_path+'team_info.csv')
## grab all the moneypuck data
df_mp_teams = pd.read_csv(mp_path+'all_teams.csv')
## grab all betting data
df1 = pd.read_excel(io = betting_path+'nhl odds 2007-08.xlsx')
df2 = pd.read_excel(io = betting_path+'nhl odds 2008-09.xlsx')
df3 = pd.read_excel(io = betting_path+'nhl odds 2009-10.xlsx')
df4 = | pd.read_excel(io = betting_path+'nhl odds 2010-11.xlsx') | pandas.read_excel |
import os.path
import json
import pandas as pd
import xgboost as xgb
import joblib
from IPython import get_ipython
from sklearn.preprocessing import scale
from sklearn.model_selection import KFold
from time import time
from sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix, \
classification_report, accuracy_score, auc, roc_auc_score, roc_curve
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_classification
# from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import make_scorer
from scipy.stats import expon
from clean_data import make_directory
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
ipy = get_ipython()
if ipy is not None:
ipy.run_line_magic('matplotlib', 'inline')
def prepare_data(data, drop_na=True):
''' Drops unnecessary columns, Fill or Drop rows containing N/A, and pre-processes the columns.'''
data = data.drop(columns=['Date', 'HomeTeam', 'AwayTeam'])
data = data.drop(columns=['FTHG', 'FTAG'])
data = data.drop(columns=['HT_goal_for', 'AT_goal_for', 'HT_goal_against', 'AT_goal_against'])
# data = data.drop(columns=['HT_3_win_streak', 'HT_5_win_streak', 'HT_3_lose_Streak', 'HT_5_lose_Streak',
# 'AT_3_win_streak', 'AT_5_win_streak', 'AT_3_lose_Streak', 'AT_5_lose_Streak'])
data = data.loc[data['HT_match_played'] == data['HT_match_played']]
if drop_na:
data = data.dropna()
else:
data.fillna(value=-99999, inplace=True)
# Columns that are not normalized: (Ordinal, Categorical)
# [FTR, HT_match_played, AT_match_played, HT_3_win_streak, HT_5_win_streak,
# HT_3_lose_Streak, HT_5_lose_Streak, AT_3_win_streak, AT_5_win_streak, AT_3_lose_Streak, AT_5_lose_Streak]
# Columns that are normalised: (Continuous variables)
normalized_columns = ['HomeOVA', 'AwayOVA', 'OVA_diff']
normalized_columns += ['HT_current_standing', 'AT_current_standing']
normalized_columns += ['HT_goal_diff', 'HT_win_rate_season', 'AT_goal_diff', 'AT_win_rate_season']
normalized_columns += ['HT_past_standing', 'HT_past_goal_diff', 'HT_past_win_rate',
'AT_past_standing', 'AT_past_goal_diff', 'AT_past_win_rate']
normalized_columns += ['HT_5_win_rate', 'AT_5_win_rate', 'HT_win_rate_against', 'AT_win_rate_against']
normalized_columns += ['current_standing_diff', 'win_rate_season_diff', 'goal_diff_diff']
normalized_columns += ['past_standing_diff', 'past_goal_diff_diff', 'past_win_rate_diff']
# normalized_columns += ['HT_goal_for', 'AT_goal_for', 'HT_goal_against', 'AT_goal_against']
for column in normalized_columns:
data[column] = scale(list(data[column]))
return data
def train_classifier(clf, x_train, y_train):
    # Fit the classifier to the training data and report how long the fit took.
start = time()
clf.fit(x_train, y_train)
end = time()
print("Trained model in {:.4f} seconds".format(end - start))
def predict_labels(clf, features, target):
    # Make predictions with a fitted classifier and report the per-class F1 score.
    # The prediction step is timed.
start = time()
y_pred = clf.predict(features)
end = time()
print("Made predictions in {:.4f} seconds.".format(end - start))
return f1_score(target, y_pred, labels=['H', 'D', 'A'], average=None), sum(target == y_pred) / float(len(y_pred)), \
clf.score(features, target), y_pred
def train_and_predict(clf, x_train, y_train, x_test, y_test):
    # Train a classifier, then report F1 and accuracy on both the training and test sets.
print("Training a {} using a training set size of {}. . .".format(clf.__class__.__name__, len(x_train)))
# Train the classifier
train_classifier(clf, x_train, y_train)
# Print the results of prediction for both training and testing
f1, acc, confidence, _ = predict_labels(clf, x_train, y_train)
# print("F1 score and accuracy score for training set: {} , {}.".format(f1, acc))
# print("Confidence score for training set: {}.".format(confidence))
print()
f1, acc, confidence, predictions = predict_labels(clf, x_test, y_test)
print("F1 score and accuracy score for test set: {} , {}.".format(f1, acc))
# print("Confidence score for test set: {}.".format(confidence))
print()
return confidence, predictions
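# Illustrative usage sketch (not part of the original script): 'final.csv' is an assumed name for
# the cleaned match-statistics file; it shows fitting one baseline classifier on an 80/20 split.
def _example_train_single_classifier():
    data = prepare_data(pd.read_csv('final.csv'), drop_na=True)
    x_all, y_all = data.drop(columns=['FTR']), data['FTR']
    x_train, x_test, y_train, y_test = train_test_split(
        x_all, y_all, test_size=0.2, stratify=y_all, random_state=42)
    return train_and_predict(LogisticRegression(), x_train, y_train, x_test, y_test)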
def get_grid_clf(clf, scoring, param, x_all, y_all):
grid_search = GridSearchCV(clf,
scoring=scoring,
param_grid=param,
verbose=100)
grid_obj = grid_search.fit(x_all, y_all)
clf = grid_obj.best_estimator_
params = grid_obj.best_params_
print(clf)
print(params)
return clf
def get_random_clf(clf, scoring, param, x_all, y_all):
random_search = RandomizedSearchCV(clf, param,
n_iter=10,
scoring=scoring,
verbose=100)
random_obj = random_search.fit(x_all, y_all)
clf = random_obj.best_estimator_
params = random_obj.best_params_
print(clf)
print(params)
return clf
def process_print_result(clfs, res):
def average(lst):
return sum(lst) / len(lst)
avg_dict = {}
best_clf_so_far = 0
best_avg_so_far = -1
for i in range(len(clfs)):
clf_name = clfs[i].__class__.__name__
if clf_name in avg_dict:
clf_name += json.dumps(clfs[i].get_params())
avg = average(res[i])
avg_dict[clf_name] = avg
if avg > best_avg_so_far:
best_avg_so_far = avg
best_clf_so_far = i
for clf_name in sorted(avg_dict, key=avg_dict.get, reverse=True):
print("{}: {}".format(clf_name, avg_dict[clf_name]))
return avg_dict, clfs[best_clf_so_far]
def getCLF(finalFilePath, model_confidence_csv_path, clf_file, recalculate=True):
if not recalculate:
# prediction result (y_result) not available
return joblib.load(clf_file), None
# First load the data from csv file
data = pd.read_csv(finalFilePath)
# Drop columns that are not needed and normalized each columns
data = prepare_data(data, drop_na=True)
data = data.loc[(data['FTR'] == 'H') | (data['FTR'] == 'D') | (data['FTR'] == 'A')]
# Divide data into features and label
x_all = data.drop(columns=['FTR'])
y_all = data['FTR']
# List of Classifiers that we are going to run
classifiers = [
# Logistic Regressions
LogisticRegression(),
# Best param in this grid search
LogisticRegression(penalty='l2', solver='newton-cg', multi_class='ovr',
C=0.1, warm_start=True),
LogisticRegression(penalty='l2', solver='lbfgs', multi_class='multinomial',
C=0.4, warm_start=False),
# SVC
SVC(probability=True),
SVC(C=0.3, class_weight=None, decision_function_shape='ovo', degree=1,
kernel='rbf', probability=True, shrinking=True, tol=0.0005),
SVC(C=0.28, class_weight=None, decision_function_shape='ovo', degree=1,
kernel='rbf', probability=True, shrinking=True, tol=0.0002),
# XGBoost
xgb.XGBClassifier(),
xgb.XGBClassifier(learning_rate=0.01, n_estimators=1000, max_depth=2,
min_child_weight=5, gamma=0, subsample=0.8, colsample_bytree=0.7,
scale_pos_weight=0.8, reg_alpha=1e-5, booster='gbtree', objective='multi:softprob'),
KNeighborsClassifier(),
RandomForestClassifier(),
GaussianNB(),
DecisionTreeClassifier(),
GradientBoostingClassifier(),
LinearSVC(),
SGDClassifier()
]
# clf_L = LogisticRegression()
# parameters_L = {'penalty': ['l2'],
# 'solver': ['lbfgs', 'newton-cg', 'sag'],
# 'multi_class': ['ovr', 'multinomial'],
# 'C': [x * 0.1 + 0.1 for x in range(10)],
# 'warm_start': [True, False],
# 'fit_intercept': [True, False],
# 'class_weight': ['balanced', None]}
# f1_scorer_l = make_scorer(f1_score, labels=['H', 'D', 'A'], average='micro')
# clf_L = get_grid_clf(clf_L, f1_scorer_l, parameters_L, x_all, y_all)
# classifiers.append(clf_L)
## SVC
# clf_L = SVC()
# parameters_L = {
# 'C': [x * 0.01 + 0.27 for x in range(5)],
# 'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
# 'degree': [x + 1 for x in range(3)],
# 'shrinking': [True, False],
# 'tol': [x * 0.0005 + 0.0005 for x in range(3)],
# 'class_weight': ['balanced', None],
# 'decision_function_shape': ['ovo', 'ovr']
# }
# f1_scorer_l = make_scorer(f1_score, labels=['H', 'D', 'A'], average='micro')
# clf_L = get_grid_clf(clf_L, f1_scorer_l, parameters_L, x_all, y_all)
# classifiers.append(clf_L)
## XGBoost
clf_L = xgb.XGBClassifier()
parameters_L = {
'learning_rate': [0.01],
'n_estimators': [1000],
'max_depth': [2],
'min_child_weight': [5],
'gamma': [0],
'subsample': [0.8],
'colsample_bytree': [0.7],
'scale_pos_weight': [0.8],
'reg_alpha': [1e-5],
'booster': ['gbtree'],
'objective': ['multi:softprob']
}
f1_scorer_l = make_scorer(f1_score, labels=['H', 'D', 'A'], average='micro')
clf_L = get_grid_clf(clf_L, f1_scorer_l, parameters_L, x_all, y_all)
classifiers.append(clf_L)
# We are going to record accuracies of each classifier prediction iteration
len_classifiers = len(classifiers)
result = [[] for _ in range(len_classifiers)]
y_results = [[] for _ in range(len_classifiers + 1)]
# Using 10-fold cross validation (Dividing the data into sub groups (90% to fit, 10% to test), and run
# prediction with each classifiers using the sub groups as a dataset)
# This will test the skill of the classifiers.
split = 10
kf = KFold(n_splits=split, shuffle=True)
for split_index, (train_index, test_index) in enumerate(kf.split(x_all)):
print("Processing {}/{} of KFold Cross Validation...".format(split_index + 1, split))
x_train, x_test = x_all.iloc[train_index], x_all.iloc[test_index]
y_train, y_test = y_all.iloc[train_index], y_all.iloc[test_index]
y_results[len_classifiers] += y_test.tolist()
for index, clf in enumerate(classifiers):
print("KFold: {}/{}. clf_index: {}/{}.".format(split_index + 1, split, index + 1, len(classifiers)))
confidence, predicted_result = train_and_predict(clf, x_train, y_train, x_test, y_test)
result[index].append(confidence)
y_results[index] += predicted_result.tolist()
# Make a dictionary of average accuracies for each classifiers
avg_dict, best_clf = process_print_result(classifiers, result)
# Put the result into csv file
if os.path.isfile(model_confidence_csv_path):
df = pd.read_csv(model_confidence_csv_path)
newdf = pd.DataFrame(avg_dict, index=[df.shape[1]])
df = | pd.concat([df, newdf], ignore_index=True, sort=False) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
from datetime import timedelta
from math import log10, floor
import warnings
import numpy as np
import pandas as pd
import ruptures as rpt
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from covsirphy.util.error import deprecate, ScenarioNotFoundError, UnExecutedError
from covsirphy.util.plotting import line_plot, box_plot
from covsirphy.cleaning.oxcgrt import OxCGRTData
from covsirphy.analysis.param_tracker import ParamTracker
from covsirphy.analysis.data_handler import DataHandler
class Scenario(DataHandler):
"""
Scenario analysis.
Args:
jhu_data (covsirphy.JHUData): object of records
population_data (covsirphy.PopulationData): PopulationData object
country (str): country name
province (str or None): province name
tau (int or None): tau value
auto_complement (bool): if True and necessary, the number of cases will be complemented
"""
def __init__(self, jhu_data, population_data, country, province=None, tau=None, auto_complement=True):
super().__init__(
jhu_data, population_data, country, province=province, auto_complement=auto_complement)
self.tau = self.ensure_tau(tau)
self._update_range(first_date=None, last_date=None)
# Linear regression modes for prediction of ODE parameters
self._oxcgrt_data = None
self._lm_dict = {}
# Default delay days
self.delay_days = jhu_data.recovery_period
def __getitem__(self, key):
"""
Return the phase series object for the scenario.
Args:
key (str): scenario name
Raises:
ScenarioNotFoundError: the scenario is not registered
Returns:
covsirphy.PhaseSeries
"""
if key in self._tracker_dict:
return self._tracker_dict[key].series
raise ScenarioNotFoundError(key)
def __setitem__(self, key, value):
"""
Register a phase series.
Args:
key (str): scenario name
value (covsirphy.PhaseSeries): phase series object
"""
self._tracker_dict[key] = ParamTracker(
self.record_df, value, area=self.area, tau=self.tau)
@property
def first_date(self):
"""
str: the first date of the records
"""
return self._first_date
@first_date.setter
def first_date(self, date):
self._update_range(first_date=date)
@property
def last_date(self):
"""
str: the last date of the records
"""
return self._last_date
@last_date.setter
def last_date(self, date):
self._update_range(last_date=date)
def _update_range(self, first_date=None, last_date=None):
"""
Set first/last dates of the records.
Records to analyse will be updated and all scenarios will be cleared.
Args:
first_date (str or None): the first date of the records
last_date (str or None): the last date of the records
"""
if first_date is not None:
self._first_date = self._ensure_date_in_range(first_date)
if last_date is not None:
self._last_date = self._ensure_date_in_range(last_date)
super().init_records()
self._init_trackers()
def _ensure_date_in_range(self, date):
"""
Ensure that the date is in the range of (the first date, the last date).
Args:
date (str): date, like 01Jan2020
"""
self.ensure_date_order(self._first_date, date, name="date")
self.ensure_date_order(date, self._last_date, name="date")
return date
def _init_trackers(self):
"""
Initialize dictionary of trackers.
"""
series = ParamTracker.create_series(
first_date=self._first_date, last_date=self._last_date, population=self.population)
tracker = ParamTracker(
record_df=self.record_df, phase_series=series, area=self.area, tau=self.tau)
self._tracker_dict = {self.MAIN: tracker}
def _tracker(self, name, template="Main"):
"""
        Ensure that the phase series is registered.
If not registered, copy the template phase series.
Args:
name (str): phase series name
template (str): name of template phase series
Returns:
covsirphy.ParamTracker
"""
# Registered
if name in self._tracker_dict:
return self._tracker_dict[name]
# Un-registered and create it
if template not in self._tracker_dict:
raise ScenarioNotFoundError(template)
tracker = copy.deepcopy(self._tracker_dict[template])
self._tracker_dict[name] = tracker
return tracker
@deprecate(old="Scenario.add_phase()", new="Scenario.add()")
def add_phase(self, **kwargs):
return self.add(**kwargs)
def add(self, name="Main", end_date=None, days=None,
population=None, model=None, **kwargs):
"""
Add a new phase.
The start date will be the next date of the last registered phase.
Args:
name (str): phase series name, 'Main' or user-defined name
end_date (str): end date of the new phase
days (int): the number of days to add
population (int or None): population value of the start date
model (covsirphy.ModelBase or None): ODE model
kwargs: keyword arguments of ODE model parameters, not including tau value.
Returns:
covsirphy.Scenario: self
Note:
            - If the phase series has not been registered, a new phase series will be created.
- Either @end_date or @days must be specified.
- If @end_date and @days are None, the end date will be the last date of the records.
- If both of @end_date and @days were specified, @end_date will be used.
            - If @population is None, the initial value will be used.
- If @model is None, the model of the last phase will be used.
- Tau will be fixed as the last phase's value.
- kwargs: Default values are the parameter values of the last phase.
"""
if end_date is not None:
self.ensure_date(end_date, name="end_date")
tracker = self._tracker(name)
try:
tracker.add(
end_date=end_date, days=days, population=population,
model=model, **kwargs)
except ValueError:
last_date = tracker.series.unit("last").end_date
raise ValueError(
f'@end_date must be over {last_date}. However, {end_date} was applied.') from None
self._tracker_dict[name] = tracker
return self
def clear(self, name="Main", include_past=False, template="Main"):
"""
Clear phase information.
Args:
name (str): scenario name
include_past (bool): if True, past phases will be removed as well as future phases
template (str): name of template scenario
Returns:
covsirphy.Scenario: self
Note:
If un-registered scenario name was specified, new scenario will be created.
Future phases will be always deleted.
"""
tracker = self._tracker(name, template=template)
if include_past:
self[name] = tracker.delete_all()
else:
self[name] = tracker.delete(phases=tracker.future_phases()[0])
return self
def _delete_series(self, name):
"""
Delete a scenario or initialise main scenario.
Args:
name (str): name of phase series
Returns:
covsirphy.Scenario: self
"""
if name == self.MAIN:
self[self.MAIN] = self._tracker(self.MAIN).delete_all()
else:
self._tracker_dict.pop(name)
return self
def delete(self, phases=None, name="Main"):
"""
Delete phases.
Args:
            phases (list[str] or None): phase names, or ['last']
name (str): name of phase series
Returns:
covsirphy.Scenario: self
Note:
If @phases is None, the phase series will be deleted.
            When '0th' is included in @phases, the 0th phase will be disabled, not deleted.
If the last phase is included in @phases, the dates will be released from phases.
If the last phase is not included, the dates will be assigned to the previous phase.
"""
# Clear main series or delete sub phase series
if phases is None:
return self._delete_series(name)
# Delete phases
tracker = self._tracker(name)
self[name] = tracker.delete(phases=phases)
return self
def disable(self, phases, name="Main"):
"""
The phases will be disabled and removed from summary.
Args:
            phases (list[str] or None): phase names or None (all enabled phases)
name (str): scenario name
Returns:
covsirphy.Scenario: self
"""
self[name] = self._tracker(name).disable(phases)
return self
def enable(self, phases, name="Main"):
"""
The phases will be enabled and appear in summary.
Args:
            phases (list[str] or None): phase names or None (all disabled phases)
name (str): scenario name
Returns:
covsirphy.Scenario: self
"""
self[name] = self._tracker(name).enable(phases)
return self
def combine(self, phases, name="Main", population=None, **kwargs):
"""
Combine the sequential phases as one phase.
New phase name will be automatically determined.
Args:
phases (list[str]): list of phases
name (str, optional): name of phase series
population (int): population value of the start date
kwargs: keyword arguments to save as phase information
Raises:
TypeError: @phases is not a list
Returns:
covsirphy.Scenario: self
"""
self[name] = self._tracker(name).combine(
phases=phases, population=population, **kwargs)
return self
def separate(self, date, name="Main", population=None, **kwargs):
"""
Create a new phase with the change point.
New phase name will be automatically determined.
Args:
date (str): change point, i.e. start date of the new phase
name (str): scenario name
population (int): population value of the change point
kwargs: keyword arguments of PhaseUnit.set_ode() if update is necessary
Returns:
covsirphy.Scenario: self
"""
self[name] = self._tracker(name).separate(
date=date, population=population, **kwargs)
return self
def _summary(self, name=None):
"""
Summarize the series of phases and return a dataframe.
Args:
name (str): phase series name
- name of alternative phase series registered by Scenario.add()
- if None, all phase series will be shown
Returns:
pandas.DataFrame:
- if @name not None, as the same as PhaseSeries().summary()
- if @name is None, index will be phase series name and phase name
Note:
If 'Main' was used as @name, main PhaseSeries will be used.
"""
if name is None:
if len(self._tracker_dict.keys()) > 1:
dataframes = []
for (_name, tracker) in self._tracker_dict.items():
summary_df = tracker.series.summary()
summary_df = summary_df.rename_axis(self.PHASE)
summary_df[self.SERIES] = _name
dataframes.append(summary_df.reset_index())
df = pd.concat(dataframes, ignore_index=True, sort=False)
return df.set_index([self.SERIES, self.PHASE])
name = self.MAIN
return self._tracker(name).series.summary()
def summary(self, columns=None, name=None):
"""
Summarize the series of phases and return a dataframe.
Args:
name (str): phase series name
- name of alternative phase series registered by Scenario.add()
- if None, all phase series will be shown
columns (list[str] or None): columns to show
Returns:
pandas.DataFrame:
- if @name not None, as the same as PhaseSeries().summary()
- if @name is None, index will be phase series name and phase name
Note:
If 'Main' was used as @name, main PhaseSeries will be used.
If @columns is None, all columns will be shown.
"""
df = self._summary(name=name)
all_cols = df.columns.tolist()
if set(self.EST_COLS).issubset(all_cols):
all_cols = [col for col in all_cols if col not in self.EST_COLS]
all_cols += self.EST_COLS
columns = columns or all_cols
self.ensure_list(columns, candidates=all_cols, name="columns")
df = df.loc[:, columns]
return df.dropna(how="all", axis=1).fillna(self.UNKNOWN)
def trend(self, force=True, name="Main", show_figure=True, filename=None, **kwargs):
"""
Perform S-R trend analysis and set phases.
Args:
force (bool): if True, change points will be over-written
name (str): phase series name
show_figure (bool): if True, show the result as a figure
filename (str): filename of the figure, or None (display)
kwargs: keyword arguments of ChangeFinder()
Returns:
covsirphy.Scenario: self
"""
# Arguments
if "n_points" in kwargs.keys():
raise ValueError(
"@n_points argument is un-necessary"
" because the number of change points will be automatically determined."
)
try:
include_init_phase = kwargs.pop("include_init_phase")
warnings.warn(
"@include_init_phase was deprecated. Please use Scenario.disable('0th').",
DeprecationWarning, stacklevel=2)
except KeyError:
include_init_phase = True
try:
force = kwargs.pop("set_phases")
except KeyError:
pass
# S-R trend analysis
tracker = self._tracker(name)
if not self._interactive and filename is None:
show_figure = False
filename = None if self._interactive else filename
self[name] = tracker.trend(
force=force, show_figure=show_figure, filename=filename, **kwargs)
# Disable 0th phase, if necessary
if not include_init_phase:
self[name] = tracker.disable(phases=["0th"])
return self
def estimate(self, model, phases=None, name="Main", n_jobs=-1, **kwargs):
"""
Perform parameter estimation for each phases.
Args:
model (covsirphy.ModelBase): ODE model
phases (list[str]): list of phase names, like 1st, 2nd...
name (str): phase series name
n_jobs (int): the number of parallel jobs or -1 (CPU count)
kwargs: keyword arguments of model parameters and covsirphy.Estimator.run()
Note:
- If 'Main' was used as @name, main PhaseSeries will be used.
- If @name phase was not registered, new PhaseSeries will be created.
- If @phases is None, all past phase will be used.
- Phases with estimated parameter values will be ignored.
- In kwargs, tau value cannot be included.
"""
if self.TAU in kwargs:
raise ValueError(
"@tau must be specified when scenario = Scenario(), and cannot be specified here.")
self.tau, self[name] = self._tracker(name).estimate(
model=model, phases=phases, n_jobs=n_jobs, **kwargs)
def phase_estimator(self, phase, name="Main"):
"""
Return the estimator of the phase.
Args:
phase (str): phase name, like 1st, 2nd...
name (str): phase series name
Return:
covsirphy.Estimator: estimator of the phase
"""
estimator = self._tracker_dict[name].series.unit(phase).estimator
if estimator is None:
raise UnExecutedError(
f'Scenario.estimate(model, phases=["{phase}"], name={name})')
return estimator
def estimate_history(self, phase, name="Main", **kwargs):
"""
Show the history of optimization.
Args:
phase (str): phase name, like 1st, 2nd...
name (str): phase series name
kwargs: keyword arguments of covsirphy.Estimator.history()
Note:
If 'Main' was used as @name, main PhaseSeries will be used.
"""
estimator = self.phase_estimator(phase=phase, name=name)
estimator.history(**kwargs)
def estimate_accuracy(self, phase, name="Main", **kwargs):
"""
Show the accuracy as a figure.
Args:
phase (str): phase name, like 1st, 2nd...
name (str): phase series name
kwargs: keyword arguments of covsirphy.Estimator.accuracy()
Note:
If 'Main' was used as @name, main PhaseSeries will be used.
"""
estimator = self.phase_estimator(phase=phase, name=name)
estimator.accuracy(**kwargs)
def simulate(self, variables=None, phases=None, name="Main", y0_dict=None, **kwargs):
"""
Simulate ODE models with set/estimated parameter values and show it as a figure.
Args:
variables (list[str] or None): variables to include, Infected/Fatal/Recovered when None
phases (list[str] or None): phases to shoe or None (all phases)
name (str): phase series name. If 'Main', main PhaseSeries will be used
y0_dict(dict[str, float] or None): dictionary of initial values of variables
kwargs: the other keyword arguments of Scenario.line_plot()
Returns:
pandas.DataFrame
Index:
reset index
Columns:
- Date (pd.TimeStamp): Observation date
- Country (str): country/region name
- Province (str): province/prefecture/state name
- Variables of the model and dataset (int): Confirmed etc.
"""
tracker = copy.deepcopy(self._tracker(name))
# Select phases
if phases is not None:
tracker.disable(phases=None)
tracker.enable(phases=phases)
# Simulation
try:
sim_df = tracker.simulate(y0_dict=y0_dict)
except UnExecutedError:
raise UnExecutedError(
"Scenario.trend() or Scenario.add(), and Scenario.estimate(model)") from None
# Show figure
df = sim_df.set_index(self.DATE)
fig_cols = self.ensure_list(
variables or [self.CI, self.F, self.R], candidates=df.columns.tolist(), name="variables")
title = f"{self.area}: Simulated number of cases ({name} scenario)"
self.line_plot(
df=df[fig_cols], title=title, y_integer=True, v=tracker.change_dates(), **kwargs)
return sim_df
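    # Typical call sequence (illustrative comment only; dataset loading and the ODE model
    # import, e.g. covsirphy.SIRF, are not shown here):
    #   snl = Scenario(jhu_data, population_data, country="Japan")
    #   snl.trend()              # split the past records into phases
    #   snl.estimate(SIRF)       # estimate ODE parameters for each past phase
    #   snl.add(days=30)         # append a 30-day future phase to the "Main" scenario
    #   snl.simulate()           # plot and return the simulated number of cases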
def get(self, param, phase="last", name="Main"):
"""
Get the parameter value of the phase.
Args:
param (str): parameter name (columns in self.summary())
phase (str): phase name or 'last'
- if 'last', the value of the last phase will be returned
name (str): phase series name
Returns:
str or int or float
Note:
If 'Main' was used as @name, main PhaseSeries will be used.
"""
df = self.summary(name=name)
if param not in df.columns:
raise KeyError(f"@param must be in {', '.join(df.columns)}.")
if phase == "last":
phase = df.index[-1]
return df.loc[phase, param]
def _param_history(self, targets, name):
"""
Return the subset of summary dataframe to select the target of parameter history.
Args:
targets (list[str] or str): parameters to show (Rt etc.)
name (str): phase series name
Returns:
pandas.DataFrame: selected summary dataframe
Raises:
KeyError: targets are not in the columns of summary dataframe
"""
series = self._tracker_dict[name].series
model_set = {unit.model for unit in series}
model_set = model_set - set([None])
parameters = self.flatten([m.PARAMETERS for m in model_set])
day_params = self.flatten([m.DAY_PARAMETERS for m in model_set])
selectable_cols = [self.N, *parameters, self.RT, *day_params]
selectable_set = set(selectable_cols)
df = series.summary().replace(self.UNKNOWN, None)
if not selectable_set.issubset(df.columns):
raise UnExecutedError(
f'Scenario.estimate(model, phases=None, name="{name}")')
targets = [targets] if isinstance(targets, str) else targets
targets = targets or selectable_cols
if not set(targets).issubset(selectable_set):
raise KeyError(
f"@targets must be selected from {', '.join(selectable_cols)}."
)
df = df.loc[:, targets].dropna(how="any", axis=0)
return df.astype(np.float64)
@deprecate(
old="Scenario.param_history(targets: list)",
new="Scenario.history(target: str)",
version="2.7.3-alpha")
def param_history(self, targets=None, name="Main", divide_by_first=True,
show_figure=True, filename=None, show_box_plot=True, **kwargs):
"""
Return subset of summary and show a figure to show the history.
Args:
targets (list[str] or str): parameters to show (Rt etc.)
name (str): phase series name
divide_by_first (bool): if True, divide the values by 1st phase's values
show_box_plot (bool): if True, box plot. if False, line plot
show_figure (bool): If True, show the result as a figure
filename (str): filename of the figure, or None (show figure)
kwargs: keyword arguments of pd.DataFrame.plot or line_plot()
Returns:
pandas.DataFrame
Note:
If 'Main' was used as @name, main PhaseSeries will be used.
"""
self._tracker(name)
# Select target to show
df = self._param_history(targets, name)
# Divide by the first phase parameters
if divide_by_first:
df = df / df.iloc[0, :]
title = f"{self.area}: Ratio to 1st phase parameters ({name} scenario)"
else:
title = f"{self.area}: History of parameter values ({name} scenario)"
if not show_figure:
return df
if show_box_plot:
h_values = [1.0] if divide_by_first or self.RT in targets else None
box_plot(df, title, h=h_values, filename=filename)
return df
_df = df.reset_index(drop=True)
_df.index = _df.index + 1
h = 1.0 if divide_by_first else None
line_plot(
_df, title=title,
xlabel="Phase", ylabel=str(), math_scale=False, h=h,
filename=filename
)
return df
def _describe(self, y0_dict=None):
"""
Describe representative values.
Args:
y0_dict (dict or None): dictionary of initial values or None
- key (str): variable name
- value (float): initial value
Returns:
pandas.DataFrame
Index:
                    (str): scenario name
Columns:
- max(Infected): max value of Infected
- argmax(Infected): the date when Infected shows max value
- Confirmed({date}): Confirmed on the next date of the last phase
- Infected({date}): Infected on the next date of the last phase
- Fatal({date}): Fatal on the next date of the last phase
"""
_dict = {}
for (name, _) in self._tracker_dict.items():
# Predict the number of cases
df = self.simulate(name=name, y0_dict=y0_dict, show_figure=False)
df = df.set_index(self.DATE)
cols = df.columns[:]
last_date = df.index[-1]
# Max value of Infected
max_ci = df[self.CI].max()
argmax_ci = df[self.CI].idxmax().strftime(self.DATE_FORMAT)
# Confirmed on the next date of the last phase
last_c = df.loc[last_date, self.C]
# Infected on the next date of the last phase
last_ci = df.loc[last_date, self.CI]
# Fatal on the next date of the last phase
last_f = df.loc[last_date, self.F] if self.F in cols else None
# Save representative values
last_date_str = last_date.strftime(self.DATE_FORMAT)
_dict[name] = {
f"max({self.CI})": max_ci,
f"argmax({self.CI})": argmax_ci,
f"{self.C} on {last_date_str}": last_c,
f"{self.CI} on {last_date_str}": last_ci,
f"{self.F} on {last_date_str}": last_f,
}
        return pd.DataFrame.from_dict(_dict, orient="index")
import psycopg2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import display
from wordcloud import WordCloud, ImageColorGenerator
from sklearn.feature_extraction import text
from sklearn.decomposition import LatentDirichletAllocation as LDA
from sklearn.preprocessing import MinMaxScaler
from psycopg2.extensions import register_adapter, AsIs
from PIL import Image
topic_names = [
'climate change strategy',
'water efficiency',
'energy efficiency',
'carbon intensity',
'environmental management system',
'equal opportunities',
'human rights',
'customer responsibility',
'health and safety',
'support community',
'business ethics',
'compliance',
'shareholder democracy',
'socially responsible',
'executive compensation'
]
def get_dataframe_from_database(conn):
select = '''
SELECT name, r.statement, r.lemma FROM reports r
LEFT JOIN company c ON c.ticker = r.company_id;
'''
return pd.io.sql.read_sql_query(select,conn)
def get_stop_words(df):
"""
    remove report-specific keywords and company names, together with
    English stop words, so they are not used in topic modelling
"""
sp_stop_words = [
'plc', 'group', 'target',
'track', 'capital', 'holding',
'annualreport', 'esg', 'bank',
'report','long', 'make',
'table','content', 'wells', 'fargo', 'nxp',
'letter', 'ceo', 'about', 'united', 'states', 'scope'
]
for name in df.name.unique():
for word in name.split():
sp_stop_words.append(word.lower())
return text.ENGLISH_STOP_WORDS.union(sp_stop_words)
def corpus_wide_term_frequencies(df, stop_words,image_file):
"""
create a word cloud for the whole corpus displaying the most frequent terms
    using a given background image
"""
large_string = ' '.join(df.lemma)
mask = np.array(Image.open(image_file))
font = 'arial.ttf'
colors = ImageColorGenerator(mask)
word_cloud = WordCloud(font_path = font,
background_color="#effbf9",
mask = mask,
max_words=5000,
width=900,
height=700,
stopwords=stop_words,
color_func=colors,
contour_width=1,
contour_color='white'
)
plt.figure(figsize=(20,20))
word_cloud.generate(large_string)
plt.imshow(word_cloud, interpolation='bilinear')
plt.axis("off")
return plt
def bigram_analysis(df, stop_words):
"""
using Tf-Idf vectorization to find the most frequent bigrams in the corpus
"""
bigram_tf_idf_vectorizer = text.TfidfVectorizer(stop_words=stop_words, ngram_range=(2,2), min_df=10, use_idf=True)
bigram_tf_idf = bigram_tf_idf_vectorizer.fit_transform(df.lemma)
words = bigram_tf_idf_vectorizer.get_feature_names()
total_counts = np.zeros(len(words))
for t in bigram_tf_idf:
total_counts += t.toarray()[0]
count_dict = (zip(words, total_counts))
count_dict = sorted(count_dict, key=lambda x:x[1], reverse=True)[0:10]
words = [w[0] for w in count_dict]
counts = [w[1] for w in count_dict]
x_pos = np.arange(len(words))
plt.figure(figsize=(15, 5))
plt.subplot(title='10 most common bi-grams')
sns.barplot(x_pos, counts, color = '#E9C46A') #palette='crest'
plt.xticks(x_pos, words, rotation=45)
plt.xlabel('bi-grams')
plt.ylabel('tfidf')
ax = plt.gca()
ax.set_facecolor('None')
plt.rcParams['figure.facecolor'] = 'None'
return plt
def top_words(model, feature_names, n_top_words):
rows = []
for topic_idx, topic in enumerate(model.components_):
message = ", ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]])
rows.append(["Topic #%d: " % (topic_idx + 1), message])
return pd.DataFrame(rows, columns=['topic', 'keywords'])
def topic_modelling(df, stop_words, n_components=5):
"""
extract topics from the corpus using Latent Dirichlet Allocation model
"""
word_tf_vectorizer = text.CountVectorizer(stop_words=stop_words, ngram_range=(1,1))
word_tf = word_tf_vectorizer.fit_transform(df.lemma)
lda = LDA(random_state=42, n_components=n_components,learning_decay=0.3)
lda.fit(word_tf)
tf_feature_names = word_tf_vectorizer.get_feature_names()
return lda,tf_feature_names,word_tf
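# Hedged usage sketch (connection parameters are assumptions, not part of the original):
# conn = psycopg2.connect(host="localhost", dbname="esg", user="esg", password="esg")
# df = get_dataframe_from_database(conn)
# stop_words = get_stop_words(df)
# lda, tf_feature_names, word_tf = topic_modelling(df, stop_words, n_components=len(topic_names))
# display_topics(lda, tf_feature_names).savefig("topics.png")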
def word_cloud(model, tf_feature_names, index):
imp_words_topic=""
comp = model.components_[index]
vocab_comp = zip(tf_feature_names, comp)
sorted_words = sorted(vocab_comp, key = lambda x:x[1], reverse=True)[:50]
for word in sorted_words:
imp_words_topic = imp_words_topic + " " + word[0]
return WordCloud(
background_color="white",
width=600,
height=600,
contour_width=3,
contour_color='steelblue'
).generate(imp_words_topic)
def display_topics(lda, tf_feature_names):
topics = len(lda.components_)
fig = plt.figure(figsize=(20, 20 * topics / 5))
for i, topic in enumerate(lda.components_):
ax = fig.add_subplot(topics, 3, i + 1)
ax.set_title(topic_names[i], fontsize=20)
wordcloud = word_cloud(lda, tf_feature_names, i)
ax.imshow(wordcloud)
ax.set_facecolor('None')
ax.axis('off')
return plt
def attach_topic_distribution(df, lda,word_tf):
transformed = lda.transform(word_tf)
a = [np.argmax(distribution) for distribution in transformed]
b = [np.max(distribution) for distribution in transformed]
df2 = pd.DataFrame(zip(a,b,transformed), columns=['topic', 'probability', 'probabilities'])
return pd.concat([df, df2], axis=1)
def clear_database(conn):
cursor = conn.cursor()
cursor.execute('''DELETE FROM topics;''')
conn.commit()
cursor.close()
return
def insert_topic_names(conn, topics):
cursor = conn.cursor()
for topic in topics:
cursor.execute('''INSERT INTO topics(topic) VALUES (%s);''', (topic,))
conn.commit()
cursor.close()
return
def update_database(conn,df):
cursor = conn.cursor()
create = '''CREATE TABLE IF NOT EXISTS reports_alt (
id SERIAL PRIMARY KEY,
company_id VARCHAR(5) NOT NULL,
statement TEXT,
lemma TEXT,
topic INTEGER,
probability NUMERIC,
probabilities NUMERIC[],
FOREIGN KEY (company_id)
REFERENCES company(ticker)
ON UPDATE CASCADE ON DELETE CASCADE
);'''
insert = '''
INSERT INTO reports_alt (company_id, statement, lemma, topic, probability, probabilities)
VALUES ((SELECT ticker FROM company WHERE LOWER(company.name) LIKE LOWER(%(name)s) LIMIT 1),
%(statement)s, %(lemma)s, %(topic)s, %(probability)s, ARRAY[%(probabilities)s]);
'''
drop = '''DROP TABLE reports;'''
alter = '''ALTER TABLE reports_alt RENAME TO reports;'''
cursor.execute(create)
for record in df.to_dict('records'):
cursor.mogrify(insert, record)
cursor.execute(insert, record)
cursor.execute(drop)
cursor.execute(alter)
conn.commit()
cursor.close()
return
def adapt_numpy_array(numpy_array):
list = numpy_array.tolist()
return AsIs(list)
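# Note (a sketch, not in the original script): for psycopg2 to use the adapter above
# when numpy arrays are passed as query parameters, it would typically be registered once:
# register_adapter(np.ndarray, adapt_numpy_array)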
def compare_core_initiatives(df):
esg_focus = pd.crosstab(df.name, df.topic)
scaler = MinMaxScaler(feature_range = (0, 1))
esg_focus_norm = pd.DataFrame(scaler.fit_transform(esg_focus), columns=esg_focus.columns)
esg_focus_norm.index = esg_focus.index
sns.set(rc={'figure.figsize':(15,10)})
sns.heatmap(esg_focus_norm, annot=False, linewidths=.5, cmap='Blues')
return plt
def retrieve_key_statements_for_topic(df, topic, topic_idx):
"""
extracts the statements which are releveant to a given topic
"""
topic_discussions = df[df['topic'] == topic_idx]
topic_discussions = topic_discussions[topic_discussions['probability'] > 0.89]
topic_discussions = topic_discussions.sort_values('probability', ascending=False)
rows = []
for i, row in topic_discussions.iterrows():
rows.append([row['topic'], row.probability, row.statement])
    return pd.DataFrame(rows, columns=['topic', 'probability', 'statement'])
import os
import html5lib
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from datetime import date, timedelta, datetime as dt
from pymongo import MongoClient
from itertools import cycle
import numpy as np
# from kdriver import RemoteDriverStartService
class RemoteDriverStartService():
options = webdriver.ChromeOptions()
# Set user app data to a new directory
options.add_argument("user-data-dir=C:\\Users\\Donley\\App Data\\Google\\Chrome\\Application\\User Data\\Kit")
options.add_experimental_option("Proxy", "null")
options.add_experimental_option("excludeSwitches", ["ignore-certificate-errors"])
# Create a download path for external data sources as default:
options.add_experimental_option("prefs", {
"download.default_directory": r"C:\Users\Donley\Documents\GA_TECH\SUBMISSIONS\PROJECT2-CHALLENGE\data\external",
"download.prompt_for_download": False,
"download.directory_upgrade": True,
"safebrowsing.enabled": True
}),
# Add those optional features to capabilities
caps = options.to_capabilities()
def start_driver(self):
return webdriver.Remote(command_executor='http://127.0.0.1:4444',
desired_capabilities=self.caps)
# Connect to MongoDB
client = MongoClient("mongodb://localhost:27017")
db = client['investopedia']
def invsto_scrape():
# Set class equal to new capabilities:
DesiredCapabilities = RemoteDriverStartService()
# Create variables for scraping:
investo = "https://www.investopedia.com/top-communications-stocks-4583180"
# Download data to paths, csv's, json, etc:
# for external data sources
external = "../data/external/"
# for processed data sources with ID's
processed = "../data/processed/"
# Locate Driver in system
current_path = os.getcwd()
# save the .exe file under the same directory of the web-scrape python script.
Path = os.path.join(current_path, "chromedriver")
# Initialize Chrome driver and start browser session controlled by automated test software under Kit profile.
caps = webdriver.DesiredCapabilities.CHROME.copy()
caps['acceptInsecureCerts'] = True
# caps = webdriver.DesiredCapabilities.CHROME.copy()
# caps['acceptInsecureCerts'] = True
# driver = webdriver.Chrome(options=options, desired_capabilities=caps)
driver = webdriver.Chrome(executable_path='chromedriver', desired_capabilities=caps)
##Step 3: Find the IDs of the items we want to scrape for [5]
# Start Grabbing Information from investopedia:
driver.get(investo)
driver.maximize_window()
timeout = 30
# Find an ID on the page and wait before executing anything until found:
try:
WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.ID, "main_1-0")))
except TimeoutException:
driver.quit()
##Step 5: The full code that runs the scraper and save the data to .csv files
itable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
itables = pd.read_html(itable)
communications_bv = itables[0]
    communications_bv.columns = ["Communications Best Value", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
communications_bv
# Locate column containing ticker symbols:
communications_bv_df = communications_bv.iloc[1:]
# Only keep tick information within parentheses:
    communications_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in communications_bv_df["Communications Best Value"]]
communications_bv_ticks
communications_fg = itables[1]
communications_fg.columns = ["Communications Fastest Growing", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
communications_fg_df = communications_fg.iloc[1:]
communications_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in communications_fg_df["Communications Fastest Growing"]]
communications_fg_ticks
communications_mm = itables[2]
communications_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
communications_mm_df = communications_mm.iloc[1:]
communications_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in communications_mm_df["Communications Most Momentum"]]
del communications_mm_ticks[-2:]
communications_mm_ticks
discretionary = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(3) > a')
discretionary
discretionary[0].click()
dtable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
dtables = pd.read_html(dtable)
discretionary_bv = dtables[0]
discretionary_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
discretionary_bv
# Locate column containing ticker symbols:
discretionary_bv_df = discretionary_bv.iloc[1:]
# Only keep tick information within parentheses:
discretionary_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in discretionary_bv_df["tick"]]
discretionary_bv_ticks
discretionary_fg = dtables[1]
discretionary_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
discretionary_fg_df = discretionary_fg.iloc[1:]
discretionary_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in discretionary_fg_df["stock"]]
discretionary_fg_ticks
    discretionary_mm = dtables[2]
    discretionary_mm.columns = ["Discretionary Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
    discretionary_mm_df = discretionary_mm.iloc[1:]
    discretionary_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in discretionary_mm_df["Discretionary Most Momentum"]]
del discretionary_mm_ticks[-2:]
discretionary_mm_ticks
staples = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(4) > a')
staples[0].click()
stable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
stables = pd.read_html(stable)
staples_bv = stables[0]
staples_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
staples_bv
# Locate column containing ticker symbols:
staples_bv_df = staples_bv.iloc[1:]
# Only keep tick information within parentheses:
staples_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in staples_bv_df["tick"]]
staples_bv_ticks
staples_fg = stables[1]
staples_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
staples_fg_df = staples_fg.iloc[1:]
staples_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in staples_fg_df["stock"]]
staples_fg_ticks
staples_mm = stables[2]
    staples_mm.columns = ["Staples Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
    staples_mm_df = staples_mm.iloc[1:]
    staples_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in staples_mm_df["Staples Most Momentum"]]
del staples_mm_ticks[-2:]
staples_mm_ticks
energy = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(5) > a')
energy[0].click()
etable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
etables = pd.read_html(etable)
energy_bv = etables[0]
energy_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
energy_bv
# Locate column containing ticker symbols:
energy_bv_df = energy_bv.iloc[1:]
# Only keep tick information within parentheses:
energy_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in energy_bv_df["tick"]]
energy_bv_ticks
energy_fg = etables[1]
energy_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
energy_fg_df = energy_fg.iloc[1:]
energy_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in energy_fg_df["stock"]]
energy_fg_ticks
energy_mm = etables[2]
    energy_mm.columns = ["Energy Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
    energy_mm_df = energy_mm.iloc[1:]
    energy_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in energy_mm_df["Energy Most Momentum"]]
del energy_mm_ticks[-2:]
energy_mm_ticks
financial = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(6) > a')
financial[0].click()
ftable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
ftables = pd.read_html(ftable)
financial_bv = ftables[0]
financial_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
financial_bv
# Locate column containing ticker symbols:
financial_bv_df = financial_bv.iloc[1:]
# Only keep tick information within parentheses:
financial_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in financial_bv_df["tick"]]
financial_bv_ticks
financial_fg = ftables[1]
financial_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
financial_fg_df = financial_fg.iloc[1:]
financial_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in financial_fg_df["stock"]]
financial_fg_ticks
    financial_mm = ftables[2]
    financial_mm.columns = ["Financial Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
    financial_mm_df = financial_mm.iloc[1:]
    financial_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in financial_mm_df["Financial Most Momentum"]]
del financial_mm_ticks[-2:]
financial_mm_ticks
healthcare = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(7) > a')
healthcare[0].click()
htable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
htables = pd.read_html(htable)
healthcare_bv = htables[0]
healthcare_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
healthcare_bv
# Locate column containing ticker symbols:
healthcare_bv_df = healthcare_bv.iloc[1:]
# Only keep tick information within parentheses:
healthcare_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in healthcare_bv_df["tick"]]
healthcare_bv_ticks
healthcare_fg = htables[1]
healthcare_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
healthcare_fg_df = healthcare_fg.iloc[1:]
healthcare_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in healthcare_fg_df["stock"]]
healthcare_fg_ticks
healthcare_mm = htables[2]
    healthcare_mm.columns = ["Healthcare Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
    healthcare_mm_df = healthcare_mm.iloc[1:]
    healthcare_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in healthcare_mm_df["Healthcare Most Momentum"]]
del healthcare_mm_ticks[-2:]
healthcare_mm_ticks
industrial = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(8) > a')
industrial[0].click()
intable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
intables = pd.read_html(intable)
industrial_bv = intables[0]
industrial_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
industrial_bv
# Locate column containing ticker symbols:
industrial_bv_df = industrial_bv.iloc[1:]
# Only keep tick information within parentheses:
industrial_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in industrial_bv_df["tick"]]
industrial_bv_ticks
industrial_fg = intables[1]
industrial_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
industrial_fg_df = industrial_fg.iloc[1:]
industrial_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in industrial_fg_df["stock"]]
industrial_fg_ticks
industrial_mm = intables[2]
    industrial_mm.columns = ["Industrial Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
    industrial_mm_df = industrial_mm.iloc[1:]
    industrial_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in industrial_mm_df["Industrial Most Momentum"]]
del industrial_mm_ticks[-2:]
industrial_mm_ticks
materials = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(9) > a')
materials[0].click()
motable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
motables = pd.read_html(motable)
materials_bv = motables[0]
materials_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
materials_bv
# Locate column containing ticker symbols:
    materials_bv_df = materials_bv.iloc[1:]
# Only keep tick information within parentheses:
materials_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in materials_bv_df["tick"]]
materials_bv_ticks
materials_fg = motables[1]
materials_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
materials_fg_df = materials_fg.iloc[1:]
materials_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in materials_fg_df["stock"]]
materials_fg_ticks
materials_mm = motables[2]
    materials_mm.columns = ["Materials Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
    materials_mm_df = materials_mm.iloc[1:]
    materials_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in materials_mm_df["Materials Most Momentum"]]
del materials_mm_ticks[-2:]
materials_mm_ticks
real_estate = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(10) > a')
real_estate[0].click()
retable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
    retables = pd.read_html(retable)
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import operator
import functools
from contextlib import contextmanager
from numbers import Integral
import numpy as np
import pandas as pd
from pandas.api.types import is_string_dtype
from pandas.core.dtypes.cast import find_common_type
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pass
from ..core import Entity, ExecutableTuple
from ..lib.mmh3 import hash as mmh_hash
from ..tensor.utils import dictify_chunk_size, normalize_chunk_sizes
from ..utils import tokenize, sbytes
def hash_index(index, size):
def func(x, size):
return mmh_hash(sbytes(x)) % size
f = functools.partial(func, size=size)
idx_to_grouped = index.groupby(index.map(f))
return [idx_to_grouped.get(i, list()) for i in range(size)]
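# Illustrative sketch (values are assumptions): hash_index(pd.Index(["a", "b", "c"]), size=2)
# buckets each label by mmh_hash(label) % 2 and returns one group of labels per bucket,
# with an empty list for any bucket that receives nothing.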
def hash_dataframe_on(df, on, size, level=None):
if on is None:
idx = df.index
if level is not None:
idx = idx.to_frame(False)[level]
hashed_label = pd.util.hash_pandas_object(idx, categorize=False)
elif callable(on):
# todo optimization can be added, if ``on`` is a numpy ufunc or sth can be vectorized
hashed_label = pd.util.hash_pandas_object(df.index.map(on), categorize=False)
else:
if isinstance(on, list):
to_concat = []
for v in on:
if isinstance(v, pd.Series):
to_concat.append(v)
else:
to_concat.append(df[v])
data = pd.concat(to_concat, axis=1)
else:
data = df[on]
hashed_label = pd.util.hash_pandas_object(data, index=False, categorize=False)
idx_to_grouped = df.index.groupby(hashed_label % size)
return [idx_to_grouped.get(i, pd.Index([])).unique() for i in range(size)]
def hash_dtypes(dtypes, size):
hashed_indexes = hash_index(dtypes.index, size)
return [dtypes[index] for index in hashed_indexes]
def sort_dataframe_inplace(df, *axis):
for ax in axis:
df.sort_index(axis=ax, inplace=True)
return df
def _get_range_index_start(pd_range_index):
try:
return pd_range_index.start
except AttributeError: # pragma: no cover
return pd_range_index._start
def _get_range_index_stop(pd_range_index):
try:
return pd_range_index.stop
except AttributeError: # pragma: no cover
return pd_range_index._stop
def _get_range_index_step(pd_range_index):
try:
return pd_range_index.step
except AttributeError: # pragma: no cover
return pd_range_index._step
def is_pd_range_empty(pd_range_index):
start, stop, step = _get_range_index_start(pd_range_index), \
_get_range_index_stop(pd_range_index), \
_get_range_index_step(pd_range_index)
return (start >= stop and step >= 0) or (start <= stop and step < 0)
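# Sketch: is_pd_range_empty(pd.RangeIndex(0)) is True (start == stop with a positive step),
# while is_pd_range_empty(pd.RangeIndex(3)) is False.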
def decide_dataframe_chunk_sizes(shape, chunk_size, memory_usage):
"""
Decide how a given DataFrame can be split into chunk.
:param shape: DataFrame's shape
    :param chunk_size: if dict provided, it's dimension id to chunk size;
        if tuple provided, it's the chunk size for each dimension.
:param memory_usage: pandas Series in which each column's memory usage
:type memory_usage: pandas.Series
:return: the calculated chunk size for each dimension
:rtype: tuple
"""
from ..config import options
chunk_size = dictify_chunk_size(shape, chunk_size)
average_memory_usage = memory_usage / shape[0]
nleft = len(shape) - len(chunk_size)
if nleft < 0:
raise ValueError("chunks have more than two dimensions")
if nleft == 0:
return normalize_chunk_sizes(shape, tuple(chunk_size[j] for j in range(len(shape))))
max_chunk_size = options.chunk_store_limit
# for the row side, along axis 0
if 0 not in chunk_size:
row_chunk_size = []
row_left_size = shape[0]
else:
row_chunk_size = normalize_chunk_sizes((shape[0],), (chunk_size[0],))[0]
row_left_size = -1
# for the column side, along axis 1
if 1 not in chunk_size:
col_chunk_size = []
col_chunk_store = []
col_left_size = shape[1]
else:
col_chunk_size = normalize_chunk_sizes((shape[1],), (chunk_size[1],))[0]
acc = [0] + np.cumsum(col_chunk_size).tolist()
col_chunk_store = [average_memory_usage[acc[i]: acc[i + 1]].sum()
for i in range(len(col_chunk_size))]
col_left_size = -1
while True:
nbytes_occupied = np.prod([max(it) for it in (row_chunk_size, col_chunk_store) if it])
dim_size = np.maximum(int(np.power(max_chunk_size / nbytes_occupied, 1 / float(nleft))), 1)
if col_left_size == 0:
col_chunk_size.append(0)
if row_left_size == 0:
row_chunk_size.append(0)
# check col first
if col_left_size > 0:
cs = min(col_left_size, dim_size)
col_chunk_size.append(cs)
start = int(np.sum(col_chunk_size[:-1]))
col_chunk_store.append(average_memory_usage.iloc[start: start + cs].sum())
col_left_size -= cs
if row_left_size > 0:
max_col_chunk_store = max(col_chunk_store)
cs = min(row_left_size, int(max_chunk_size / max_col_chunk_store))
row_chunk_size.append(cs)
row_left_size -= cs
if col_left_size <= 0 and row_left_size <= 0:
break
return tuple(row_chunk_size), tuple(col_chunk_size)
def decide_series_chunk_size(shape, chunk_size, memory_usage):
from ..config import options
chunk_size = dictify_chunk_size(shape, chunk_size)
average_memory_usage = memory_usage / shape[0] if shape[0] != 0 else memory_usage
if len(chunk_size) == len(shape):
return normalize_chunk_sizes(shape, chunk_size[0])
max_chunk_size = options.chunk_store_limit
series_chunk_size = max_chunk_size / average_memory_usage
return normalize_chunk_sizes(shape, int(series_chunk_size))
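# Sketch (numbers are assumptions, and chunk_store_limit is taken to be in bytes):
# for a Series of 1_000_000 rows using about 8MB in total, average_memory_usage is
# roughly 8 bytes per row, so a 128MB limit gives a raw chunk size of ~16_000_000 rows,
# which normalize_chunk_sizes then caps at the actual length, i.e. a single chunk.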
def parse_index(index_value, *args, store_data=False, key=None):
from .core import IndexValue
def _extract_property(index, tp, ret_data):
kw = {
'_min_val': _get_index_min(index),
'_max_val': _get_index_max(index),
'_min_val_close': True,
'_max_val_close': True,
'_key': key or _tokenize_index(index, *args),
}
if ret_data:
kw['_data'] = index.values
for field in tp._FIELDS:
if field in kw or field == '_data':
continue
val = getattr(index, field.lstrip('_'), None)
if val is not None:
kw[field] = val
return kw
def _tokenize_index(index, *token_objects):
if not index.empty:
return tokenize(index)
else:
return tokenize(index, *token_objects)
def _get_index_min(index):
try:
return index.min()
except ValueError:
if isinstance(index, pd.IntervalIndex):
return None
raise
except TypeError:
return None
def _get_index_max(index):
try:
return index.max()
except ValueError:
if isinstance(index, pd.IntervalIndex):
return None
raise
except TypeError:
return None
def _serialize_index(index):
tp = getattr(IndexValue, type(index).__name__)
properties = _extract_property(index, tp, store_data)
return tp(**properties)
def _serialize_range_index(index):
if is_pd_range_empty(index):
properties = {
'_is_monotonic_increasing': True,
'_is_monotonic_decreasing': False,
'_is_unique': True,
'_min_val': _get_index_min(index),
'_max_val': _get_index_max(index),
'_min_val_close': True,
'_max_val_close': False,
'_key': key or _tokenize_index(index, *args),
'_name': index.name,
'_dtype': index.dtype,
}
else:
properties = _extract_property(index, IndexValue.RangeIndex, False)
return IndexValue.RangeIndex(_slice=slice(_get_range_index_start(index),
_get_range_index_stop(index),
_get_range_index_step(index)),
**properties)
def _serialize_multi_index(index):
kw = _extract_property(index, IndexValue.MultiIndex, store_data)
kw['_sortorder'] = index.sortorder
kw['_dtypes'] = [lev.dtype for lev in index.levels]
return IndexValue.MultiIndex(**kw)
if index_value is None:
return IndexValue(_index_value=IndexValue.Index(
_is_monotonic_increasing=False,
_is_monotonic_decreasing=False,
_is_unique=False,
_min_val=None,
_max_val=None,
_min_val_close=True,
_max_val_close=True,
_key=key or tokenize(*args),
))
if isinstance(index_value, pd.RangeIndex):
return IndexValue(_index_value=_serialize_range_index(index_value))
elif isinstance(index_value, pd.MultiIndex):
return IndexValue(_index_value=_serialize_multi_index(index_value))
else:
return IndexValue(_index_value=_serialize_index(index_value))
def gen_unknown_index_value(index_value, *args):
pd_index = index_value.to_pandas()
if isinstance(pd_index, pd.RangeIndex):
return parse_index(pd.RangeIndex(-1), *args)
elif not isinstance(pd_index, pd.MultiIndex):
return parse_index(pd.Index([], dtype=pd_index.dtype), *args)
else:
i = pd.MultiIndex.from_arrays([c[:0] for c in pd_index.levels],
names=pd_index.names)
return parse_index(i, *args)
def split_monotonic_index_min_max(left_min_max, left_increase, right_min_max, right_increase):
"""
Split the original two min_max into new min_max. Each min_max should be a list
in which each item should be a 4-tuple indicates that this chunk's min value,
whether the min value is close, the max value, and whether the max value is close.
The return value would be a nested list, each item is a list
indicates that how this chunk should be split into.
:param left_min_max: the left min_max
:param left_increase: if the original data of left is increased
:param right_min_max: the right min_max
:param right_increase: if the original data of right is increased
:return: nested list in which each item indicates how min_max is split
>>> left_min_max = [(0, True, 3, True), (4, True, 8, True), (12, True, 18, True),
... (20, True, 22, True)]
>>> right_min_max = [(2, True, 6, True), (7, True, 9, True), (10, True, 14, True),
... (18, True, 19, True)]
>>> l, r = split_monotonic_index_min_max(left_min_max, True, right_min_max, True)
>>> l
[[(0, True, 2, False), (2, True, 3, True)], [(3, False, 4, False), (4, True, 6, True), (6, False, 7, False),
(7, True, 8, True)], [(8, False, 9, True), (10, True, 12, False), (12, True, 14, True), (14, False, 18, False),
(18, True, 18, True)], [(18, False, 19, True), [20, True, 22, True]]]
>>> r
[[(0, True, 2, False), (2, True, 3, True), (3, False, 4, False), (4, True, 6, True)],
[(6, False, 7, False), (7, True, 8, True), (8, False, 9, True)], [(10, True, 12, False), (12, True, 14, True)],
[(14, False, 18, False), (18, True, 18, True), (18, False, 19, True), [20, True, 22, True]]]
"""
left_idx_to_min_max = [[] for _ in left_min_max]
right_idx_to_min_max = [[] for _ in right_min_max]
left_curr_min_max = list(left_min_max[0])
right_curr_min_max = list(right_min_max[0])
left_curr_idx = right_curr_idx = 0
left_terminate = right_terminate = False
while not left_terminate or not right_terminate:
if left_terminate:
left_idx_to_min_max[left_curr_idx].append(tuple(right_curr_min_max))
right_idx_to_min_max[right_curr_idx].append(tuple(right_curr_min_max))
if right_curr_idx + 1 >= len(right_min_max):
right_terminate = True
else:
right_curr_idx += 1
right_curr_min_max = list(right_min_max[right_curr_idx])
elif right_terminate:
right_idx_to_min_max[right_curr_idx].append(tuple(left_curr_min_max))
left_idx_to_min_max[left_curr_idx].append(tuple(left_curr_min_max))
if left_curr_idx + 1 >= len(left_min_max):
left_terminate = True
else:
left_curr_idx += 1
left_curr_min_max = list(left_min_max[left_curr_idx])
elif left_curr_min_max[0] < right_curr_min_max[0]:
# left min < right min
right_min = [right_curr_min_max[0], not right_curr_min_max[1]]
max_val = min(left_curr_min_max[2:], right_min)
assert len(max_val) == 2
min_max = (left_curr_min_max[0], left_curr_min_max[1],
max_val[0], max_val[1])
left_idx_to_min_max[left_curr_idx].append(min_max)
right_idx_to_min_max[right_curr_idx].append(min_max)
if left_curr_min_max[2:] == max_val:
# left max < right min
if left_curr_idx + 1 >= len(left_min_max):
left_terminate = True
else:
left_curr_idx += 1
left_curr_min_max = list(left_min_max[left_curr_idx])
else:
# from left min(left min close) to right min(exclude right min close)
left_curr_min_max[:2] = right_curr_min_max[:2]
elif left_curr_min_max[0] > right_curr_min_max[0]:
# left min > right min
left_min = [left_curr_min_max[0], not left_curr_min_max[1]]
max_val = min(right_curr_min_max[2:], left_min)
min_max = (right_curr_min_max[0], right_curr_min_max[1],
max_val[0], max_val[1])
left_idx_to_min_max[left_curr_idx].append(min_max)
right_idx_to_min_max[right_curr_idx].append(min_max)
if right_curr_min_max[2:] == max_val:
# right max < left min
if right_curr_idx + 1 >= len(right_min_max):
right_terminate = True
else:
right_curr_idx += 1
right_curr_min_max = list(right_min_max[right_curr_idx])
else:
# from left min(left min close) to right min(exclude right min close)
right_curr_min_max[:2] = left_curr_min_max[:2]
else:
# left min == right min
max_val = min(left_curr_min_max[2:], right_curr_min_max[2:])
assert len(max_val) == 2
min_max = (left_curr_min_max[0], left_curr_min_max[1], max_val[0], max_val[1])
left_idx_to_min_max[left_curr_idx].append(min_max)
right_idx_to_min_max[right_curr_idx].append(min_max)
if max_val == left_curr_min_max[2:]:
if left_curr_idx + 1 >= len(left_min_max):
left_terminate = True
else:
left_curr_idx += 1
left_curr_min_max = list(left_min_max[left_curr_idx])
else:
left_curr_min_max[:2] = max_val[0], not max_val[1]
if max_val == right_curr_min_max[2:]:
if right_curr_idx + 1 >= len(right_min_max):
right_terminate = True
else:
right_curr_idx += 1
right_curr_min_max = list(right_min_max[right_curr_idx])
else:
right_curr_min_max[:2] = max_val[0], not max_val[1]
if left_increase is False:
left_idx_to_min_max = list(reversed(left_idx_to_min_max))
if right_increase is False:
right_idx_to_min_max = list(reversed(right_idx_to_min_max))
return left_idx_to_min_max, right_idx_to_min_max
def build_split_idx_to_origin_idx(splits, increase=True):
# splits' len is equal to the original chunk size on a specified axis,
# splits is sth like [[(0, True, 2, True), (2, False, 3, True)]]
# which means there is one input chunk, and will be split into 2 out chunks
# in this function, we want to build a new dict from the out chunk index to
# the original chunk index and the inner position, like {0: (0, 0), 1: (0, 1)}
if increase is False:
splits = list(reversed(splits))
out_idx = itertools.count(0)
res = dict()
for origin_idx, _ in enumerate(splits):
for pos in range(len(splits[origin_idx])):
if increase is False:
o_idx = len(splits) - origin_idx - 1
else:
o_idx = origin_idx
res[next(out_idx)] = o_idx, pos
return res
def _generate_value(dtype, fill_value):
# special handle for datetime64 and timedelta64
dispatch = {
np.datetime64: pd.Timestamp,
np.timedelta64: pd.Timedelta,
pd.CategoricalDtype.type: lambda x: pd.CategoricalDtype([x]),
# for object, we do not know the actual dtype,
# just convert to str for common usage
np.object_: lambda x: str(fill_value),
}
# otherwise, just use dtype.type itself to convert
convert = dispatch.get(dtype.type, dtype.type)
return convert(fill_value)
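# Sketch: _generate_value(np.dtype("datetime64[ns]"), 1) yields pd.Timestamp(1),
# while _generate_value(np.dtype("float64"), 1) simply yields np.float64(1.0).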
def build_empty_df(dtypes, index=None):
columns = dtypes.index
# duplicate column may exist,
# so use RangeIndex first
df = pd.DataFrame(columns=pd.RangeIndex(len(columns)), index=index)
length = len(index) if index is not None else 0
for i, d in enumerate(dtypes):
df[i] = pd.Series([_generate_value(d, 1) for _ in range(length)],
dtype=d, index=index)
df.columns = columns
return df
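# Sketch: build_empty_df(pd.Series([np.dtype(np.int64), np.dtype(object)], index=["a", "b"]))
# returns an empty DataFrame whose columns "a" and "b" keep the requested dtypes,
# which is useful for building chunk metadata without touching real data.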
def build_df(df_obj, fill_value=1, size=1):
empty_df = build_empty_df(df_obj.dtypes, index=df_obj.index_value.to_pandas()[:0])
dtypes = empty_df.dtypes
record = [_generate_value(dtype, fill_value) for dtype in dtypes]
if isinstance(empty_df.index, pd.MultiIndex):
index = tuple(_generate_value(level.dtype, fill_value) for level in empty_df.index.levels)
empty_df = empty_df.reindex(
            index=pd.MultiIndex.from_tuples([index], names=empty_df.index.names))
import pandas as pd
import numpy as np
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
# https://www.nomisweb.co.uk/query
###
# Assemble joint distribution of: region - sex - age - ethnicity
###
census_11_male_white = pd.read_csv(script_dir + '/male_white.csv')
census_11_male_asian = pd.read_csv(script_dir + '/male_asian.csv')
census_11_male_black = pd.read_csv(script_dir + '/male_black.csv')
census_11_male_mixed = pd.read_csv(script_dir + '/male_mixed.csv')
census_11_male_other = pd.read_csv(script_dir + '/male_other.csv')
import pytplot
import pandas as pd
import copy
def spec_mult(tvar,new_tvar=None):
"""
Multiplies the data by the stored spectrogram bins and created a new tplot variable
.. note::
This analysis routine assumes the data is no more than 2 dimensions. If there are more, they may become flattened!
Parameters:
tvar : str
Name of tplot variable
        new_tvar : str
            Name of new tplot variable in which to store the result.  If none is specified, a name will be created.
Returns:
None
Examples:
>>> pytplot.store_data('h', data={'x':[0,4,8,12,16,19,21], 'y':[[8,1,1],[100,2,3],[4,2,47],[4,39,5],[5,5,99],[6,6,25],[7,-2,-5]],'v':[[1,1,50],[2,2,3],[100,4,47],[4,90,5],[5,5,99],[6,6,25],[7,7,-5]]})
>>> pytplot.spec_mult('h','h_specmult')
>>> print(pytplot.data_quants['h_specmult'].data)
"""
if new_tvar is None:
new_tvar = tvar+'_specmult'
if 'spec_bins' not in pytplot.data_quants[tvar].coords:
print("Specified variable must have spec bins stored. Returning...")
return
d, s = pytplot.tplot_utilities.convert_tplotxarray_to_pandas_dataframe(tvar)
dataframe = d.values
specframe = s.values
    new_df = pd.DataFrame(dataframe*specframe, columns=d.columns, index=d.index)
# Import the class
import kmapper as km
import pandas
import sklearn
import numpy
import matplotlib.pyplot as plt
#========== Define Data and Labels here==========
b_data=pandas.read_csv("./../Results/bronchieactasis_data.csv",index_col=0)
c_data=pandas.read_csv("./../Results/COPD.csv",index_col=0)
#=======Data creation merging=======
data=pandas.concat([b_data,c_data])
allergens=data.columns.values
labels_b=pandas.read_csv("./../../Bronchiectasis_Clinical_Metadata_V3.4.csv",index_col=0)
labels_c=pandas.read_csv("./../../COPD_Clinical_Metadata_V3.3_tpyxlsx.csv",index_col=0)
labels_b=labels_b[["BCOS"]]
labels_c=labels_c[["COPD_Comorbidities_CoExisting_Bronchiectasis"]]
#Create new column names and concat
labels_b.columns=["y"]
labels_b=labels_b.replace({"No":0,"Yes":1})
labels_c.columns=["y"]
labels_c=labels_c.replace({"No":2,"Yes":1})
labels=pandas.concat([labels_b,labels_c])
#O - bronchieactasis, 1- BCOS and 2 - COPD
data=data.loc[labels.index,:].values
tooltip_s = numpy.array(
labels.index
)
# Initialize
mapper = km.KeplerMapper(verbose=1)
# Fit to and transform the data
#projected_data = mapper.fit_transform(data, projection="l2norm") # X-Y axis
#Can define custom lenses here
projected_data = numpy.linalg.norm(data,ord=2,axis=1,keepdims=True)
plt.hist(projected_data,bins=100)
plt.savefig("./../Results/lens_hist.png",dpi=300)
# Create dictionary called 'graph' with nodes, edges and meta-information
graph = mapper.map(projected_data, data, cover=km.Cover(n_cubes=10,perc_overlap=0.1),
clusterer=sklearn.cluster.KMeans(n_clusters=2, random_state=5454564))
#Color_Scale
colorscale_default = [
[0.0, "rgb(68, 1, 84)"], # Viridis
[0.1, "rgb(72, 35, 116)"],
[0.2, "rgb(64, 67, 135)"],
[0.3, "rgb(52, 94, 141)"],
[0.4, "rgb(41, 120, 142)"],
[0.5, "rgb(32, 144, 140)"],
[0.6, "rgb(34, 167, 132)"],
[0.7, "rgb(68, 190, 112)"],
[0.8, "rgb(121, 209, 81)"],
[0.9, "rgb(189, 222, 38)"],
[1.0, "rgb(253, 231, 36)"],
]
#Look up the below link - functions already written to convert color scales to the above format
#https://kepler-mapper.scikit-tda.org/en/latest/_modules/kmapper/visuals.html
# Visualize it
mapper.visualize(graph, path_html="./../Results/data_keplermapper_output.html", color_values=labels,color_function_name=["Bronchieactasis or BCOS or Copd"],title="",custom_tooltips=tooltip_s,colorscale=colorscale_default,
custom_meta={"Metadata":"you can add"},X=data,X_names=allergens,lens=projected_data,lens_names=["L2 norm"])
'''
mapper.visualize(graph, path_html="./../Results/data_keplermapper_output.html", color_values=labels.values,color_function_name=[i for i in labels.columns],title="",custom_tooltips=tooltip_s,colorscale=colorscale_default,
custom_meta={"Metadata":"you can add"},X=data,X_names=allergens,lens=projected_data,lens_names=["L2 norm"])
'''
#=====Parse graph object to original data to map back clusters=====
cluster = pandas.Series(graph['nodes'])
"""
Generate ablated modality images. One-time use code.
Modality ablation experiment. Generate and save the ablated brats images
Generate dataset with
Save in the directory: Path(brats_path).parent / "ablated_brats", and can be loaded with the script:
T1 = os.path.join(image_path_list[0], bratsID, bratsID+'_t1.nii.gz') # (240, 240, 155)
T1c = os.path.join(image_path_list[1], bratsID, bratsID+'_t1ce.nii.gz')
T2 = os.path.join(image_path_list[2], bratsID, bratsID+'_t2.nii.gz')
FLAIR = os.path.join(image_path_list[3], bratsID, bratsID+'_flair.nii.gz')
For the original brats image, ablate it by filling in the non-zero value with random values ~ N(image_mean, image_std)
"""
import nibabel
import os
from pathlib import Path
import numpy as np
import pandas as pd
import itertools, math
from monai.data import write_nifti
from monai.transforms import LoadNifti
import monai
# from .heatmap_utils import get_heatmaps
from .heatmap_utlis import *
from scipy import stats
from scipy.stats import spearmanr as spr
from scipy.stats import kendalltau
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, recall_score, precision_score, confusion_matrix
import csv
import itertools, math
import copy
from validate import test
from datetime import datetime
from skimage.morphology import binary_dilation
# print(monai.__version__)
from sklearn.metrics import auc, roc_curve
def generate_ablated_dataset(modalities = ["t1", "t1ce", "t2", "flair"], ablation_mode = 'allzero'):
"""
One time function to get and save the ablated modalities.
    :param ablation_mode: 'allzero' fills the whole modality with zeros; 'allnoise' replaces it with non-tumor noise; 'lesionzero' zeros out only the (dilated) lesion region
:return:
"""
data_root = "/local-scratch/authorid/dld_data/brats2020/MICCAI_BraTS2020_TrainingData/all_tmp"
data_root = Path(data_root)
if ablation_mode == 'allzero': # ablate the whole modality, and replace with 0s
saved_path = data_root.parent / "zero_ablated_brats"
elif ablation_mode == 'allnoise': # ablate the whole modality, and replace with nontumor signal noises
saved_path = data_root.parent / "ablated_brats"
elif ablation_mode == 'lesionzero': # ablate the lesion only on the modality, and replace with 0s
saved_path = data_root.parent / "lesionzero"
# read brain MRI
ids = [f for f in os.listdir(data_root) if os.path.isdir(os.path.join(data_root, f))]
for id in ids:
seg_path = data_root / id / "{}_seg.nii.gz".format(id)
seg = nibabel.load(seg_path).get_fdata()
for m in modalities:
path = data_root/id / "{}_{}.nii.gz".format(id, m)
# mri = nibabel.load(path)
# img_data = mri.get_fdata()
loader = LoadNifti(image_only = False)
img_data, header = loader(path)
if ablation_mode == "allzero":
ablate_array = np.zeros(img_data.shape)
elif ablation_mode == 'allnoise':
ablate_array = ablate_signal(img_data, seg)
elif ablation_mode == 'lesionzero':
ablate_array = ablate_tumor_only(img_data, seg)
# nibabel.save(ablate_array, "{}_{}.nii.gz".format(id, m))
output_root = saved_path/id
output_root.mkdir(exist_ok=True, parents=True)
print(header['affine'], header['original_affine'])
write_nifti(ablate_array,
affine= header['affine'],
target_affine = header['original_affine'],
file_name = output_root/"{}_{}.nii.gz".format(id, m))
# saver = NiftiSaver(data_root_dir = output_root, output_postfix = None, output_ext='.nii.gz')
# saver.save(ablate_array, {'filename_or_obj': "{}_{}".format(id, m)})
def ablate_tumor_only(array, seg):
edge = 10
dilated_seg = []
for s in range(array.shape[-1]):
dilated= binary_dilation(seg[:,:,s], selem = np.ones([edge for i in range(seg[:,:,s].ndim)]))
dilated_seg.append(dilated)
dilated_seg = np.stack(dilated_seg, axis=-1)
ablated_array = np.copy(array)
ablated_array[dilated_seg > 0] = 0
return ablated_array
def ablate_signal(array, seg):
"""Helper: given a image array, replace the non-zero value by sampling from the rest non-tumor regions (with replacement, so
that to keep the same distribution)
"""
non_tumor = array[(array != 0) & (seg != 1) & (seg != 2) & (seg != 4)].flatten() # get brain region with non-tumor part [0. 1. 2. 4.]
print(np.unique(seg))
print(non_tumor.shape)
# mean = np.mean(array)
# std = np.std(array)
ablated_array = np.random.choice(non_tumor, size=array.shape, replace=True)
ablated_array[array == 0] = 0
print('ablated_array', ablated_array.shape)
return ablated_array
### Utlities to get gt modality shapley value, and compare hm value with this gt ###
def modality_shapley(config, ablated_image_folder, csv_save_dir = "/local-scratch/authorid/BRATS_IDH/log/mod_shapley"):
"""
Modality ablation experiment. Generate and save the ablated brats images
Generate dataset with
Save in the directory: Path(brats_path).parent / "ablated_brats", and can be loaded with the script:
T1 = os.path.join(image_path_list[0], bratsID, bratsID+'_t1.nii.gz') # (240, 240, 155)
T1c = os.path.join(image_path_list[1], bratsID, bratsID+'_t1ce.nii.gz')
T2 = os.path.join(image_path_list[2], bratsID, bratsID+'_t2.nii.gz')
FLAIR = os.path.join(image_path_list[3], bratsID, bratsID+'_flair.nii.gz')
For the original brats image, ablate it by filling in the non-zero value with random values ~ N(image_mean, image_std)
"""
modalities = config['xai']['modality']
print(modalities)
# generate modality combinations
N_sets = list(itertools.product([0, 1], repeat=len(modalities)) ) # set of all_combinations
for modality_selection in N_sets:
test(config, timestamp = False, ablated_image_folder = ablated_image_folder, csv_save_dir = csv_save_dir, modality_selection= modality_selection)
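# Sketch: with the four default modalities, N_sets enumerates all 2**4 = 16 selections,
# from (0, 0, 0, 0) to (1, 1, 1, 1); test() is run once per selection so that v(S)
# is available for every modality subset S in the Shapley computation below.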
def shapley_result_csv(fold = 1, root = '/local-scratch/authorid/BRATS_IDH/log/mod_shapley/test/', modalities= ["t1", "t1ce", "t2", "flair"], metric = 'acc'):
"""
From the individual test records, get the summarized csv of modality: accuracy pair.
:param fold:
:param path:
:return:
"""
# get all csvs in the folder
save_path = Path(root)/"shapley"
save_path.mkdir(parents = True, exist_ok= True)
csv_filename = save_path / 'aggregated_performance_fold_{}.csv'.format(fold)
file_exists = os.path.isfile(csv_filename)
fnames = modalities+["accuracy"]
with open(csv_filename, 'w', newline='') as csv_file:
csv_writer = csv.DictWriter(csv_file, fieldnames=fnames)
# if not file_exists:
csv_writer.writeheader()
for f in Path(root).rglob('*cv_result_fold*.csv'):
fn = f.name.split(".")[0].split("-")
if len(fn) == len(modalities)+1:
fold_num = fn[0].split("_")[-1]
else:
fold_num = fn[1].split("_")[-1]
fold_num = int(fold_num)
if fold_num == fold:
if len(fn) == len(modalities) + 1:
modelity_selection = [int(i) for i in fn[1:]]
else:
modelity_selection = [int(i) for i in fn[2:]]
# print( fold_num, modelity_selection)
results = pd.read_csv(f)
gt = results['gt']
pred = results['pred']
if metric == 'auc':
fpr, tpr, threshold = roc_curve(results['gt'].to_list(), results['pred'].to_list())
accuracy = auc(fpr, tpr)
else:
accuracy = accuracy_score(gt, pred)
csv_record = {'accuracy': accuracy}
for i, m in enumerate(modalities):
csv_record[m]= modelity_selection[i]
csv_writer.writerow(csv_record)
print("Fold {}: modality: {}, accuracy: {}".format(fold, modelity_selection, accuracy))
print("Saved at {}".format(csv_filename))
return csv_filename
def get_shapley(csv_filename, modalities = ["t1", "t1ce", "t2", "flair"]):
"""
    Calculate the modality Shapley values.
    CSV with columns t1, t1ce, t2, flair (0/1 flags) and a performance value.
:param csv:
:return:
"""
# convert csv to dict: {(0, 0, 1, 0): 10} {tuple: performance}
df = pd.read_csv(csv_filename)
fold = Path(csv_filename).name.split('.')[0].split('_')[-1]
# print(fold)
df_dict = df.to_dict(orient='records')
# print(df_dict)
v_dict = {} #
for row in df_dict:
mod_lst = []
for m in modalities:
mod_lst.append(row[m])
v_dict[tuple(mod_lst)] = row['accuracy']
# print(v_dict)
n = len(modalities)
# sanity check if all mod combinations are exists
N_sets = list(itertools.product([0,1],repeat = len(modalities))) # set of all_combinations
for s in N_sets:
if tuple(s) not in v_dict:
print("ERROR in get_shapley! {} missing".format(s))
N_sets_array = np.array(N_sets) # array([[0, 0, 0, 0], [0, 0, 0, 1],
mod_shapley = {}
# for each mod, calculate its shapley value:
for i, mod in enumerate(modalities):
# get combination not including mod
n_not_i = N_sets_array[N_sets_array[:, i]==0]# # a list containing all subsets that don't contains i todo
# print(n_not_i, i)
phi_i= 0
for s in n_not_i:
# print('s', s)
v_s = v_dict[tuple(s)]
sANDi = copy.deepcopy(s)
sANDi[i] =1
v_sANDi = v_dict[tuple(sANDi)]
# print(s , s.sum(), i, mod)
phi_i += (v_sANDi - v_s) * math.factorial(s.sum()) * (math.factorial(n - s.sum() - 1)) / math.factorial(n)
mod_shapley[mod] = phi_i
mod_shapley['fold'] = fold
print(mod_shapley)
# save gt shapley to csv
with open(Path(csv_filename).parent/'fold_{}_modality_shapley.csv'.format(fold), 'w') as f:
csv_writer = csv.DictWriter(f, fieldnames=list(mod_shapley.keys()))
csv_writer.writeheader()
csv_writer.writerow(mod_shapley)
# for key in mod_shapley.keys():
# f.write("%s,%s\n" % (key, mod_shapley[key]))
return mod_shapley
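# The loop in get_shapley() implements the standard Shapley value
#   phi_i = sum over S subset of N\{i} of |S|! * (n - |S| - 1)! / n! * (v(S u {i}) - v(S))
# where v(S) is the test accuracy obtained with only the modality subset S enabled.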
def get_shapley_gt_multiple_runs_pipeline(config, run_num, ablated_image_folder, csv_save_dir):
"""Since the shapley value gt is not deterministic, run multiple run_num to get the distribution of gt modality shapley value."""
modalities = config['xai']['modality']
fold = config['data_loader']['args']['fold']
# support multiple runtime, check if file exists
existing_runs = [f for f in os.listdir(csv_save_dir) if os.path.isdir(os.path.join(csv_save_dir, f))]
existing_runs.sort()
starting_run = -1
for i in existing_runs:
i = int(i)
shapley_csv = os.path.join(csv_save_dir, "{}".format(i), 'shapley', 'fold_{}_modality_shapley.csv'.format(fold))
file_exists = os.path.isfile(shapley_csv)
if file_exists:
starting_run = i
else:
break
if starting_run >= run_num:
return
for run_i in range(starting_run+1, run_num):
run_dir = os.path.join(csv_save_dir, "{}".format(run_i))
modality_shapley(config, ablated_image_folder = ablated_image_folder, csv_save_dir= run_dir)
csv_filename = shapley_result_csv(fold = fold, modalities=modalities, root = run_dir)
print(csv_filename)
get_shapley(csv_filename, modalities=modalities)
def aggregate_shapley_gt_mean_std(fold, csv_save_dir, modalities):
# calculate the mean and std of the multiple run shapley
result_list = []
runs = [f for f in os.listdir(csv_save_dir) if os.path.isdir(os.path.join(csv_save_dir, f))]
for run_i in runs:
shapley_csv = os.path.join(csv_save_dir, "{}".format(run_i), 'shapley', 'fold_{}_modality_shapley.csv'.format(fold))
file_exists = os.path.isfile(shapley_csv)
if file_exists:
df = pd.read_csv(shapley_csv)
df = df.iloc[0]#.to_dict('list')
# print(df)
gt_shapley = [df[m] for m in modalities]
result_list.append(gt_shapley)
result_array = np.array(result_list)
shapley_mean = result_array.mean(axis = 0)
shapley_std = result_array.std(axis = 0)
print(result_array)
print("Shapley mean: {}, std {}".format(shapley_mean, shapley_std))
# save the mean and std as two csv files
mean_shapley, std_shapley = {}, {}
mean_shapley['fold'], std_shapley['fold'] = fold, fold
# for each mod, calculate its shapley value:
for i, mod in enumerate(modalities):
mean_shapley[mod] = shapley_mean[i]
std_shapley[mod] = shapley_std[i]
with open(os.path.join(csv_save_dir, 'multirun_gt_shapley_fold_{}.csv'.format(fold)), 'w') as f:
csv_writer = csv.DictWriter(f, fieldnames=list(mean_shapley.keys()))
csv_writer.writeheader()
csv_writer.writerow(mean_shapley)
with open(os.path.join(csv_save_dir, 'multirun_gt_shapleySTD_fold_{}.csv'.format(fold)), 'w') as f:
csv_writer = csv.DictWriter(f, fieldnames=list(std_shapley.keys()))
csv_writer.writeheader()
csv_writer.writerow(std_shapley)
def get_modality_feature_hm_value(post_hm, seg, penalize = True, portion = True):
"""
Get positive+negative values inside tumor regions, minus positive values outside tumor regions
(penalty for positive values outside tumor)
:param post_hm: np array of the same shape with seg
:param penalize: old parameter. No longer needed with new parameter portion
:return:
"""
assert seg.shape == post_hm.shape[1:], "segmentation map shape {} and processed hm shape {} does not match!".format(seg.shape, post_hm.shape[1:])
# binary_seg = seg[seg>0]
edge = 20
dilated_seg = []
for s in range(seg.shape[-1]):
dilated= binary_dilation(seg[:,:,s], selem = np.ones([edge for i in range(seg[:,:,s].ndim)]))
dilated_seg.append(dilated)
dilated_seg = np.stack(dilated_seg, axis = -1)
print((seg>0).sum()/seg.size, (dilated_seg>0).sum()/dilated_seg.size, dilated_seg.shape)
hm_values = []
for hm in post_hm:
feature = hm[(dilated_seg>0) & (hm>0)]
non_feature = hm[(dilated_seg==0) & (hm>0)]
if portion:
v = feature.sum() / ( feature.sum() + non_feature.sum() )
if (v < 0):
print( feature.sum() , feature.shape, non_feature.shape,non_feature.sum())
else:
v = feature.sum()
if penalize:
v -= non_feature.sum()
hm_values.append(v)
print(hm_values, np.sum(post_hm, axis = tuple([i for i in range(4)][1:])), '\n')
return hm_values
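# In short, with portion=True the returned score per modality is
#   sum(positive hm values inside the dilated tumor mask) / sum(all positive hm values),
# i.e. the fraction of positive saliency falling on the (dilated) lesion; with
# portion=False it is the raw inside sum, optionally penalized by the outside sum.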
def get_save_modality_hm_value(hm_save_dir, result_save_dir, fold, method_list, penalize= False, portion_metrics= True, positiveHMonly = True, segment_path = None, modalities= ["t1", "t1ce", "t2", "flair"]):
'''
Since read hm is time consuming, read and save the hm values for each method
:param hm_save_dir:
:param method_list:
:param shapley_csv:
:param localize_feature: if True, calculate the sum of hm values using lesion masks.
Get positive+negative values inside tumor regions, minus positive values outside tumor regions
(penalty for positive values outside tumor)
:return:
'''
Path(result_save_dir).mkdir(parents=True, exist_ok=True)
columns = modalities+ ['XAI', 'dataID']
for method in method_list:
result_csv = Path(result_save_dir) / 'modalityHM_fold-{}-{}.csv'.format(fold, method)
file_exists = os.path.isfile(result_csv)
if file_exists:
print("{} exists, pass".format(method))
continue
result_df = pd.DataFrame(columns=columns)
value = {}
# post-process hms
# print(method)
hm_dict, data_record = get_heatmaps(hm_save_dir, method, by_data=False, hm_as_array=False, return_mri=False)
print("Number of data for {}: {}".format(method, len(hm_dict.keys()))) # , hm_dict.keys())
for dataID, hm in hm_dict.items():
print(hm.min(), hm.max(), dataID)
post_hm = postprocess_heatmaps(hm, no_neg=positiveHMonly) # (C, H,W,D) # the postprocessed hm is already non-negative
if segment_path:
seg_path = os.path.join(segment_path, dataID, dataID + '_seg.nii.gz')
seg = nibabel.load(seg_path).get_fdata()
seg = np.rot90(seg, k=3, axes=(0, 1)) # important, avoid bug of seg, saliency map mismatch
hm_values = get_modality_feature_hm_value(post_hm, seg, penalize=penalize, portion = portion_metrics)
else:
if positiveHMonly:
positive_hm = np.copy(post_hm)
positive_hm[positive_hm <0] =0
hm_values = np.sum(positive_hm, axis = tuple([i for i in range(len(modalities))][1:]))
else:
hm_values = np.sum(post_hm, axis = tuple([i for i in range(len(modalities))][1:]))
# print(method, dataID, corr, p_value)
value["XAI"] = method
value['dataID'] = dataID
for i, mod in enumerate(modalities):
value[mod] = hm_values[i]
result_series= pd.Series(value, index=columns)
result_df= result_df.append(result_series, ignore_index=True)
# print(result_df)
# result_df = pd.DataFrame.from_dict(result, orient = 'index')
result_df.to_csv(result_csv)
print("modalityHM Saved at: {}".format(result_csv))
return result_csv
# def corr_modality_shapley(hm_save_dir, method_list, shapley_csv, modalities= ["t1", "t1ce", "t2", "flair"]):
# ''''''
# fold = Path(shapley_csv).name.split('.')[0].split('_')[1] #fold_{}_modality_shapley.csv'
# df = pd.read_csv(shapley_csv)
# # print(df)
# df = df.iloc[0]#.to_dict('list')
# # print(df)
# gt_shapley = [df[m] for m in modalities]
# # print(gt_shapley)
# columns = modalities+ ['XAI', 'correlation', 'p_value', 'dataID']
#
# for method in method_list:
# result_csv = Path(shapley_csv).parent / 'CorrModalityShapley_fold-{}-{}.csv'.format(fold, method)
# file_exists = os.path.isfile(result_csv)
# if file_exists:
# print("{} exists, pass".format(file_exists))
# continue
# result_df = pd.DataFrame(columns=columns)
# correlations = {}
# gt_results = list()
# # post-process hms
# hm_dict, data_record = get_heatmaps(hm_save_dir, method, by_data=False, hm_as_array=False, return_mri=False)
# print("Number of data to be evaluated for {}: {}".format(method, len(hm_dict.keys()))) # , hm_dict.keys())
# for dataID, hm in hm_dict.items():
# post_hm = postprocess_heatmaps(hm) # (C, H,W,D)
# hm_values = np.sum(post_hm, axis = tuple([i for i in range(len(modalities))][1:]))
# corr, p_value = spr(gt_shapley, hm_values)
# # print(method, dataID, corr, p_value)
# correlations["XAI"] = method
# correlations["correlation"] = corr
# correlations["p_value"] = p_value
# correlations['dataID'] = dataID
# for i, mod in enumerate(modalities):
# correlations[mod] = hm_values[i]
# result_series= pd.Series(correlations, index=columns)
# result_df= result_df.append(result_series, ignore_index=True)
# print(result_df)
# # result_df = pd.DataFrame.from_dict(result, orient = 'index')
# result_df.to_csv(result_csv)
# print("corr_modality_shapley Saved at: {}".format(result_csv))
# return result_csv
def compute_xai_mod_shapley_corr(hm_result_csv_root, gt_csv_path, modalities= ["t1", "t1ce", "t2", "flair"], corr_name = "pearson"):
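    """
    For every per-method modalityHM_fold-*.csv under hm_result_csv_root, compare the
    heatmap-derived modality importance with the ground-truth modality Shapley values of
    that fold: compute the dataset-level correlation plus the mean/std of the data-wise
    correlations, save the per-datum correlations under a 'kendalltau' folder, and write
    the sorted summary table into gt_csv_path.
    """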
fold_dict = {}
if corr_name == 'pearson':
corr_method = stats.pearsonr
elif corr_name == 'spr':
corr_method = spr
elif corr_name == 'kendalltau':
corr_method = kendalltau
# get all hm value csv files for each
for f in Path(hm_result_csv_root).rglob('modalityHM_fold*.csv'):
fold = f.name.split('.')[0].split("-")[1]
if fold in fold_dict:
fold_dict[fold].append(f)
else:
fold_dict[fold] = [f]
columns = ['XAI', 'fold', 'corr','p_value', 'data_wise_corr', 'data_wise_std' ] + modalities
result_df = pd.DataFrame(columns=columns)
for fold, files in fold_dict.items():
# get mod shapley gt
shapley_csv = os.path.join(gt_csv_path, 'multirun_gt_shapley_fold_{}.csv'.format(fold))
if not os.path.isfile(shapley_csv):
if gt_csv_path[-2:] =='mi':
shapley_csv = Path(gt_csv_path) / 'seed_{}'.format(fold) /'shapley' / 'fold_{}_modality_shapley.csv'.format(fold)
else:
shapley_csv = Path(gt_csv_path) / 'shapley' / 'fold_{}_modality_shapley.csv'.format(fold)
# elif not os.path.isfile(shapley_csv):
# shapley_csv = Path(gt_csv_path) / 'shapley' / 'fold_{}_modality_shapley.csv'.format(fold)
# print("shapley_csv", shapley_csv)
df = pd.read_csv(shapley_csv)
df = df.iloc[0]#.to_dict('list')
# print(df)
gt_shapley = [df[m] for m in modalities]
print(fold, gt_shapley)
for fl in files:
method = fl.name.split('.')[0].split('-')[-1]
hm_df = pd.read_csv(fl)
# print("file", fl, hm_df )
result = {}
# print( hm_df.mean(axis=0))
result['XAI'] = hm_df['XAI'].unique()[0]
result['fold'] = fold
hm_mean = hm_df.mean(axis=0)
hm_value_dataset = [hm_mean[m] for m in modalities]
hm_df['XAI'] = method
hm_df['Fold'] = fold
for i,m in enumerate(modalities):
result[m] = hm_mean[m]
# print(hm_value_dataset)
result["corr"], result["p_value"] = corr_method(gt_shapley, hm_value_dataset)
hm_df['kendalltau'] = hm_df.apply(lambda row: corr_method(gt_shapley, row[modalities]).correlation, axis=1)
hm_df['pvalue'] = hm_df.apply(lambda row: corr_method(gt_shapley, row[modalities]).pvalue, axis=1)
correlation = list(hm_df['kendalltau'])
# hm_df['kendalltau'] = 0
# hm_df['pvalue'] = 0
# for index, row in hm_df.iterrows():
# corr, p_value = corr_method(gt_shapley, row[modalities])
# correlation.append(corr)
# hm_df['kendalltau'] = corr
# hm_df['pvalue'] = p_value
kandall_dir = fl.parent.parent/ 'kendalltau'
kandall_dir.mkdir(parents=True, exist_ok=True)
hm_df.to_csv(os.path.join(kandall_dir, fl.name))
data_wise_corr = np.array(correlation)
result["data_wise_corr"] = data_wise_corr.mean()
result["data_wise_std"] = data_wise_corr.std()
# print("data wise corr: mean {}, std {}".format(data_wise_corr.mean(), data_wise_corr.std()))
# print(result)
result_series= pd.Series(result, index=columns)
result_df= result_df.append(result_series, ignore_index=True)
dt = datetime.now().strftime(r'%m%d_%H%M%S')
sorted_df = result_df.sort_values(by='corr', ascending=False, na_position='last')
print(sorted_df)
hm_type = Path(hm_result_csv_root).name
sorted_df.to_csv(os.path.join(gt_csv_path, "mod_shapley_result-{}-{}-{}.csv".format(corr_name, hm_type, dt)))
def compute_mfsi(hm_result_csv_root, gt_csv_path, modalities= ["t1", "t1ce", "t2", "flair"], msfi_save_dir = 'msfi_featshapley', normalization_method = 'minmax'):#, corr_name = "pearson"):
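    """
    Compute the msfi score for every XAI method and fold: the per-modality heatmap values
    (from the modalityHM_fold-*.csv files under hm_result_csv_root) are combined with the
    fold's ground-truth modality Shapley values, which are first normalized (minmax or
    scaled by their maximum, depending on normalization_method).
    """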
fold_dict = {}
# if corr_name == 'pearson':
# corr_method = stats.pearsonr
# else:
# corr_method = spr
# get all hm value csv files for each
for f in Path(hm_result_csv_root).rglob('modalityHM_fold*.csv'):
fold = f.name.split('.')[0].split("-")[1]
if fold in fold_dict:
fold_dict[fold].append(f)
else:
fold_dict[fold] = [f]
columns = ['XAI', 'fold', 'msfi', 'msfi_std' ]
result_df = pd.DataFrame(columns=columns)
for fold, files in fold_dict.items():
# get mod shapley gt
shapley_csv = os.path.join(gt_csv_path, 'multirun_gt_shapley_fold_{}.csv'.format(fold))
if not os.path.isfile(shapley_csv):
if gt_csv_path[-2:] =='mi':
shapley_csv = Path(gt_csv_path) / 'seed_{}'.format(fold) /'shapley' / 'fold_{}_modality_shapley.csv'.format(fold)
elif gt_csv_path[-8:] =='shortcut':
print(gt_csv_path)
shapley_csv = Path(gt_csv_path) / 'fold_{}_modality_shapley.csv'.format(fold)
else:
shapley_csv = Path(gt_csv_path) / 'shapley' / 'fold_{}_modality_shapley.csv'.format(fold)
# print("shapley_csv", shapley_csv)
df = pd.read_csv(shapley_csv)
df = df.iloc[0]#.to_dict('list')
# print(df)
gt_shapley = [df[m] for m in modalities]
# normalize the gt_shapley value
sh_min = min(gt_shapley)
        print('before norm', df[modalities])
if normalization_method == 'minmax':
for m in modalities:
df[m] = (df[m] - min(gt_shapley) ) / (max(gt_shapley) - min(gt_shapley))
else:
ratio = 1 / max(gt_shapley)
for m in modalities:
df[m] = df[m] * ratio
print('after mi norm')
print(df, df[modalities], df[modalities].sum() )
for fl in files:
hm_df = | pd.read_csv(fl) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
This module contains all classes and functions dedicated to the processing and
analysis of decay data.
"""
import logging
import os # used in docstrings
import pytest # used in docstrings
import tempfile # used in docstrings
import yaml # used in docstrings
import h5py
import copy
from math import sqrt
import numpy as np
import pandas as pd
import scipy
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import splu
import sandy
__author__ = "<NAME>"
__all__ = [
"DecayData",
"decay_modes",
"rdd2hdf",
"BranchingRatio",
"HalfLife",
"DecayEnergy",
]
pd.options.display.float_format = '{:.5e}'.format
decay_modes = {
0: "gamma",
1: "beta",
2: "e.c.",
3: "i.t.",
4: "alpha",
5: "n",
6: "s.f.",
7: "p",
}
class DecayData():
"""
Container of radioactive nuclide data for several isotopes.
Attributes
----------
data : `dict`
source of decay data content
Methods
-------
from_endf6
extract decay data from ENDF-6 instance
from_hdf5
extract decay data from hdf5 file
get_bmatrix
        extract B-matrix into dataframe
get_decay_chains
extract decay chains into dataframe
get_qmatrix
extract Q-matrix into dataframe
get_transition_matrix
extract transition matrix into dataframe
to_hdf5
write decay data to hdf5 file
"""
def __repr__(self):
return self.data.__repr__()
def __init__(self, dct):
self.data = dct
@property
def data(self):
"""
Dictionary of RDD content.
Returns
-------
`dict`
hierarchical RDD content
"""
return self._data
@data.setter
def data(self, data):
self._data = data
def get_nuclides(self):
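        """Return the sorted list of ZAM identifiers available in the decay data."""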
return sorted(self.data.keys())
def get_pn(self):
"""
Extract probability of neutron emission.
Returns
-------
`pandas.Series`
panda series with ZAM index and probability of neutrom emission
Examples
--------
>>> endf6 = sandy.get_endf6_file("jeff_33", "decay", 391000)
>>> rdd = sandy.DecayData.from_endf6(endf6)
>>> rdd.get_pn()
ZAM
391000 1.00000e+00
Name: PN, dtype: float64
"""
pn = {}
for zam, data in self.data.items():
if data["stable"]:
continue
for (rtyp, rfs), decay_mode in data["decay_modes"].items():
# number_del_neuts = f"{rdtp}".count("5")
daughters = decay_mode["decay_products"]
if 10 in daughters:
pn[zam] = daughters[10]
series = pd.Series(pn, name="PN")
series.index.name = "ZAM"
return series
def get_half_life(self, with_uncertainty=True):
"""
Extract half life and its uncertainty.
Parameters
----------
with_uncertainty : `bool`, optional, default is 'True'
makes the method return half lives and uncertainties
if set equal True, or else return only the half lives
Returns
-------
`sandy.HalfLife`
object containing half life and associated uncertainty or
only half life if with_uncertainty=False
Notes
-----
.. note:: if a nuclide is stable, half-life of zero will be assigned,
according with the value stored in the ENDF6 format.
Examples
--------
>>> endf6 = sandy.get_endf6_file("jeff_33", "decay", [942400, 922350])
>>> rdd = sandy.DecayData.from_endf6(endf6)
>>> rdd.get_half_life()
HL DHL
ZAM
922350 2.22102e+16 1.57788e+13
942400 2.07108e+11 1.57785e+08
>>> rdd.get_half_life(with_uncertainty=False)
HL
ZAM
922350 2.22102e+16
942400 2.07108e+11
Stable nuclide:
>>> endf6 = sandy.get_endf6_file("jeff_33", "decay", 260560)
>>> rdd = sandy.DecayData.from_endf6(endf6)
>>> rdd.get_half_life(with_uncertainty=False)
HL
ZAM
260560 0.00000e+00
"""
thalf = {zam: {
"HL": dic['half_life'],
"DHL": dic['half_life_uncertainty'],
} for zam, dic in self.data.items()}
df = pd.DataFrame(thalf).T
df.index.name = "ZAM"
if with_uncertainty:
return HalfLife(df)
else:
return HalfLife(df.HL)
def get_branching_ratio(self, with_uncertainty=True):
"""
Extract branching ratios and their uncertainties.
Parameters
----------
with_uncertainty : `bool`, optional, default is 'True'
makes the method return branching ratios and uncertainties
if set equal True, or else return only the branching ratios
Returns
-------
`sandy.BranchingRatio`
object containing branching ratios and associated uncertainties or
only branching ratios if with_uncertainty=False
Examples
--------
>>> endf6 = sandy.get_endf6_file("jeff_33", "decay", [942410, 922350])
>>> rdd = sandy.DecayData.from_endf6(endf6)
>>> rdd.get_branching_ratio()
BR DBR
ZAM RTYP RFS
922350 4 0 1.00000e+00 1.00000e-04
6 0 7.20000e-11 2.10000e-11
942410 4 0 2.44000e-05 0.00000e+00
1 0 9.99976e-01 0.00000e+00
>>> rdd.get_branching_ratio(with_uncertainty=False)
BR
ZAM RTYP RFS
922350 4 0 1.00000e+00
6 0 7.20000e-11
942410 4 0 2.44000e-05
1 0 9.99976e-01
>>> endf6 = sandy.get_endf6_file("jeff_33", "decay", [942410, 10010, 922350])
>>> rdd = sandy.DecayData.from_endf6(endf6)
>>> rdd.get_branching_ratio(with_uncertainty=False)
BR
ZAM RTYP RFS
922350 4 0 1.00000e+00
6 0 7.20000e-11
942410 4 0 2.44000e-05
1 0 9.99976e-01
Decay at first isomeric state:
>>> endf6 = sandy.get_endf6_file("jeff_33", "decay", 942390)
>>> rdd = sandy.DecayData.from_endf6(endf6)
>>> rdd.get_branching_ratio(with_uncertainty=False)
BR
ZAM RTYP RFS
942390 4 0 6.00000e-04
1 9.99400e-01
6 0 3.10000e-12
Stable nuclide:
>>> endf6 = sandy.get_endf6_file("jeff_33", "decay", 260560)
>>> rdd = sandy.DecayData.from_endf6(endf6)
>>> rdd.get_branching_ratio()
Empty DataFrame
Columns: [BR, DBR]
Index: []
"""
br = []
zam = []
rtyp_ = []
rfs_ = []
for z, dic in self.data.items():
if 'decay_modes' in dic.keys():
for (rtyp, rfs), dk in dic['decay_modes'].items():
br.append([
dk['branching_ratio'],
dk['branching_ratio_uncertainty'],
])
rtyp_.append(rtyp)
rfs_.append(rfs)
zam.append(z)
tuples = zip(* [zam,
rtyp_,
rfs_])
idx = pd.MultiIndex.from_tuples(tuples, names=['ZAM', 'RTYP', 'RFS'])
df = pd.DataFrame(br, index=idx, columns=['BR', 'DBR'])
if with_uncertainty:
return BranchingRatio(df)
else:
return BranchingRatio(df.BR)
def get_decay_energy(self, with_uncertainty=True):
"""
Extract decay energy and its uncertainty.
Parameters
----------
with_uncertainty : `bool`, optional, default is 'True'
makes the method return decay energies and uncertainties
if set equal True, or else return only the decay energies
Returns
-------
`sandy.DecayEnergy`
object containing decay energy and associated uncertainty or
only decay energy if with_uncertainty=False
Examples
--------
>>> endf6 = sandy.get_endf6_file("jeff_33", "decay", [942400, 922350])
>>> rdd = sandy.DecayData.from_endf6(endf6)
>>> rdd.get_decay_energy()
E DE
ZAM TYPE
922350 alpha 4.46460e+06 1.63255e+05
beta 5.06717e+04 4.29163e+03
gamma 1.63616e+05 1.70801e+03
942400 alpha 5.24303e+06 3.63881e+04
beta 1.11164e+04 9.02572e+02
gamma 1.36292e+03 1.33403e+02
>>> rdd.get_decay_energy(with_uncertainty=False)
E
ZAM TYPE
922350 alpha 4.46460e+06
beta 5.06717e+04
gamma 1.63616e+05
942400 alpha 5.24303e+06
beta 1.11164e+04
gamma 1.36292e+03
Stable nuclide:
>>> endf6 = sandy.get_endf6_file("jeff_33", "decay", 260560)
>>> rdd = sandy.DecayData.from_endf6(endf6)
>>> rdd.get_decay_energy(with_uncertainty=False)
E
ZAM TYPE
260560 alpha 0.00000e+00
beta 0.00000e+00
gamma 0.00000e+00
"""
decay_energy = []
decay_energy_uncertainty = []
zam = []
for z, dic in self.data.items():
decay_energy.extend([
dic['decay_energy']['alpha'],
dic['decay_energy']['beta'],
dic['decay_energy']['gamma'],
])
decay_energy_uncertainty.extend([
dic['decay_energy_uncertainties']['alpha'],
dic['decay_energy_uncertainties']['beta'],
dic['decay_energy_uncertainties']['gamma'],
])
zam.append(z)
name = ['alpha', 'beta', 'gamma']
df = pd.DataFrame(zip(decay_energy, decay_energy_uncertainty),
index=pd.MultiIndex.from_product([zam, name], names=['ZAM', 'TYPE']),
columns=['E', 'DE'])
if with_uncertainty:
return DecayEnergy(df)
else:
return DecayEnergy(df.E)
def get_decay_chains(self, skip_parents=False, **kwargs):
"""
Extract decay chains into dataframe.
Parameters
----------
skip_parent : `bool`, optional, default is `False`
Returns
-------
`pandas.DataFrame`
decay chains dataframe
Examples
--------
>>> file = os.path.join(sandy.data.__path__[0], "rdd.endf")
>>> endf6 = sandy.Endf6.from_file(file)
>>> rdd = sandy.DecayData.from_endf6(endf6)
>>> rdd.get_decay_chains()
PARENT DAUGHTER YIELD LAMBDA
0 10010 10010 0.00000e+00 0.00000e+00
1 270600 270600 -1.00000e+00 4.16705e-09
2 270600 280600 1.00000e+00 4.16705e-09
3 280600 280600 0.00000e+00 0.00000e+00
>>> rdd.get_decay_chains(skip_parents=True)
PARENT DAUGHTER YIELD LAMBDA
0 270600 280600 1.00000e+00 4.16705e-09
"""
items = []
columns = ["PARENT", "DAUGHTER", "YIELD", "LAMBDA"]
for zam, nucl in sorted(self.data.items()):
yld = 0. if nucl["stable"] else -1.
if not skip_parents: # add also the disappearance of the parent
add = {
"PARENT": zam,
"DAUGHTER": zam,
"YIELD": yld,
"LAMBDA": nucl["decay_constant"]
}
items.append(add)
if nucl["stable"]:
continue
for (rtyp, rfs), decay_mode in nucl["decay_modes"].items():
br = decay_mode["branching_ratio"]
if "decay_products" not in decay_mode:
continue # S.F.
for zap, yld in decay_mode["decay_products"].items():
# add the production of each daughter
add = {
"PARENT": zam,
"DAUGHTER": zap,
"YIELD": yld * br,
"LAMBDA": nucl["decay_constant"]
}
items.append(add)
df = | pd.DataFrame(items) | pandas.DataFrame |
'''
Tools for simple baseline/benchmark forecasts
These methods might serve as the forecast themselves, but are more likely
to be used as a baseline to evaluate if more complex models offer a sufficient
increase in accuracy to justify their use.
Naive1:
Carry last value forward across forecast horizon (random walk)
SNaive:
Carry forward value from last seasonal period
Average:
Carry forward average of observations
Drift:
Carry forward last time period, but allow for upwards/downwards drift.
EnsembleNaive:
An unweighted average of all of the Naive forecasting methods.
'''
import numpy as np
import pandas as pd
from scipy.stats import norm, t
from abc import ABC, abstractmethod
# Boolean, unsigned integer, signed integer, float, complex.
_NUMERIC_KINDS = set('buifc')
def is_numeric(array):
"""Determine whether the argument has a numeric datatype, when
converted to a NumPy array.
Booleans, unsigned integers, signed integers, floats and complex
numbers are the kinds of numeric datatype.
source:
https://codereview.stackexchange.com/questions/128032/check-if-a-numpy-array-contains-numerical-data
Parameters
----------
array : array-like
The array to check.
Returns
-------
is_numeric : `bool`
True if the array has a numeric datatype, False if not.
"""
return np.asarray(array).dtype.kind in _NUMERIC_KINDS
class Forecast(ABC):
'''
Abstract base class for all baseline forecast
methods
'''
def __init__(self):
self._fitted = None
self._t = None
def _get_fitted(self):
return self._fitted['pred']
def _get_resid(self):
return self._fitted['resid']
@abstractmethod
def fit(self, train):
pass
def fit_predict(self, train, horizon, return_predict_int=False,
alpha=None):
'''
Convenience method. Fit model and predict with one call.
Parameters:
---------
train: array-like,
vector, series, or dataframe of the time series used for training.
Values should be floats and not contain any np.nan or np.inf
horizon: int,
forecast horizon.
return_predict_int: bool, optional (default=False)
If True function will return a Tuple
0: point forecasts (mean)
1: matrix of intervals.
alpha: None, or list of floats, optional (default=None)
List of floats between 0 and 1. If return_predict_int == True this
specifies the 100(1-alpha) prediction intervals to return.
Returns:
------
np.array, vector of predictions. length=horizon
'''
self.fit(train)
return self.predict(horizon, return_predict_int=return_predict_int,
alpha=alpha)
def validate_training_data(self, train, min_length=1):
'''
Checks the validity of training data for forecasting
and raises exceptions if required.
1. check is instance of pd.Series, pd.DataFrame or np.ndarray
2. check len is > min_length
Parameters:
---------
        min_length: int optional (default=1)
minimum length of the time series.
'''
if not isinstance(train, (pd.Series, pd.DataFrame, np.ndarray)):
raise TypeError(
'Training data must be pd.Series, pd.DataFrame or np.ndarray')
elif len(train) < min_length:
raise ValueError('Training data is empty')
elif not is_numeric(train):
raise TypeError('Training data must be numeric')
elif np.isnan(np.asarray(train)).any():
raise TypeError(
'Training data contains at least one NaN. '
                + 'Data must all be floats')
elif np.isinf(np.asarray(train)).any():
raise TypeError(
'Training data contains at least one Infinite '
                + 'value (np.Inf). Data must all be floats')
@abstractmethod
def predict(self, horizon, return_predict_int=False, alpha=None):
pass
def _prediction_interval(self, horizon, alpha=None):
'''
Prediction intervals for naive forecast 1 (NF1)
lower = pred - z * std_h
upper = pred + z * std_h
where
std_h = resid_std * sqrt(h)
resid_std = standard deviation of in-sample residuals
h = horizon
See and credit: https://otexts.com/fpp2/prediction-intervals.html
Pre-requisit: Must have called fit()
Parameters:
---------
horizon - int,
forecast horizon
levels - list,
list of floats representing prediction limits
            e.g. [0.80, 0.90, 0.95] will calculate three sets of prediction
            intervals giving limits which will include the actual future
value with probability 80, 90 and 95 percent,
respectively (default = [0.8, 0.95]).
Returns:
--------
list
np.array matricies that contain the lower and upper prediction
limits for each prediction interval specified.
'''
if alpha is None:
alpha = [0.20, 0.05]
zs = [self.interval_multiplier(1-a, self._t - 1) for a in alpha]
pis = []
std_h = self._std_h(horizon)
for z in zs:
hw = z * std_h
pis.append(np.array([self.predict(horizon) - hw,
self.predict(horizon) + hw]).T)
return pis
def interval_multiplier(self, level, dof):
'''
inverse of normal distribution
can be overridden if needed.
'''
x = norm.ppf((1 - level) / 2)
return np.abs(x)
@abstractmethod
def _std_h(self, horizon):
'''
Calculate the standard error of the residuals over
a forecast horizon. This is method specific.
'''
pass
# breaks PEP8 to align with statsmodels naming
fittedvalues = property(_get_fitted)
resid = property(_get_resid)
class Naive1(Forecast):
'''
    Naive forecast 1 or NF1: Carry the last value forward across a
forecast horizon
For details and theory see [1]
Attributes
----------
fittedvalues: pd.Series
In-sample predictions of training data
resid: pd.Series
In-sample residuals
Methods
-------
fit(train)
fit the model to training data
predict(horizon, return_predict_int=False, alpha=None)
Predict h-steps ahead
fit_predict(train, horizons, return_predict_int=False, alpha=None)
convenience method. combine fit() and predict()
See Also
--------
forecast_tools.baseline.SNaive
forecast_tools.baseline.Drift
forecast_tools.baseline.Average
forecast_tools.baseline.EnsembleNaive
References:
----------
[1]. https://otexts.com/fpp2/simple-methods.html
Examples:
--------
Basic fitting and prediction
>>> y_train = np.arange(10)
>>> model = Naive1()
>>> model.fit(y_train)
>>> model.predict(horizon=7)
    array([9., 9., 9., 9., 9., 9., 9.])
fit_predict() convenience method
>>> y_train = np.arange(10)
>>> model = Naive1()
>>> model.fit_predict(y_train, horizon=7)
    array([9., 9., 9., 9., 9., 9., 9.])
    90 and 95% prediction intervals
>>> y_train = np.arange(10)
>>> model = Naive1()
>>> model.fit(y_train)
>>> y_pred, y_intervals = model.predict(horizon=2,
                                            return_predict_int=True,
alpha=[0.1, 0.05])
>>> y_pred
    array([9., 9.])
>>> y_intervals[0]
array([[ 7.71844843, 10.28155157],
[ 7.1876124 , 10.8123876 ]])
>>> y_intervals[1]
array([[ 7.35514637, 10.64485363],
[ 6.67382569, 11.32617431]])
Fitted values (one step in-sample predictions)
    .fittedvalues returns a pandas.Series called pred
>>> y_train = np.arange(5)
>>> model = Naive1()
>>> model.fit(y_train)
>>> model.fittedvalues
0 NaN
1 0.0
2 1.0
3 2.0
4 3.0
Name: pred, dtype: float64
'''
def __init__(self):
'''
Constructor method
'''
self._fitted = None
def __repr__(self):
'''
String representation of object
'''
return f'Naive1()'
def __str__(self):
'''
Print/str representation of object
'''
return f'Naive1()'
def fit(self, train):
'''
Train the naive model
Parameters:
--------
train - array-like,
vector, series, or dataframe of the time series used for training.
Values should be floats and not contain any np.nan or np.inf
'''
self.validate_training_data(train)
_train = np.asarray(train)
self._pred = _train[-1]
self._fitted = pd.DataFrame(_train)
if isinstance(train, (pd.DataFrame, pd.Series)):
self._fitted.index = train.index
self._t = len(_train)
self._fitted.columns = ['actual']
self._fitted['pred'] = self._fitted['actual'].shift(periods=1)
self._fitted['resid'] = self._fitted['actual'] - self._fitted['pred']
self._resid_std = np.sqrt(np.nanmean(np.square(self._fitted['resid'])))
def predict(self, horizon, return_predict_int=False, alpha=None):
'''
Forecast and optionally produce 100(1-alpha) prediction intervals.
Prediction intervals for naive forecast 1 (NF1)
lower = pred - z * std_h
upper = pred + z * std_h
where
std_h = resid_std * sqrt(h)
resid_std = standard deviation of in-sample residuals
h = horizon
See and credit: https://otexts.com/fpp2/prediction-intervals.html
Pre-requisit: Must have called fit()
Parameters:
--------
horizon - int,
forecast horizon.
return_predict_int: bool, optional
if True calculate 100(1-alpha) prediction
intervals for the forecast. (default=False)
alpha: list of floats, optional (default=None)
controls set of prediction intervals returned and the width of
each.
Intervals are 100(1-alpha) in width. e.g. [0.2, 0.1]
would return the 80% and 90% prediction intervals of the forecast
distribution. default=None. When return_predict_int = True the
default behaviour is to return 80 and 90% intervals.
Returns:
-------
if return_predict_int = False
np.array, vector of predictions. length=horizon
if return_predict_int = True then returns a tuple.
0. np.array, vector of predictions. length=horizon
1. list of numpy.array[lower_pi, upper_pi].
One for each prediction interval.
'''
if self._fitted is None:
raise UnboundLocalError('Must call fit() prior to predict()')
if alpha is None:
alpha = [0.2, 0.1]
preds = np.full(shape=horizon, fill_value=self._pred, dtype=float)
if return_predict_int:
return preds, self._prediction_interval(horizon, alpha)
else:
return preds
def _std_h(self, horizon):
'''
Calculate the sample standard deviation.
'''
indexes = np.sqrt(np.arange(1, horizon+1))
std = np.full(shape=horizon,
fill_value=self._resid_std,
                      dtype=float)
std_h = std * indexes
return std_h
class SNaive(Forecast):
'''
Seasonal Naive Forecast SNF
Each forecast to be equal to the last observed value from the
same season of the year (e.g., the same month of the previous year).
SNF is useful for highly seasonal data. See [1]
Attributes
----------
fittedvalues: pd.Series
In-sample predictions of training data
resid: pd.Series
In-sample residuals
Methods
-------
fit(train)
fit the model to training data
predict(horizon, return_predict_int=False, alpha=None)
Predict h-steps ahead
fit_predict(train, horizons, return_predict_int=False, alpha=None)
convenience method. combine fit() and predict()
See Also
--------
forecast_tools.baseline.Naive1
forecast_tools.baseline.Drift
forecast_tools.baseline.Average
forecast_tools.baseline.EnsembleNaive
References:
-----------
[1]. https://otexts.com/fpp2/simple-methods.html
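    Examples:
    --------
    Illustrative use with weekly data (period=7)
    >>> y_train = np.arange(14)
    >>> model = SNaive(period=7)
    >>> model.fit(y_train)
    >>> model.predict(horizon=7)
    array([ 7.,  8.,  9., 10., 11., 12., 13.])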
'''
def __init__(self, period):
'''
Parameters:
--------
period - int, the seasonal period of the daya
e.g. weekly = 7, monthly = 12, daily = 24
'''
self._period = period
self._fitted = None
def __repr__(self):
'''
String representation of object
'''
return f'SNaive1(period={self._period})'
def __str__(self):
'''
Print/str representation of object
'''
return f'SNaive1(period={self._period})'
def fit(self, train):
'''
Seasonal naive forecast - train the model
Parameters:
--------
train: array-like.
vector, pd.DataFrame or pd.Series containing the time series used
for training. Values should be floats and not contain any np.nan
or np.inf
'''
self.validate_training_data(train, min_length=self._period)
# could refactor this to be more like Naive1's simpler implementation.
if isinstance(train, (pd.Series)):
self._f = np.asarray(train)[-self._period:]
_train = train.to_numpy()
self._fitted = pd.DataFrame(_train, index=train.index)
elif isinstance(train, (pd.DataFrame)):
self._f = train.to_numpy().T[0][-self._period:]
_train = train.copy()[train.columns[0]].to_numpy()
self._fitted = pd.DataFrame(_train, index=train.index)
else:
self._f = train[-self._period:]
_train = train.copy()
self._fitted = pd.DataFrame(_train)
self._t = len(_train)
self._fitted.columns = ['actual']
self._fitted['pred'] = self._fitted['actual'].shift(self._period)
self._fitted['resid'] = self._fitted['actual'] - self._fitted['pred']
self._resid_std = np.sqrt(np.nanmean(np.square(self._fitted['resid'])))
def predict(self, horizon, return_predict_int=False, alpha=None):
'''
Predict time series over a horizon
Parameters:
--------
horizon - int,
forecast horizon.
return_predict_int: bool, optional
if True calculate 100(1-alpha) prediction
intervals for the forecast. (default=False)
alpha: list of floats, optional (default=None)
controls set of prediction intervals returned and the width of
each.
Intervals are 100(1-alpha) in width. e.g. [0.2, 0.1]
would return the 80% and 90% prediction intervals of the forecast
distribution. default=None. When return_predict_int = True the
default behaviour is to return 80 and 90% intervals.
Returns:
--------
if return_predict_int = False
np.array, vector of predictions. length=horizon
if return_predict_int = True then returns a tuple.
0. np.array, vector of predictions. length=horizon
1. list of numpy.array[lower_pi, upper_pi].
One for each prediction interval.
'''
if self._fitted is None:
raise UnboundLocalError('Must call fit() prior to predict()')
if alpha is None:
alpha = [0.2, 0.1]
preds = np.array([], dtype=float)
for _ in range(0, int(horizon/self._period)):
preds = np.concatenate([preds, self._f.copy()], axis=0)
preds = np.concatenate([preds,
self._f.copy()[:horizon % self._period]],
axis=0)
if return_predict_int:
return preds, self._prediction_interval(horizon, alpha)
else:
return preds
def _std_h(self, horizon):
h = np.arange(1, horizon+1)
# need to query if should be +1 or not.
return self._resid_std * \
            np.sqrt(((h - 1) / self._period).astype(int) + 1)
class Average(Forecast):
'''
Average forecast. Forecast is set to the average
of the historical data.
See for discussion of the average as a forecat measure [1]
Attributes
----------
fittedvalues: pd.Series
In-sample predictions of training data
resid: pd.Series
In-sample residuals
Methods
-------
fit(train)
fit the model to training data
predict(horizon, return_predict_int=False, alpha=None)
Predict h-steps ahead
fit_predict(train, horizons, return_predict_int=False, alpha=None)
convenience method. combine fit() and predict()
See Also
--------
forecast_tools.baseline.Naive1
forecast_tools.baseline.SNaive
forecast_tools.baseline.Drift
forecast_tools.baseline.EnsembleNaive
References:
-----------
[1.] Makridakis, Wheelwright and Hyndman. Forecasting (1998)
'''
def __init__(self):
self._pred = None
self._fitted = None
def __repr__(self):
'''
String representation of object
'''
return f'Average()'
def __str__(self):
'''
Print/str representation of object
'''
return f'Average()'
def _get_fitted(self):
return self._fitted['pred']
def _get_resid(self):
return self._fitted['resid']
def fit(self, train):
'''
Train the model
Parameters:
--------
train: arraylike
vector, pd.series, pd.DataFrame,
Time series used for training. Values should be floats
and not contain any np.nan or np.inf
'''
self.validate_training_data(train)
if isinstance(train, (pd.DataFrame)):
_train = train.copy()[train.columns[0]].to_numpy()
self._fitted = pd.DataFrame(_train, index=train.index)
elif isinstance(train, (pd.Series)):
_train = train.to_numpy()
self._fitted = pd.DataFrame(_train, index=train.index)
else:
_train = train.copy()
self._fitted = pd.DataFrame(train)
self._fitted.columns = ['actual']
self._t = len(_train)
self._pred = _train.mean()
# ddof set to get sample mean
self._resid_std = (_train - self._pred).std(ddof=1)
self._fitted['pred'] = self._pred
self._fitted['resid'] = self._fitted['actual'] - self._fitted['pred']
def predict(self, horizon, return_predict_int=False, alpha=None):
'''
Predict time series over a horizon
Parameters:
--------
horizon - int, forecast horizon.
return_predict_int: bool, optional
if True calculate 100(1-alpha) prediction
intervals for the forecast. (default=False)
alpha: list of floats, optional (default=None)
controls set of prediction intervals returned and the width of
each.
Intervals are 100(1-alpha) in width. e.g. [0.2, 0.1]
would return the 80% and 90% prediction intervals of the forecast
distribution. default=None. When return_predict_int = True the
default behaviour is to return 80 and 90% intervals.
Returns:
--------
if return_predict_int = False
np.array, vector of predictions. length=horizon
if return_predict_int = True then returns a tuple.
0. np.array, vector of predictions. length=horizon
1. list of numpy.array[lower_pi, upper_pi].
One for each prediction interval.
'''
if self._fitted is None:
raise UnboundLocalError('Must call fit() prior to predict()')
if alpha is None:
alpha = [0.2, 0.1]
preds = np.full(shape=horizon, fill_value=self._pred, dtype=float)
if return_predict_int:
return preds, self._prediction_interval(horizon, alpha)
else:
return preds
def interval_multiplier(self, level, dof):
'''
inverse of student t distribution
'''
x = t.ppf((1 - level) / 2, dof)
return np.abs(x)
def _std_h(self, horizon):
std = self._resid_std * np.sqrt(1 + (1/self._t))
        return np.full(shape=horizon, fill_value=std, dtype=float)
class Drift(Forecast):
'''
Naive1 forecast with drift
Carry the last value foreward across a forecast horizon but
allow for upwards of downwards drift defined in [1]
Drift = average change in the historical data.
Note. The current implementation has a standard error of the forecast
that is the same as for the naive1 se. This could be adjusted for drift.
The following link suggests this is minor and benchmark with R is v.similar
[2]
Attributes
----------
fittedvalues: pd.Series
In-sample predictions of training data
resid: pd.Series
In-sample residuals
Methods
-------
fit(train)
fit the model to training data
predict(horizon, return_predict_int=False, alpha=None)
Predict h-steps ahead
fit_predict(train, horizons, return_predict_int=False, alpha=None)
convenience method. combine fit() and predict()
See Also
--------
forecast_tools.baseline.Naive1
forecast_tools.baseline.SNaive
forecast_tools.baseline.Average
forecast_tools.baseline.EnsembleNaive
References:
-----------
[1]. https://otexts.com/fpp2/simple-methods.html
[2]. https://www.coursehero.com/file/p12k3ln/For-the-random-walk-with-drift-model-the-1-step-ahead-forecast-standard-error/
'''
def __init__(self):
self._fitted = None
def __repr__(self):
'''
String representation of object
'''
return f'Drift()'
def __str__(self):
'''
Print/str representation of object
'''
return f'Drift()'
def _get_fitted_gradient(self):
return self._fitted['gradient_fit']
def fit(self, train):
'''
Train the naive with drift model
Parameters:
--------
train: arraylike
vector, pd.series, pd.DataFrame,
Time series used for training. Values should be floats
and not contain any np.nan or np.inf
'''
self.validate_training_data(train)
# if dataframe convert to series for compatability with
# proc (for convenience of passing the dataframe rather than a series)
if isinstance(train, (pd.DataFrame)):
_train = train.copy()[train.columns[0]].to_numpy()
self._fitted = pd.DataFrame(_train, index=train.index)
elif isinstance(train, (pd.Series)):
_train = train.to_numpy()
self._fitted = pd.DataFrame(_train, index=train.index)
else:
_train = train.copy()
self._fitted = | pd.DataFrame(train) | pandas.DataFrame |
from collections import defaultdict
import copy
import json
import numpy as np
import pandas as pd
import pickle
import scipy
import seaborn as sb
import torch
from allennlp.common.util import prepare_environment, Params
from matplotlib import pyplot as plt
from pytorch_pretrained_bert import BertTokenizer, BertModel
from scipy.stats import entropy
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import accuracy_score, mean_squared_error
from probing.globals import *
from probing.helpers import _reg_r2
from probing.tasks import ProbingTask
class Analytics:
def __init__(self, workspace):
self.directories = {d: os.path.join(workspace, d) for d in ["out", "tasks", "datasets", "configs"]}
self.scalar_mixes = None
self.tokenizer = None
self.embedder = None
# === Data statistics
def task_statistics(self):
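        """Collect per-split statistics (sentences, instances, labels) for every probing task found in the workspace 'tasks' folder and return them as a DataFrame."""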
data = []
for task_id in sorted(os.listdir(self.directories["tasks"])):
config = ProbingTask.parse_id(task_id)
stats = json.load(open(os.path.join(self.directories["tasks"], task_id, "_stats.json")))
for split in stats:
c = copy.deepcopy(config)
c["sentences"] = stats[split]["total_sentences"]
c["instances"] = stats[split]["total_instances"]
c["labels"] = stats[split]["total_labels"]
c["split"] = split
data += [c]
return | pd.DataFrame(data) | pandas.DataFrame |
import os
os.system('apt-get clean')
os.system('mv /var/lib/apt/lists /var/lib/apt/lists.old')
os.system('mkdir -p /var/lib/apt/lists/partial')
os.system('apt-get clean')
os.system('apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 04EE7237B7D453EC')
os.system('apt-key adv --keyserver keyserver.ubuntu.com --recv-keys <KEY>')
os.system('apt-get update')
os.system('apt-get -y install gcc build-essential libdpkg-perl')
os.system("pip3 install hyperopt")
os.system("pip3 install lightgbm")
os.system("pip3 install pandas==0.24.2")
os.system("pip3 install featuretools==0.7.1")
#os.system("pip3 install category-encoders==1.2.7")
import copy
import numpy as np
import pandas as pd
import datetime
import time
from automl import predict, train, validate
from CONSTANT import MAIN_TABLE_NAME
from merge import merge_table, merge_table_v2, FT_process
from preprocess import clean_df, clean_tables, feature_engineer, process_main_cat, process_cat_label, trans2basicInfo, trans2interval, process_relation_cat, process_relation_time, process_relation_cat_v2, trans2weekday, trans2hour, trans2day
from util import Config, log, show_dataframe, timeit
import warnings
warnings.filterwarnings('ignore')
class Model:
def __init__(self, info):
self.config = Config(info)
self.tables = None
self.lables = None
@timeit
def fit(self, Xs, y, time_ramain):
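        """Only cache the input tables and labels here; the actual feature engineering and training are deferred to predict() so that train and test data can be processed together."""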
self.tables = Xs
self.lables = y
'''
clean_tables(Xs)
#if row_number > 700000 and time < 700:
# X = merge_table(Xs, self.config) # 表连接
#else:
config2 = copy.deepcopy(self.config)
merge_table_v2(Xs, self.config)
# clean_tables(Xs)
X = FT_process(Xs, self.config) # 考虑时间窗后会改变index顺序
#times = X["t_01"].min() + self.config["window_number"] * datetime.timedelta(seconds=self.config["timeBucket"])
#X = X[X["t_01"] > times]
X.sort_index(inplace=True)
self.config["tables"] = config2["tables"]
self.config["relations"] = config2["relations"]
clean_df(X)
feature_engineer(X, self.config)
# new_y=y.loc[X.index]
# train_X, train_y=sampling(X, new_y)
# train(train_X, train_y, self.config)
train(X, y.loc[X.index], self.config)
'''
@timeit
def predict(self, X_test, time_remain):
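        """
        Full pipeline: concatenate the cached training main table with X_test, clean the
        tables, derive categorical / relational / time-based features, merge the related
        tables via FT_process, train on the time-ordered training rows and return the
        predictions for the test rows as a pd.Series.
        """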
Xs = self.tables
main_table = Xs[MAIN_TABLE_NAME]
main_time_index = main_table[["t_01"]].sort_values("t_01")
# catLabel_dict = process_cat_label(main_table, self.lables.loc[main_table.index]) # modified By 05.30
main_table = pd.concat([main_table, X_test], keys=['train', 'test'])
main_table.index = main_table.index.map(lambda x: f"{x[0]}_{x[1]}")
Xs[MAIN_TABLE_NAME] = main_table
clean_tables(Xs, self.config, fill=True)
main_table = Xs[MAIN_TABLE_NAME]
main_cat_cols = [col for col in main_table.columns if (col.startswith("c_") or col.startswith("m_")) and len(main_table[col].unique())>1]
total_num_fea = 0
        catFea_dict, total_num_fea = process_main_cat(main_table, main_cat_cols, total_num_fea)  # extra categorical-derived features for main, built from the main table's own categorical columns
print("total_num Fea:", total_num_fea)
catFea_dicts = []
relation_catFea_dicts = []
relation_time_dicts = []
relation_catFea_dicts2 = []
        if total_num_fea < 150:  # the main table's derived features are still few, so more can be added
for relation in self.config['relations']:
tableA=relation["table_A"]
l_type=relation["type"].split("_")[0]
tableB = relation["table_B"]
r_type = relation["type"].split("_")[2]
key=relation["key"][0]
if tableA=="main" and l_type=="many" and r_type=="one": #and "t_01" not in Xs[tableB].columns: # 这里比较定制,后期需要改
'''
temp_main_cat = main_table[main_cat_cols]
relation_num_cols = [col for col in Xs[tableB].columns if col.startswith("n_")]
temp_tableB_num = Xs[tableB][[key]+relation_num_cols]
temp_tableB_num = temp_tableB_num.set_index(key)
temp_main_cat = temp_main_cat.join(temp_tableB_num, on=key)
temp_dict, total_num_fea = process_main_cat_v2(temp_main_cat, main_cat_cols, key, tableB, total_num_fea) #main的类别,relation的numerical
catFea_dicts.append(temp_dict)
if total_num_fea > 150: break
'''
Xs[tableB].drop_duplicates([key], inplace=True)
relation_cat_cols = [col for col in Xs[tableB].columns if
(col.startswith("c_") or col.startswith("m_")) and len(Xs[tableB][col].unique()) > 1]
temp_tableB_cat=Xs[tableB][relation_cat_cols]
if key in main_table and key in temp_tableB_cat:
temp_main_num = main_table[[key]]
temp_tableB_cat = temp_tableB_cat.set_index(key)
temp_main_num = temp_main_num.join(temp_tableB_cat, on=key)
                        relation_temp_dict, total_num_fea = process_relation_cat(temp_main_num, relation_cat_cols, key, tableB, total_num_fea)  # relation table's categorical columns with the main table's numerical columns
#relation_catFea_dicts.append(relation_temp_dict)
relation_catFea_dicts=relation_catFea_dicts+relation_temp_dict
# if total_num_fea > 150: break
'''
temp_tableB_cat = Xs[tableB][relation_cat_cols]
relation_temp_dict2, total_num_fea = process_relation_cat_v2(temp_tableB_cat, relation_cat_cols, key,
tableB,
total_num_fea)
relation_catFea_dicts2.append(relation_temp_dict2)
'''
relation_time_cols = [col for col in Xs[tableB].columns if col.startswith("t_")]
if len(relation_time_cols) > 0:
if key in Xs[tableB] and key in main_table and "t_01" in main_table:
temp_tableB_time = Xs[tableB][[key]+relation_time_cols]
temp_tableB_time.columns = [col+"_in_"+tableB if col.startswith("t_") else col for col in temp_tableB_time.columns]
temp_main_time = main_table[[key] + ["t_01"]]
temp_tableB_time = temp_tableB_time.set_index(key)
temp_main_time = temp_main_time.join(temp_tableB_time, on=key)
temp_main_time.drop(key, axis=1, inplace=True)
#print("time_test v1")
#print(temp_main_time.head())
temp_main_time = process_relation_time(temp_main_time)
relation_time_dicts.append(temp_main_time)
'''
temp_tableB = Xs[tableB].set_index(key)
temp_main_key = main_table[[key]]
temp_main_key = temp_main_key.join(temp_tableB, on=key)
relation_temp_dict2, total_num_fea = process_relation_cat_v2(temp_main_key, relation_cat_cols, key,
tableB, total_num_fea)
del temp_main_key
del temp_tableB
relation_catFea_dicts2.append(relation_temp_dict2)
if total_num_fea > 150: break
'''
'''
#if len(relation_time_dicts) > 0:
main_time_col=[col for col in main_table.columns if col.startswith("t_")]
temp_main_time = main_table[main_time_col]
for col in main_time_col:
temp_main_time["n_weekday_" + col], temp_main_time["n_hour_" + col], temp_main_time["n_day_" + col]=zip(*temp_main_time[col].map(trans2basicInfo))
# temp_main_time["n_weekday_" + col] = temp_main_time[col].apply(trans2weekday)
# temp_main_time["n_hour_" + col] = temp_main_time[col].apply(trans2hour)
# temp_main_time["n_day_" + col] = temp_main_time[col].apply(trans2day)
if not col.startswith("t_0"):
temp_main_time["n_interval_" + col] = (temp_main_time[col] - temp_main_time["t_01"]).map(trans2interval)
temp_main_time.drop(main_time_col, axis=1, inplace=True)
relation_time_dicts.append(temp_main_time)
print("Processing Trans to main time")
'''
# Xs[MAIN_TABLE_NAME] = main_table
# clean_tables(Xs, self.config, fill=True)
merge_table_v2(Xs, self.config)
#clean_tables(Xs)
X = FT_process(Xs, self.config)
del Xs
del self.tables
del main_table
#print(X.shape)
'''
for catLabel in catLabel_dict:
# print(catLabel_dict[catLabel].head())
if catLabel in X.columns:
X = X.join(catLabel_dict[catLabel], on=catLabel)
'''
t1=time.time()
useful_catFea=[catFea_dict[catFea] for catFea in catFea_dict if catFea in X.columns]
X = pd.concat([X] + useful_catFea, axis=1)
print("processing process_main_cat")
'''
for catFea in catFea_dict:
if catFea in X.columns:
#print(catFea_dict[catFea].head())
X = X.join(catFea_dict[catFea], on=catFea)
print("processing process_main_cat")
#print(X.head())
'''
del catFea_dict
'''
for catFea_dict2 in catFea_dicts:
for catFea in catFea_dict2:
if catFea in X.columns:
#print(catFea_dict2[catFea].head())
X = X.join(catFea_dict2[catFea], on=catFea)
print("processing process_main_cat_v2")
#print(X.head())
del catFea_dicts
'''
'''
for relation_catFea_dict in relation_catFea_dicts:
for relation_catFea in relation_catFea_dict:
#print(relation_catFea_dict[relation_catFea].head())
if relation_catFea in X.columns:
z=yield(relation_catFea_dict[relation_catFea])
# X = X.join(relation_catFea_dict[relation_catFea], on=relation_catFea)
print("processing process_relation_cat")
#print(X.head())
'''
X = pd.concat([X] + relation_catFea_dicts, axis=1)
del relation_catFea_dicts
if len(relation_time_dicts) > 0:
X = pd.concat([X]+relation_time_dicts, axis=1)
print("processing process_relation_time")
#print(X.shape)
#print(X.head())
del relation_time_dicts
'''
for relation_catFea_dict2 in relation_catFea_dicts2:
for relation_catFea in relation_catFea_dict2:
#print(relation_catFea_dict2[relation_catFea].head())
if relation_catFea in X.columns:
X = X.join(relation_catFea_dict2[relation_catFea], on=relation_catFea)
print("processing process_relation_cat_v2")
#print(X.head())
del relation_catFea_dicts2
'''
t2=time.time()
print("cat join cost time: ", t2-t1)
#print(X.head())
X.columns = [
"m_" + c if (".m_" in c) and ("MEAN" not in c) and ("SUM" not in c) and (
"COUNT" not in c) and ("N_UNIQUE" not in c) and ("N_TIME" not in c) else c for c in X.columns]
X.columns = [
"c_" + c if (".c_" in c) and ("MEAN" not in c) and ("SUM" not in c) and (
"COUNT" not in c) and ("N_UNIQUE" not in c) and ("N_TIME" not in c) else c for c in X.columns]
X.columns = [
"n_" + c if not c.startswith("n_") and not c.startswith("m_") and not c.startswith("c_") and not c.startswith("t_") else c for c in X.columns]
#print(X.columns)
print("Column Number:",len(X.columns))
clean_df(X, "no_table", self.config)
feature_engineer(X, self.config, len(X.columns), self.lables)
X_train = X[X.index.str.startswith("train")]
X_train.index = X_train.index.map(lambda x: int(x.split('_')[1]))
X_train.sort_index(inplace=True)
#train(X_train, self.lables.loc[X_train.index], self.config)
        train(X_train.loc[main_time_index.index], self.lables.loc[main_time_index.index], self.config)  # train on rows sorted by time
del main_time_index
X = X[X.index.str.startswith("test")]
X.index = X.index.map(lambda x: int(x.split('_')[1]))
X.sort_index(inplace=True)
result = predict(X, self.config)
return | pd.Series(result) | pandas.Series |
import os
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import tushare as ts
import datetime
#ts.set_token('09f77414f088aad7959f5eecba391fe685ea50462e208ce451b1b6a6')
pro = ts.pro_api('09f77414f088aad7959f5eecba391fe685ea50462e208ce451b1b6a6')
StockBasic = pro.query('stock_basic', list_status='L')
# main-board stocks listed before 2015-12-31
stockcodepool = StockBasic[(StockBasic['list_date']<'20151231') & (StockBasic['market']=='主板')]
# find the peak value of stock price in year 2015
Time_rangemin='20150101'
Time_rangemax='20151231'
# date of today and adjustment factors
todaydate=datetime.datetime.today().strftime('%Y%m%d')
#AdjustmentFactorToday = pro.adj_factor(ts_code='', trade_date=todaydate)
#AdjustmentFactorTimeRangeMax = pro.adj_factor(ts_code='', trade_date=Time_rangemax)
HighPoint2015 = | pd.DataFrame(columns=['ts_code', 'HighPoint2015']) | pandas.DataFrame |
#!/usr/bin/env python3
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
import subprocess
import glob
import re
from helperFunctions.myFunctions_helper import *
import numpy as np
import pandas as pd
import fileinput
from itertools import product
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB import PDBList
from pdbfixer import PDBFixer
from simtk.openmm.app import PDBFile
# compute cross Q for every pdb pair in one folder
# parser = argparse.ArgumentParser(description="Compute cross q")
# parser.add_argument("-m", "--mode",
# type=int, default=1)
# args = parser.parse_args()
def getFromTerminal(CMD):
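    """Run CMD through a shell and return its captured stdout as a decoded string."""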
return subprocess.Popen(CMD,stdout=subprocess.PIPE,shell=True).communicate()[0].decode()
def read_hydrophobicity_scale(seq, isNew=False):
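    """
    Map every residue of `seq` to its whole-residue hydrophobicity-scale entry read from
    ~/opt/small_script/Whole_residue_HFscales.txt. With isNew=False HIS is matched to the
    charged "HIS+" row, with isNew=True to the neutral "HIS0" row. Returns a DataFrame
    with one row per residue of the input sequence.
    """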
seq_dataFrame = pd.DataFrame({"oneLetterCode":list(seq)})
HFscales = pd.read_table("~/opt/small_script/Whole_residue_HFscales.txt")
if not isNew:
# Octanol Scale
# new and old difference is at HIS.
code = {"GLY" : "G", "ALA" : "A", "LEU" : "L", "ILE" : "I",
"ARG+" : "R", "LYS+" : "K", "MET" : "M", "CYS" : "C",
"TYR" : "Y", "THR" : "T", "PRO" : "P", "SER" : "S",
"TRP" : "W", "ASP-" : "D", "GLU-" : "E", "ASN" : "N",
"GLN" : "Q", "PHE" : "F", "HIS+" : "H", "VAL" : "V",
"M3L" : "K", "MSE" : "M", "CAS" : "C"}
else:
code = {"GLY" : "G", "ALA" : "A", "LEU" : "L", "ILE" : "I",
"ARG+" : "R", "LYS+" : "K", "MET" : "M", "CYS" : "C",
"TYR" : "Y", "THR" : "T", "PRO" : "P", "SER" : "S",
"TRP" : "W", "ASP-" : "D", "GLU-" : "E", "ASN" : "N",
"GLN" : "Q", "PHE" : "F", "HIS0" : "H", "VAL" : "V",
"M3L" : "K", "MSE" : "M", "CAS" : "C"}
HFscales_with_oneLetterCode = HFscales.assign(oneLetterCode=HFscales.AA.str.upper().map(code)).dropna()
data = seq_dataFrame.merge(HFscales_with_oneLetterCode, on="oneLetterCode", how="left")
return data
def create_zim(seqFile, isNew=False):
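    """Write a 'zim' file containing one DGwoct hydrophobicity value per residue of the sequence stored in seqFile."""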
a = seqFile
seq = getFromTerminal("cat " + a).rstrip()
data = read_hydrophobicity_scale(seq, isNew=isNew)
z = data["DGwoct"].values
np.savetxt("zim", z, fmt="%.2f")
def expand_grid(dictionary):
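    """Return a DataFrame holding the Cartesian product of the dictionary's value lists, one column per key."""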
return pd.DataFrame([row for row in product(*dictionary.values())],
columns=dictionary.keys())
def duplicate_pdb(From, To, offset_x=0, offset_y=0, offset_z=0, new_chain="B"):
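    """
    Copy the PDB file `From` to `To`, translating the ATOM coordinates by
    (offset_x, offset_y, offset_z) and relabeling the chain ID to `new_chain`.
    """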
with open(To, "w") as out:
with open(From, "r") as f:
for line in f:
tmp = list(line)
atom = line[0:4]
atomSerialNumber = line[6:11]
atomName = line[12:16]
atomResidueName = line[17:20]
chain = line[21]
residueNumber = line[22:26]
# change chain A to B
# new_chain = "B"
tmp[21] = new_chain
if atom == "ATOM":
x = float(line[30:38])
y = float(line[38:46])
z = float(line[46:54])
# add 40 to the x
new_x = x + offset_x
new_y = y + offset_y
new_z = z + offset_z
tmp[30:38] = "{:8.3f}".format(new_x)
tmp[38:46] = "{:8.3f}".format(new_y)
tmp[46:54] = "{:8.3f}".format(new_z)
a = "".join(tmp)
out.write(a)
def compute_native_contacts(coords, MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
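    """
    Build the native-contact map from a list of residue coordinates: returns an NxN 0/1
    matrix marking pairs that are at least MAX_OFFSET apart in sequence and closer than
    DISTANCE_CUTOFF in space.
    """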
native_coords = np.array(coords)
a= native_coords[:,np.newaxis]
dis = np.sqrt(np.sum((a - native_coords)**2, axis=2))
n = len(dis)
remove_band = np.eye(n)
for i in range(1, MAX_OFFSET):
remove_band += np.eye(n, k=i)
remove_band += np.eye(n, k=-i)
dis[remove_band==1] = np.max(dis)
native_contacts = dis < DISTANCE_CUTOFF
return native_contacts.astype("int")
def compute_contacts(coords, native_contacts, DISTANCE_CUTOFF=9.5):
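    """
    For a single frame, count how many of each residue's native contacts are currently
    within DISTANCE_CUTOFF; returns one float per residue.
    """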
native_coords = np.array(coords)
    a = native_coords[:, np.newaxis]
    dis = np.sqrt(np.sum((a - native_coords)**2, axis=2))
    contacts = dis < DISTANCE_CUTOFF
    contacts = contacts * native_contacts  # keep only the native contacts
    return np.sum(contacts, axis=1).astype("float")
def compute_localQ_init(MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
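    """Build the native-contact table of 2xov from ~/opt/pulling/2xov.pdb, using CB coordinates (CA for GLY)."""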
from pathlib import Path
home = str(Path.home())
struct_id = '2xov'
filename = os.path.join(home, "opt/pulling/2xov.pdb")
p = PDBParser(PERMISSIVE=1)
s = p.get_structure(struct_id, filename)
chains = s[0].get_list()
# import pdb file
native_coords = []
for chain in chains:
for res in chain:
is_regular_res = res.has_id('CA') and res.has_id('O')
res_id = res.get_id()[0]
if (res.get_resname()=='GLY'):
native_coords.append(res['CA'].get_coord())
elif (res_id==' ' or res_id=='H_MSE' or res_id=='H_M3L' or res_id=='H_CAS') and is_regular_res:
native_coords.append(res['CB'].get_coord())
else:
print('ERROR: irregular residue at %s!' % res)
                sys.exit()
native_contacts_table = compute_native_contacts(native_coords, MAX_OFFSET, DISTANCE_CUTOFF)
return native_contacts_table
def compute_localQ(native_contacts_table, pre=".", ii=-1, MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
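    """
    Compute the per-residue fraction of native contacts (local Q) for every frame of
    dump.lammpstrj.{ii} under `pre` and write it to localQ.{ii}.csv (columns Res1, Res2, ...).
    """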
native_contacts = np.sum(native_contacts_table, axis=1).astype("float")
dump = read_lammps(os.path.join(pre, f"dump.lammpstrj.{ii}"), ca=False)
localQ_list = []
for atom in dump:
contacts = compute_contacts(np.array(atom), native_contacts_table, DISTANCE_CUTOFF=DISTANCE_CUTOFF)
c = np.divide(contacts, native_contacts, out=np.zeros_like(contacts), where=native_contacts!=0)
localQ_list.append(c)
data = pd.DataFrame(localQ_list)
data.columns = ["Res" + str(i+1) for i in data.columns]
data.to_csv(os.path.join(pre, f"localQ.{ii}.csv"), index=False)
def readPMF_basic(pre):
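    """
    Collect the PMF profiles (pmf-*.dat and perturbation-*-pmf-*.dat) found under `pre`
    into one DataFrame, tagging every row with its temperature and perturbation label.
    """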
# perturbation_table = {0:"original", 1:"p_mem",
# 2:"m_mem", 3:"p_lipid",
# 4:"m_lipid", 5:"p_go",
# 6:"m_go", 7:"p_rg", 8:"m_rg"}
perturbation_table = {0:"original", 1:"m_go",
2:"p_go", 3:"m_lipid",
4:"p_lipid", 5:"m_mem",
6:"p_mem", 7:"m_rg", 8:"p_rg"}
pmf_list = {
"perturbation":list(perturbation_table.keys())
}
pmf_list_data = expand_grid(pmf_list)
all_pmf_list = []
for index, row in pmf_list_data.iterrows():
perturbation = row["perturbation"]
if perturbation == 0:
location = pre + f"/pmf-*.dat"
pmf_list = glob.glob(location)
change = "none"
upOrDown = "none"
else:
location = pre + f"/perturbation-{perturbation}-pmf-*.dat"
pmf_list = glob.glob(location)
change = perturbation_table[perturbation].split("_")[-1]
upOrDown = perturbation_table[perturbation].split("_")[0]
# print(location)
name_list = ["f", "df", "e", "s"]
names = ["bin", "x"] + name_list
for location in pmf_list:
# print(location)
temp = re.findall(r'pmf-(\d+)', location)
if len(temp) != 1:
raise ValueError('Not expected to see more than one or none')
else:
temp = temp[0]
            data = pd.read_table(location, skiprows=2, sep=r'\s+', names=names).assign(upOrDown=upOrDown, change=change, temp=temp, perturbation=perturbation_table[perturbation])
all_pmf_list.append(data)
return pd.concat(all_pmf_list).dropna().reset_index()
def make_metadata_3(k=1000.0, temps_list=["450"], i=-1, biasLow=None, biasHigh=None):
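    """
    Write a 'metadatafile' for the umbrella-sampling data in ../data_{i}/: one line per
    window with the data file path, temperature, spring constant k and bias center.
    Only temperatures in temps_list and bias centers within [biasLow, biasHigh] are kept.
    """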
print("make metadata")
cwd = os.getcwd()
files = glob.glob(f"../data_{i}/*")
kconstant = k
with open("metadatafile", "w") as out:
for oneFile in sorted(files):
tmp = oneFile.split("/")[-1].replace('.dat', '')
t = tmp.split("_")[1]
bias = tmp.split("_")[3]
if biasLow:
if float(bias) < biasLow:
continue
if biasHigh:
if float(bias) > biasHigh:
continue
# print(tmp)
# if int(float(dis)) > 150:
# continue
if t in temps_list:
target = "../{} {} {} {}\n".format(oneFile, t, kconstant, bias)
out.write(target)
def readPMF(pre, is2d=False, force_list=["0.0", "0.1", "0.2"]):
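    """
    Like readPMF_basic, but reads the PMF files stored per pulling force under
    pre/force_{force}/ for every force in force_list; with is2d=True the files are
    parsed as 2D surfaces (x, y) instead of 1D profiles (bin, x).
    """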
# perturbation_table = {0:"original", 1:"p_mem",
# 2:"m_mem", 3:"p_lipid",
# 4:"m_lipid", 5:"p_go",
# 6:"m_go", 7:"p_rg", 8:"m_rg"}
perturbation_table = {0:"original", 1:"m_go",
2:"p_go", 3:"m_lipid",
4:"p_lipid", 5:"m_mem",
6:"p_mem", 7:"m_rg", 8:"p_rg"}
pmf_list = {
"perturbation":list(perturbation_table.keys()),
"force":force_list
}
pmf_list_data = expand_grid(pmf_list)
all_pmf_list = []
for index, row in pmf_list_data.iterrows():
force = row["force"]
perturbation = row["perturbation"]
if perturbation == 0:
location = pre + f"/force_{force}/pmf-*.dat"
pmf_list = glob.glob(location)
change = "none"
upOrDown = "none"
else:
location = pre + f"/force_{force}/perturbation-{perturbation}-pmf-*.dat"
pmf_list = glob.glob(location)
change = perturbation_table[perturbation].split("_")[-1]
upOrDown = perturbation_table[perturbation].split("_")[0]
# print(pmf_list)
name_list = ["f", "df", "e", "s"]
if is2d:
names = ["x", "y"] + name_list
else:
names = ["bin", "x"] + name_list
for location in pmf_list:
# print(location)
temp = re.findall(r'pmf-(\d+)', location)
            if len(temp) != 1:
                raise ValueError(f"expected exactly one temperature in the file name, got {temp} from {location}")
            temp = temp[0]
            data = pd.read_table(location, skiprows=2, sep=r'\s+', names=names).assign(upOrDown=upOrDown, change=change, force=force, temp=temp, perturbation=perturbation_table[perturbation])
all_pmf_list.append(data)
return pd.concat(all_pmf_list).dropna().reset_index()
def readPMF_2(pre, is2d=0, force_list=["0.0", "0.1", "0.2"]):
if is2d:
print("reading 2d pmfs")
else:
print("reading 1d dis, qw and z")
if is2d == 1:
mode_list = ["2d_qw_dis", "2d_z_dis", "2d_z_qw"]
elif is2d == 2:
mode_list = ["quick"]
else:
mode_list = ["1d_dis", "1d_qw", "1d_z"]
all_data_list =[]
for mode in mode_list:
tmp = readPMF(mode, is2d, force_list).assign(mode=mode)
all_data_list.append(tmp)
return pd.concat(all_data_list).dropna().reset_index()
def shrinkage(n=552, shrink_size=6, max_frame=2000, fileName="dump.lammpstrj"):
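    """Thin a LAMMPS trajectory, writing the result to ``small.lammpstrj``.
    Every ``shrink_size``-th frame of ``fileName`` is kept, up to ``max_frame``
    frames; ``n`` is the number of lines that make up one frame (atom lines plus
    the dump header).
    """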
print("Shrinkage: size: {}, max_frame: {}".format(shrink_size, max_frame))
bashCommand = "wc " + fileName
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
line_number = int(output.decode("utf-8").split()[0])
print(line_number)
    print(line_number/n)
    # one frame = 543 atom lines + 9 header lines = 552 lines
    n = 552
count = 0
with open("small.lammpstrj", "w") as out:
with open(fileName, "r") as f:
for i, line in enumerate(f):
if (i // n) % shrink_size == 0:
if count >= max_frame*n:
break
count += 1
out.write(line)
def compute_theta_for_each_helix(output="angles.csv", dumpName="../dump.lammpstrj.0"):
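    """Write per-frame helix tilt values (as cos(theta)) to ``output``.
    For each frame of ``dumpName`` and each hard-coded 2xov helix, the cosine of
    the angle between the helix end-to-end vector and the z axis is recorded.
    Residue numbers are shifted by 91 to match the indexing of the dump file.
    """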
print("This is for 2xov only")
helices_list = [(94,114), (147,168), (171, 192), (200, 217), (226, 241), (250, 269)]
atoms_all_frames = read_lammps(dumpName)
# print(atoms[0])
# print(len(atoms), len(atoms[0]))
# helices_angles_all_frames = []
with open(output, "w") as out:
out.write("Frame, Helix, Angle\n")
for ii, frame in enumerate(atoms_all_frames):
# helices_angles = []
for count, (i, j) in enumerate(helices_list):
# print(i, j)
i = i-91
j = j-91
# end - start
a = np.array(frame[j]) - np.array(frame[i])
b = np.array([0, 0, 1])
angle = a[2]/length(a) # in form of cos theta
# helices_angles.append(angle)
# print(angle)
out.write("{}, {}, {}\n".format(ii, count+1, angle))
# helices_angles_all_frames.append(helices_angles)
def structure_prediction_run(protein):
print(protein)
protocol_list = ["awsemer", "frag", "er"]
do = os.system
cd = os.chdir
cd(protein)
# run = "frag"
for protocol in protocol_list:
do("rm -r " + protocol)
do("mkdir -p " + protocol)
do("cp -r {} {}/".format(protein, protocol))
cd(protocol)
cd(protein)
# do("cp ~/opt/gremlin/protein/{}/gremlin/go_rnativeC* .".format(protein))
do("cp ~/opt/gremlin/protein/{}/raptor/go_rnativeC* .".format(protein))
fileName = protein + "_multi.in"
backboneFile = "fix_backbone_coeff_" + protocol
with fileinput.FileInput(fileName, inplace=True, backup='.bak') as file:
for line in file:
tmp = line.replace("fix_backbone_coeff_er", backboneFile)
print(tmp, end='')
cd("..")
do("run.py -m 0 -n 20 {}".format(protein))
cd("..")
cd("..")
# do("")
def check_and_correct_fragment_memory(fragFile="fragsLAMW.mem"):
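    """Drop fragment-memory entries whose .gro files are missing residues.
    The four header lines of ``fragFile`` are copied unchanged; every other entry
    is kept only if its .gro file contains all residues in the stated range. The
    original file is backed up as ``fragsLAMW_back`` and replaced by the filtered
    version.
    """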
with open("tmp.mem", "w") as out:
with open(fragFile, "r") as f:
for i in range(4):
line = next(f)
out.write(line)
for line in f:
gro, _, i, n, _ = line.split()
delete = False
# print(gro, i, n)
# name = gro.split("/")[-1]
with open(gro, "r") as one:
next(one)
next(one)
all_residues = set()
for atom in one:
residue, *_ = atom.split()
# print(residue)
all_residues.add(int(residue))
for test in range(int(i), int(i)+int(n)):
if test not in all_residues:
print("ATTENTION", gro, i, n, "missing:",test)
delete = True
if not delete:
out.write(line)
os.system(f"mv {fragFile} fragsLAMW_back")
os.system(f"mv tmp.mem {fragFile}")
def read_complete_temper_2(n=4, location=".", rerun=-1, qnqc=False, average_z=False, localQ=False, disReal=False, dis_h56=False, goEnergy=False, goEnergy3H=False, goEnergy4H=False):
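    """Read the output of one replica-exchange run into a single tidy DataFrame.
    For each of the ``n`` replicas the wham, lipid, rgs, energy and addforce files
    are read and concatenated column-wise (optionally adding qn/qc, average z,
    per-residue localQ, real distances and Go-model energies), then merged with
    the temperature-exchange schedule from ``../log{rerun}/log.lammps``. The
    result is sorted by Step and Run and carries a ``TotalE`` column
    (Energy + Lipid).
    """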
all_data_list = []
for i in range(n):
file = "lipid.{}.dat".format(i)
lipid = pd.read_csv(location+file)
lipid.columns = lipid.columns.str.strip()
remove_columns = ['Steps']
lipid = lipid.drop(remove_columns, axis=1)
file = "rgs.{}.dat".format(i)
rgs = pd.read_csv(location+file)
rgs.columns = rgs.columns.str.strip()
remove_columns = ['Steps']
rgs = rgs.drop(remove_columns, axis=1)
file = "energy.{}.dat".format(i)
energy = pd.read_csv(location+file)
energy.columns = energy.columns.str.strip()
energy = energy[["AMH-Go", "Membrane", "Rg"]]
file = "addforce.{}.dat".format(i)
dis = pd.read_csv(location+file)
dis.columns = dis.columns.str.strip()
remove_columns = ['Steps', 'AddedForce', 'Dis12', 'Dis34', 'Dis56']
dis.drop(remove_columns, axis=1,inplace=True)
file = "wham.{}.dat".format(i)
wham = pd.read_csv(location+file).assign(Run=i)
wham.columns = wham.columns.str.strip()
remove_columns = ['Rg', 'Tc']
wham = wham.drop(remove_columns, axis=1)
if qnqc:
qc = pd.read_table(location+f"qc_{i}", names=["qc"])[1:].reset_index(drop=True)
qn = pd.read_table(location+f"qn_{i}", names=["qn"])[1:].reset_index(drop=True)
qc2 = pd.read_table(location+f"qc2_{i}", names=["qc2"])[1:].reset_index(drop=True)
wham = pd.concat([wham, qn, qc, qc2],axis=1)
# if average_z:
# z = pd.read_table(location+f"z_{i}.dat", names=["AverageZ"])[1:].reset_index(drop=True)
# wham = pd.concat([wham, z],axis=1)
if disReal:
tmp = pd.read_csv(location+f"distance_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
wham = pd.concat([wham, tmp],axis=1)
if dis_h56:
tmp = pd.read_csv(location+f"distance_h56_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
tmp1 = pd.read_csv(location+f"distance_h12_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
tmp2 = pd.read_csv(location+f"distance_h34_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
tmp1.columns = tmp1.columns.str.strip()
tmp2.columns = tmp2.columns.str.strip()
wham = pd.concat([wham, tmp, tmp1, tmp2],axis=1)
if average_z:
z = pd.read_csv(location+f"z_complete_{i}.dat")[1:].reset_index(drop=True)
z.columns = z.columns.str.strip()
wham = pd.concat([wham, z],axis=1)
if localQ:
all_localQ = pd.read_csv(location+f"localQ.{i}.csv")[1:].reset_index(drop=True)
wham = pd.concat([wham, all_localQ], axis=1)
if goEnergy:
tmp = pd.read_csv(location+f"Go_{i}/goEnergy.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
wham = pd.concat([wham, tmp],axis=1)
if goEnergy3H:
nEnergy = pd.read_csv(location+f"Go_3helix_{i}/goEnergy.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
nEnergy.columns = nEnergy.columns.str.strip()
wham = pd.concat([wham, nEnergy],axis=1)
if goEnergy4H:
nEnergy = pd.read_csv(location+f"Go_4helix_{i}/goEnergy.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
nEnergy.columns = nEnergy.columns.str.strip()
wham = pd.concat([wham, nEnergy],axis=1)
data = pd.concat([wham, dis, energy, rgs, lipid], axis=1)
# lipid = lipid[["Steps","Lipid","Run"]]
all_data_list.append(data)
data = pd.concat(all_data_list)
file = f"../log{rerun}/log.lammps"
temper = pd.read_table(location+file, skiprows=2, sep=' ')
temper = temper.melt(id_vars=['Step'], value_vars=['T' + str(i) for i in range(n)], value_name="Temp", var_name="Run")
temper["Run"] = temper["Run"].str[1:].astype(int)
temper["Temp"] = "T" + temper["Temp"].astype(str)
# print(temper)
# print(wham)
t2 = temper.merge(data, how='inner', left_on=["Step", "Run"], right_on=["Steps", "Run"]).sort_values('Step').drop('Steps', axis=1)
# print(t2)
t3 = t2.assign(TotalE=t2.Energy + t2.Lipid)
return t3.sort_values(["Step", "Run"]).reset_index(drop=True)
def process_complete_temper_data_3(pre, data_folder, folder_list, rerun=-1, end=-1, n=12, bias="dis", qnqc=False, average_z=False, disReal=False, dis_h56=False, localQ=False, goEnergy=False, goEnergy3H=False, goEnergy4H=False, label=""):
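    """Collect replica-exchange data for every bias window into feather files.
    For each folder, every ``{bias}_*`` simulation directory is read with
    ``read_complete_temper_2``, tagged with its bias value (``BiasTo``) and
    concatenated; the combined table is then split into 1e7-step windows and
    written both next to the simulation and into ``data_folder``.
    """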
print("process temp data")
dateAndTime = datetime.today().strftime('%d_%h_%H%M%S')
for folder in folder_list:
simulation_list = glob.glob(pre+folder+f"/simulation/{bias}_*")
print(pre+folder+f"/simulation/{bias}_*")
os.system("mkdir -p " + pre+folder+"/data")
# this one only consider rerun >=0, for the case rerun=-1, move log.lammps to log0
for i in range(rerun, end, -1):
all_data_list = []
for one_simulation in simulation_list:
bias_num = one_simulation.split("_")[-1]
print(bias_num, "!")
location = one_simulation + f"/{i}/"
print(location)
data = read_complete_temper_2(location=location, n=n, rerun=i, qnqc=qnqc, average_z=average_z, localQ=localQ, disReal=disReal, dis_h56=dis_h56, goEnergy=goEnergy, goEnergy3H=goEnergy3H, goEnergy4H=goEnergy4H)
print(data.shape)
# remove_columns = ['Step', "Run"]
# data = data.drop(remove_columns, axis=1)
all_data_list.append(data.assign(BiasTo=bias_num))
data = pd.concat(all_data_list).reset_index(drop=True)
# if localQ:
# print("hi")
# else:
# data.to_csv(os.path.join(pre, folder, f"data/rerun_{i}.csv"))
# complete_data_list.append(data)
# temps = list(dic.keys())
# complete_data = pd.concat(complete_data_list)
name = f"rerun_{2*i}_{dateAndTime}.feather"
data = data.reset_index(drop=True)
data.query(f'Step > {2*i}e7 & Step <= {2*i+1}e7').reset_index(drop=True).to_feather(pre+folder+"/" + name)
os.system("cp "+pre+folder+"/" + name + " "+data_folder+label+name)
name = f"rerun_{2*i+1}_{dateAndTime}.feather"
data = data.reset_index(drop=True)
data.query(f'Step > {2*i+1}e7 & Step <= {2*i+2}e7').reset_index(drop=True).to_feather(pre+folder+"/" + name)
os.system("cp "+pre+folder+"/" + name + " "+data_folder+label+name)
def move_data4(data_folder, freeEnergy_folder, folder_list, temp_dict_mode=1, sub_mode_name="", kmem=0.2, klipid=0.1, kgo=0.1, krg=0.2, sample_range_mode=0, biasName="dis", qnqc=False, average_z=0, chosen_mode=0):
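    """Turn the combined feather files into per-(temperature, bias) input files.
    The feather files named in ``folder_list`` are concatenated, temperature
    symbols are mapped to temperatures via ``temp_dict_mode``, rows are
    restricted to the Step window selected by ``sample_range_mode``, and the
    columns chosen by ``average_z``/``chosen_mode`` (optionally with perturbed
    total energies) are written as whitespace-separated
    ``t_{temp}_{biasName}_{bias}.dat`` files for the free-energy calculation.
    """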
print("move data")
# dic = {"T_defined":300, "T0":350, "T1":400, "T2":450, "T3":500, "T4":550, "T5":600, "T6":650, "T7":700, "T8":750, "T9":800, "T10":900, "T11":1000}
if temp_dict_mode == 1:
dic = {"T0":280, "T1":300, "T2":325, "T3":350, "T4":375, "T5":400, "T6":450, "T7":500, "T8":550, "T9":600, "T10":650, "T11":700}
if temp_dict_mode == 2:
dic = {"T0":280, "T1":290, "T2":300, "T3":315, "T4":335, "T5":355, "T6":380, "T7":410, "T8":440, "T9":470, "T10":500, "T11":530}
if temp_dict_mode == 3:
dic = {"T0":280, "T1":290, "T2":300, "T3":310, "T4":320, "T5":335, "T6":350, "T7":365, "T8":380, "T9":410, "T10":440, "T11":470}
if temp_dict_mode == 4:
dic = {"T0":300, "T1":335, "T2":373, "T3":417, "T4":465, "T5":519, "T6":579, "T7":645, "T8":720, "T9":803, "T10":896, "T11":1000}
# read in complete.feather
data_list = []
for folder in folder_list:
tmp = pd.read_feather(data_folder + folder +".feather")
data_list.append(tmp)
data = pd.concat(data_list)
os.system("mkdir -p "+freeEnergy_folder+"/"+sub_mode_name+f"/data_{sample_range_mode}")
for bias, oneBias in data.groupby("BiasTo"):
for tempSymbol, oneTempAndBias in oneBias.groupby("Temp"):
temp = dic[tempSymbol]
if float(temp) > 800:
continue
print(f"t_{temp}_{biasName}_{bias}.dat")
if sample_range_mode == 0:
queryCmd = 'Step > 0 & Step <= 1e7'
            elif sample_range_mode == 1:
queryCmd = 'Step > 1e7 & Step <= 2e7'
elif sample_range_mode == 2:
queryCmd ='Step > 2e7 & Step <= 3e7'
elif sample_range_mode == 3:
queryCmd ='Step > 3e7 & Step <= 4e7'
elif sample_range_mode == 4:
queryCmd ='Step > 4e7 & Step <= 5e7'
elif sample_range_mode == 5:
queryCmd ='Step > 5e7 & Step <= 6e7'
elif sample_range_mode == 6:
queryCmd ='Step > 6e7 & Step <= 7e7'
elif sample_range_mode == 7:
queryCmd ='Step > 7e7 & Step <= 8e7'
elif sample_range_mode == -1:
queryCmd ='Step > 4e7 & Step <= 6e7'
if sample_range_mode == -2:
tmp = oneTempAndBias.reset_index(drop=True)
else:
tmp = oneTempAndBias.query(queryCmd).reset_index()
if average_z < 5:
chosen_list = ["TotalE", "Qw", "Distance"]
elif average_z == 5:
chosen_list = ["TotalE", "Qw", "DisReal"]
chosen_list += ["z_h6"]
if average_z == 1:
chosen_list += ["abs_z_average"]
if average_z == 2 or average_z == 3:
chosen_list += ["z_h6"]
if average_z == 3:
chosen_list += ["DisReal"]
if average_z == 4:
tmp["z_h5_and_h6"] = tmp["z_h5"] + tmp["z_h6"]
chosen_list += ["z_h5_and_h6"]
chosen_list += ["DisReal"]
if average_z == 6:
chosen_list = ["TotalE", "Qw", "DisReal"]
tmp["z_h5_and_h6"] = tmp["z_h5"] + tmp["z_h6"]
chosen_list += ["z_h5_and_h6"]
chosen_list += ["z_h5"]
chosen_list += ["z_h6"]
chosen_list += ["Dis_h56"]
if average_z == 7:
chosen_list = ["TotalE", "Qw", "DisReal"]
tmp["z_h56"] = tmp["z_h5"] + tmp["z_h6"]
tmp["z_h14"] = tmp["z_h1"] + tmp["z_h2"] + tmp["z_h3"] + tmp["z_h4"]
chosen_list += ["z_h14"]
chosen_list += ["z_h56"]
chosen_list += ["z_h5"]
chosen_list += ["z_h6"]
chosen_list += ["Dis_h12"]
chosen_list += ["Dis_h34"]
chosen_list += ["Dis_h56"]
if chosen_mode == 0:
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_mem_p=tmp.TotalE + kmem*tmp.Membrane,
TotalE_perturb_mem_m=tmp.TotalE - kmem*tmp.Membrane,
TotalE_perturb_lipid_p=tmp.TotalE + klipid*tmp.Lipid,
TotalE_perturb_lipid_m=tmp.TotalE - klipid*tmp.Lipid,
TotalE_perturb_go_p=tmp.TotalE + kgo*tmp["AMH-Go"],
TotalE_perturb_go_m=tmp.TotalE - kgo*tmp["AMH-Go"],
TotalE_perturb_rg_p=tmp.TotalE + krg*tmp.Rg,
TotalE_perturb_rg_m=tmp.TotalE - krg*tmp.Rg)
if chosen_mode == 1:
chosen_list += ["Res" + str(i+1) for i in range(181)]
chosen = tmp[chosen_list]
if chosen_mode == 2:
chosen_list += ["Res" + str(i+1) for i in range(181)]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_go_m=tmp.TotalE - kgo*tmp["AMH-Go"],
TotalE_perturb_go_p=tmp.TotalE + kgo*tmp["AMH-Go"],
TotalE_perturb_lipid_m=tmp.TotalE - klipid*tmp.Lipid,
TotalE_perturb_lipid_p=tmp.TotalE + klipid*tmp.Lipid,
TotalE_perturb_mem_m=tmp.TotalE - kmem*tmp.Membrane,
TotalE_perturb_mem_p=tmp.TotalE + kmem*tmp.Membrane,
TotalE_perturb_rg_m=tmp.TotalE - krg*tmp.Rg,
TotalE_perturb_rg_p=tmp.TotalE + krg*tmp.Rg)
# print(tmp.count())
if chosen_mode == 3:
chosen_list += ["AMH-Go", "Lipid", "Membrane", "Rg"]
chosen = tmp[chosen_list]
if chosen_mode == 4:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
if chosen_mode == 5:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_go_m=tmp.TotalE/10,
TotalE_perturb_go_p=0,
Go=tmp["AMH-Go"])
if chosen_mode == 6:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH,
TotalE_4=tmp.TotalE + tmp.AMH,
TotalE_5=tmp.AMH)
if chosen_mode == 7:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_3H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_3H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_3H,
TotalE_4=tmp.TotalE + tmp.AMH_3H,
TotalE_5=tmp.TotalE + 0.1*tmp.AMH,
TotalE_6=tmp.TotalE + 0.2*tmp.AMH)
if chosen_mode == 8:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_4H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_4H,
TotalE_4=tmp.TotalE + 0.1*tmp.AMH_3H,
TotalE_5=tmp.TotalE + 0.2*tmp.AMH_3H,
TotalE_6=tmp.TotalE + 0.5*tmp.AMH_3H)
if chosen_mode == 9:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_4H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_4H)
chosen = chosen.assign(TotalE_perturb_1go_m=chosen.TotalE_2 - kgo*tmp["AMH-Go"],
TotalE_perturb_1go_p=chosen.TotalE_2 + kgo*tmp["AMH-Go"],
TotalE_perturb_2lipid_m=chosen.TotalE_2 - tmp.Lipid,
TotalE_perturb_2lipid_p=chosen.TotalE_2 + tmp.Lipid,
TotalE_perturb_3mem_m=chosen.TotalE_2 - tmp.Membrane,
TotalE_perturb_3mem_p=chosen.TotalE_2 + tmp.Membrane,
TotalE_perturb_4rg_m=chosen.TotalE_2 - tmp.Rg,
TotalE_perturb_4rg_p=chosen.TotalE_2 + tmp.Rg,
TotalE_perturb_5go=tmp["AMH-Go"],
TotalE_perturb_5lipid=tmp.Lipid,
TotalE_perturb_5mem=tmp.Membrane,
TotalE_perturb_5rg=tmp.Rg)
if chosen_mode == 10:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_4H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_4H)
chosen = chosen.assign(TotalE_perturb_1lipid_m1=chosen.TotalE_2 - 0.1*tmp.Lipid,
TotalE_perturb_1lipid_p1=chosen.TotalE_2 + 0.1*tmp.Lipid,
TotalE_perturb_2lipid_m2=chosen.TotalE_2 - 0.2*tmp.Lipid,
TotalE_perturb_2lipid_p2=chosen.TotalE_2 + 0.2*tmp.Lipid,
TotalE_perturb_3lipid_m3=chosen.TotalE_2 - 0.3*tmp.Lipid,
TotalE_perturb_3lipid_p3=chosen.TotalE_2 + 0.3*tmp.Lipid,
TotalE_perturb_4lipid_m4=chosen.TotalE_2 - 0.5*tmp.Lipid,
TotalE_perturb_4lipid_p4=chosen.TotalE_2 + 0.5*tmp.Lipid,
TotalE_perturb_5go=tmp["AMH-Go"],
TotalE_perturb_5lipid=tmp.Lipid,
TotalE_perturb_5mem=tmp.Membrane,
TotalE_perturb_5rg=tmp.Rg)
if chosen_mode == 11:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 1.1*0.1*tmp.AMH_4H + 0.1*tmp["AMH-Go"],
TotalE_2=tmp.TotalE + 1.1*0.2*tmp.AMH_4H + 0.1*tmp["AMH-Go"],
TotalE_3=tmp.TotalE + 1.1*0.5*tmp.AMH_4H + 0.1*tmp["AMH-Go"])
chosen = chosen.assign(TotalE_perturb_1lipid_m1=chosen.TotalE_2 - 0.1*tmp.Lipid,
TotalE_perturb_1lipid_p1=chosen.TotalE_2 + 0.1*tmp.Lipid,
TotalE_perturb_2lipid_m2=chosen.TotalE_2 - 0.2*tmp.Lipid,
TotalE_perturb_2lipid_p2=chosen.TotalE_2 + 0.2*tmp.Lipid,
TotalE_perturb_3lipid_m3=chosen.TotalE_2 - 0.1*tmp.Membrane,
TotalE_perturb_3lipid_p3=chosen.TotalE_2 + 0.1*tmp.Membrane,
TotalE_perturb_4lipid_m4=chosen.TotalE_2 - 0.2*tmp.Membrane,
TotalE_perturb_4lipid_p4=chosen.TotalE_2 + 0.2*tmp.Membrane,
TotalE_perturb_5go=tmp["AMH-Go"],
TotalE_perturb_5lipid=tmp.Lipid,
TotalE_perturb_5mem=tmp.Membrane,
TotalE_perturb_5rg=tmp.Rg)
if chosen_mode == 12:
chosen = tmp[chosen_list]
# chosen["z_h56"] = (chosen["z_h5"] + chosen["z_h6"])/2
chosen = chosen.assign(TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
z_h56=(tmp.z_h5 + tmp.z_h6)/2)
if chosen_mode == 13:
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
# chosen["z_h56"] = (chosen["z_h5"] + chosen["z_h6"])/2
force = 0.1
chosen = chosen.assign(TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H - (tmp.DisReal - 25.1)*force,
TotalE_3=tmp.TotalE - (tmp.DisReal - 25.1)*force,
TotalE_4=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_5=tmp.TotalE + 0.2*tmp.AMH_4H - (tmp.DisReal)*force)
chosen.to_csv(freeEnergy_folder+"/"+sub_mode_name+f"/data_{sample_range_mode}/t_{temp}_{biasName}_{bias}.dat", sep=' ', index=False, header=False)
# perturbation_table = {0:"original", 1:"m_go",
# 2:"p_go", 3:"m_lipid",
# 4:"p_lipid", 5:"m_mem",
# 6:"p_mem", 7:"m_rg", 8:"p_rg"}
def compute_average_z(dumpFile, outFile):
# input dump, output z.dat
z_list = []
with open(outFile, "w") as f:
a = read_lammps(dumpFile)
for atoms in a:
b = np.array(atoms)
z = b.mean(axis=0)[2]
z_list.append(z)
f.write(str(z)+"\n")
def compute_average_z_2(dumpFile, outFile):
# input dump, output z.dat
helices_list = [(94,114), (147,168), (171, 192), (200, 217), (226, 241), (250, 269)]
with open(outFile, "w") as f:
a = read_lammps(dumpFile)
f.write("z_average, abs_z_average, z_h1, z_h2, z_h3, z_h4, z_h5, z_h6\n")
for atoms in a:
b = np.array(atoms)
z = b.mean(axis=0)[2]
f.write(str(z)+ ", ")
z = np.abs(b).mean(axis=0)[2]
f.write(str(z)+ ", ")
for count, (i,j) in enumerate(helices_list):
i = i - 91
j = j - 91
z = np.mean(b[i:j], axis=0)[2]
if count == 5:
f.write(str(z))
else:
f.write(str(z)+ ", ")
f.write("\n")
def read_simulation_2(location=".", i=-1, qnqc=False, average_z=False, localQ=False, disReal=False, **kwargs):
file = "lipid.dat"
lipid = pd.read_csv(location+file)
lipid.columns = lipid.columns.str.strip()
remove_columns = ['Steps']
lipid = lipid.drop(remove_columns, axis=1)
file = "rgs.dat"
rgs = pd.read_csv(location+file)
rgs.columns = rgs.columns.str.strip()
remove_columns = ['Steps']
rgs = rgs.drop(remove_columns, axis=1)
file = "energy.dat"
energy = pd.read_csv(location+file)
energy.columns = energy.columns.str.strip()
energy = energy[["AMH-Go", "Membrane", "Rg"]]
file = "addforce.dat"
dis = pd.read_csv(location+file)
dis.columns = dis.columns.str.strip()
remove_columns = ['Steps', 'AddedForce', 'Dis12', 'Dis34', 'Dis56']
dis.drop(remove_columns, axis=1,inplace=True)
file = "wham.dat"
wham = pd.read_csv(location+file).assign(Run=i)
wham.columns = wham.columns.str.strip()
remove_columns = ['Rg', 'Tc']
wham = wham.drop(remove_columns, axis=1)
if qnqc:
qc = pd.read_table(location+f"qc", names=["qc"])[1:].reset_index(drop=True)
qn = | pd.read_table(location+f"qn", names=["qn"]) | pandas.read_table |
"""General utility functions that are used in a variety of contexts.
The functions in this module are used in various stages of the ETL and post-etl
processes. They are usually not dataset specific, but not always. If a function
is designed to be used as a general purpose tool, applicable in multiple
scenarios, it should probably live here. There are lots of transform type
functions in here that help with cleaning and restructuring dataframes.
"""
import itertools
import logging
import pathlib
import re
import shutil
from collections import defaultdict
from functools import partial
from importlib import resources
from io import BytesIO
from typing import Any, DefaultDict, Dict, List, Optional, Set, Union
import addfips
import numpy as np
import pandas as pd
import requests
import sqlalchemy as sa
from pudl.metadata.classes import DataSource, Package
from pudl.metadata.fields import apply_pudl_dtypes, get_pudl_dtypes
logger = logging.getLogger(__name__)
sum_na = partial(pd.Series.sum, skipna=False)
"""A sum function that returns NA if the Series includes any NA values.
In many of our aggregations we need to override the default behavior of treating
NA values as if they were zero. E.g. when calculating the heat rates of
generation units, if there are some months where fuel consumption is reported
as NA, but electricity generation is reported normally, then the fuel
consumption for the year needs to be NA, otherwise we'll get unrealistic heat
rates.
"""
def label_map(
df: pd.DataFrame,
from_col: str = "code",
to_col: str = "label",
null_value: Union[str, type(pd.NA)] = pd.NA,
) -> DefaultDict[str, Union[str, type(pd.NA)]]:
"""Build a mapping dictionary from two columns of a labeling / coding dataframe.
These dataframes document the meanings of the codes that show up in much of the
originally reported data. They're defined in :mod:`pudl.metadata.codes`. This
function is mostly used to build maps that can translate the hard to understand
short codes into longer human-readable codes.
Args:
df: The coding / labeling dataframe. Must contain columns ``from_col``
and ``to_col``.
from_col: Label of column containing the existing codes to be replaced.
to_col: Label of column containing the new codes to be swapped in.
        null_value: Default (null) value to map to when a value which doesn't
appear in ``from_col`` is encountered.
Returns:
A mapping dictionary suitable for use with :meth:`pandas.Series.map`.
"""
return defaultdict(
lambda: null_value,
df.loc[:, [from_col, to_col]]
.drop_duplicates(subset=[from_col])
.to_records(index=False),
)
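# Minimal usage sketch for ``label_map`` (the codes and values below are
# hypothetical stand-ins, not real PUDL coding tables).
def _label_map_usage_example() -> pd.DataFrame:
    """Show how ``label_map`` pairs with :meth:`pandas.Series.map`."""
    codes = pd.DataFrame({"code": ["NG", "BIT"], "label": ["natural_gas", "bituminous_coal"]})
    data = pd.DataFrame({"fuel_code": ["NG", "BIT", "XX"]})
    # Codes missing from the table, such as "XX", fall back to the defaultdict's null_value.
    data["fuel_label"] = data["fuel_code"].map(label_map(codes))
    return data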
def find_new_ferc1_strings(
table: str,
field: str,
strdict: Dict[str, List[str]],
ferc1_engine: sa.engine.Engine,
) -> Set[str]:
"""Identify as-of-yet uncategorized freeform strings in FERC Form 1.
Args:
table: Name of the FERC Form 1 DB to search.
field: Name of the column in that table to search.
strdict: A string cleaning dictionary. See
e.g. `pudl.transform.ferc1.FUEL_UNIT_STRINGS`
ferc1_engine: SQL Alchemy DB connection engine for the FERC Form 1 DB.
Returns:
Any string found in the searched table + field that was not part of any of
categories enumerated in strdict.
"""
all_strings = set(
pd.read_sql(f"SELECT {field} FROM {table};", ferc1_engine).pipe( # nosec
simplify_strings, columns=[field]
)[field]
)
old_strings = set.union(*[set(strings) for strings in strdict.values()])
return all_strings.difference(old_strings)
def find_foreign_key_errors(dfs: Dict[str, pd.DataFrame]) -> List[Dict[str, Any]]:
"""Report foreign key violations from a dictionary of dataframes.
The database schema to check against is generated based on the names of the
dataframes (keys of the dictionary) and the PUDL metadata structures.
Args:
dfs: Keys are table names, and values are dataframes ready for loading
into the SQLite database.
Returns:
A list of dictionaries, each one pertains to a single database table
in which a foreign key constraint violation was found, and it includes
the table name, foreign key definition, and the elements of the
dataframe that violated the foreign key constraint.
"""
package = Package.from_resource_ids(resource_ids=tuple(sorted(dfs)))
errors = []
for resource in package.resources:
for foreign_key in resource.schema.foreign_keys:
x = dfs[resource.name][foreign_key.fields]
y = dfs[foreign_key.reference.resource][foreign_key.reference.fields]
ncols = x.shape[1]
idx = range(ncols)
xx, yy = x.set_axis(idx, axis=1), y.set_axis(idx, axis=1)
if ncols == 1:
# Faster check for single-field foreign key
invalid = ~(xx[0].isin(yy[0]) | xx[0].isna())
else:
invalid = ~(
pd.concat([yy, xx]).duplicated().iloc[len(yy) :]
| xx.isna().any(axis=1)
)
if invalid.any():
errors.append(
{
"resource": resource.name,
"foreign_key": foreign_key,
"invalid": x[invalid],
}
)
return errors
def download_zip_url(url, save_path, chunk_size=128):
"""Download and save a Zipfile locally.
Useful for acquiring and storing non-PUDL data locally.
Args:
url (str): The URL from which to download the Zipfile
save_path (pathlib.Path): The location to save the file.
chunk_size (int): Data chunk in bytes to use while downloading.
Returns:
None
"""
# This is a temporary hack to avoid being filtered as a bot:
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language": "en-US,en;q=0.5",
"DNT": "1",
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
}
r = requests.get(url, stream=True, headers=headers)
with save_path.open(mode="wb") as fd:
for chunk in r.iter_content(chunk_size=chunk_size):
fd.write(chunk)
def add_fips_ids(df, state_col="state", county_col="county", vintage=2015):
"""Add State and County FIPS IDs to a dataframe.
To just add State FIPS IDs, make county_col = None.
"""
# force the columns to be the nullable string types so we have a consistent
# null value to filter out before feeding to addfips
df = df.astype({state_col: pd.StringDtype()})
if county_col:
df = df.astype({county_col: pd.StringDtype()})
af = addfips.AddFIPS(vintage=vintage)
# Lookup the state and county FIPS IDs and add them to the dataframe:
df["state_id_fips"] = df.apply(
lambda x: (
af.get_state_fips(state=x[state_col]) if pd.notnull(x[state_col]) else pd.NA
),
axis=1,
)
# force the code columns to be nullable strings - the leading zeros are
# important
df = df.astype({"state_id_fips": | pd.StringDtype() | pandas.StringDtype |
import os
from nose.tools import *
import unittest
import pandas as pd
import numpy as np
import py_entitymatching as em
from py_entitymatching.utils.generic_helper import get_install_path
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching.utils.catalog_helper as ch
from py_entitymatching.io.parsers import read_csv_metadata
#import sys
#sys.path.insert(0, '../debugblocker')
#import debugblocker as db
import py_entitymatching.debugblocker.debugblocker as db
from operator import itemgetter
from array import array
datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets'])
catalog_datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets', 'catalog'])
debugblocker_datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets', 'debugblocker'])
path_a = os.sep.join([datasets_path, 'A.csv'])
path_b = os.sep.join([datasets_path, 'B.csv'])
path_c = os.sep.join([datasets_path, 'C.csv'])
class DebugblockerTestCases(unittest.TestCase):
def test_validate_types_1(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable_ID',
fk_rtable='rtable_ID', key = '_id')
A_key = em.get_key(A)
B_key = em.get_key(B)
attr_corres = None
db._validate_types(A, B, C, 100, attr_corres, False)
def test_validate_types_2(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable_' +
A_key, fk_rtable='rtable_' + B_key, key = '_id')
attr_corres = [('ID', 'ID'), ('name', 'name'),
('birth_year', 'birth_year'),
('hourly_wage', 'hourly_wage'),
('address', 'address'),
('zipcode', 'zipcode')]
db._validate_types(A, B, C, 100, attr_corres, False)
def test_check_input_field_correspondence_list_1(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
field_corres_list = None
db._check_input_field_correspondence_list(A, B, field_corres_list)
def test_check_input_field_correspondence_list_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
field_corres_list = []
db._check_input_field_correspondence_list(A, B, field_corres_list)
@raises(AssertionError)
def test_check_input_field_correspondence_list_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
field_corres_list = [('adsf', 'fdsa'), 'asdf']
db._check_input_field_correspondence_list(A, B, field_corres_list)
@raises(AssertionError)
def test_check_input_field_correspondence_list_4(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
field_corres_list = [('asdf', 'fdsa')]
db._check_input_field_correspondence_list(A, B, field_corres_list)
@raises(AssertionError)
def test_check_input_field_correspondence_list_5(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
field_corres_list = [('address', 'fdsa')]
db._check_input_field_correspondence_list(A, B, field_corres_list)
def test_check_input_field_correspondence_list_7(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
field_corres_list = [('zipcode', 'zipcode'),
('birth_year', 'birth_year')]
db._check_input_field_correspondence_list(A, B, field_corres_list)
def test_get_field_correspondence_list_1(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
expected_list = [('ID', 'ID'), ('name', 'name'),
('birth_year', 'birth_year'),
('hourly_wage', 'hourly_wage'),
('address', 'address'),
('zipcode', 'zipcode')]
attr_corres = None
corres_list = db._get_field_correspondence_list(
A, B, A_key, B_key, attr_corres)
self.assertEqual(corres_list, expected_list)
attr_corres = []
corres_list = db._get_field_correspondence_list(
A, B, A_key, B_key, attr_corres)
self.assertEqual(corres_list, expected_list)
def test_get_field_correspondence_list_2(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
expected_list = [('ID', 'ID'), ('name', 'name'),
('address', 'address'),
('zipcode', 'zipcode')]
attr_corres = [('ID', 'ID'), ('name', 'name'),
('address', 'address'),
('zipcode', 'zipcode')]
corres_list = db._get_field_correspondence_list(
A, B, A_key, B_key, attr_corres)
self.assertEqual(corres_list, expected_list)
def test_get_field_correspondence_list_3(self):
data = [[1, 'asdf', 'a0001']]
A = pd.DataFrame(data)
A.columns = ['Id', 'Title', 'ISBN']
A_key = 'Id'
B = pd.DataFrame(data)
B.columns = ['Id', 'title', 'ISBN']
B_key = 'Id'
attr_corres = []
corres_list = db._get_field_correspondence_list(
A, B, A_key, B_key, attr_corres)
expected_list = [('Id', 'Id'), ('ISBN', 'ISBN')]
self.assertEqual(corres_list, expected_list)
@raises(AssertionError)
def test_get_field_correspondence_list_4(self):
data = [[1, 'asdf', 'a0001']]
A = pd.DataFrame(data)
A.columns = ['ID', 'Title', 'isbn']
A_key = 'ID'
B = pd.DataFrame(data)
B.columns = ['Id', 'title', 'ISBN']
B_key = 'Id'
attr_corres = []
db._get_field_correspondence_list(
A, B, A_key, B_key, attr_corres)
def test_get_field_correspondence_list_5(self):
A = pd.DataFrame([[0, 'A', 0.11, 'ASDF']])
A.columns = ['ID', 'name', 'price', 'desc']
em.set_key(A, 'ID')
A_key = em.get_key(A)
B = pd.DataFrame([['B', 'B001', 'ASDF', 0.111]])
B.columns = ['item_name', 'item_id', 'item_desc', 'item_price']
em.set_key(B, 'item_id')
B_key = em.get_key(B)
attr_corres = [('name', 'item_name'),
('price', 'item_price')]
actual_attr_corres = db._get_field_correspondence_list(
A, B, A_key, B_key, attr_corres)
expected_attr_corres = [('name', 'item_name'),
('price', 'item_price'),
('ID', 'item_id')]
self.assertEqual(expected_attr_corres, actual_attr_corres)
def test_build_col_name_index_dict_1(self):
A = pd.DataFrame([[]])
A.columns = []
col_index = db._build_col_name_index_dict(A)
def test_build_col_name_index_dict_2(self):
A = pd.DataFrame([[0, 'A', 0.11, 'ASDF']])
A.columns = ['ID', 'name', 'price', 'desc']
em.set_key(A, 'ID')
col_index = db._build_col_name_index_dict(A)
self.assertEqual(col_index['ID'], 0)
self.assertEqual(col_index['name'], 1)
self.assertEqual(col_index['price'], 2)
self.assertEqual(col_index['desc'], 3)
@raises(AssertionError)
def test_filter_corres_list_1(self):
A = pd.DataFrame([[0, 20, 0.11, 4576]])
A.columns = ['ID', 'age', 'price', 'zip code']
em.set_key(A, 'ID')
B = pd.DataFrame([[0, 240, 0.311, 4474]])
B.columns = ['ID', 'age', 'price', 'zip code']
em.set_key(A, 'ID')
A_key = 'ID'
B_key = 'ID'
ltable_col_dict = db._build_col_name_index_dict(A)
rtable_col_dict = db._build_col_name_index_dict(B)
attr_corres = [('ID', 'ID'), ('age', 'age'),
('price', 'price'),
('zip code', 'zip code')]
db._filter_corres_list(A, B, A_key, B_key, ltable_col_dict,
rtable_col_dict, attr_corres)
def test_filter_corres_list_2(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
ltable_col_dict = db._build_col_name_index_dict(A)
rtable_col_dict = db._build_col_name_index_dict(B)
attr_corres = [('ID', 'ID'), ('name', 'name'),
('birth_year', 'birth_year'),
('hourly_wage', 'hourly_wage'),
('address', 'address'),
('zipcode', 'zipcode')]
expected_filtered_attr = [('ID', 'ID'), ('name', 'name'),
('address', 'address')]
db._filter_corres_list(A, B, A_key, B_key, ltable_col_dict,
rtable_col_dict, attr_corres)
self.assertEqual(expected_filtered_attr, attr_corres)
def test_get_filtered_table(self):
A = pd.DataFrame([['a1', 'A', 0.11, 53704]])
A.columns = ['ID', 'name', 'price', 'zip code']
em.set_key(A, 'ID')
B = pd.DataFrame([['b1', 'A', 0.11, 54321]])
B.columns = ['ID', 'name', 'price', 'zip code']
em.set_key(B, 'ID')
A_key = 'ID'
B_key = 'ID'
ltable_col_dict = db._build_col_name_index_dict(A)
rtable_col_dict = db._build_col_name_index_dict(B)
attr_corres = [('ID', 'ID'), ('name', 'name'),
('price', 'price'),
('zip code', 'zip code')]
db._filter_corres_list(A, B, A_key, B_key, ltable_col_dict,
rtable_col_dict, attr_corres)
filtered_A, filtered_B = db._get_filtered_table(A, B, attr_corres)
expected_filtered_A = pd.DataFrame([['a1', 'A']])
expected_filtered_A.columns = ['ID', 'name']
em.set_key(expected_filtered_A, 'ID')
expected_filtered_B = pd.DataFrame([['b1', 'A']])
expected_filtered_B.columns = ['ID', 'name']
em.set_key(expected_filtered_B, 'ID')
self.assertEqual(expected_filtered_A.equals(filtered_A), True)
self.assertEqual(expected_filtered_B.equals(filtered_B), True)
@raises(AssertionError)
def test_get_feature_weight_1(self):
A = []
dataframe = | pd.DataFrame(A) | pandas.DataFrame |
import numpy as np
import pandas as pd
import altair as alt
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Plot a 3D surface for meshgrid coordinates X, Y and heights Z
def Vis3d(X,Y,Z):
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z, color='y')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
# Visualise the metrics from the model
def MetricsVis(history):
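    """Plot training vs. validation metrics with Altair, one facet per metric.
    ``history`` is assumed to be a mapping of metric name to a sequence of values
    (e.g. the ``history`` attribute of a Keras ``History`` object), with
    validation metrics prefixed by ``val_``.
    """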
df = pd.DataFrame(history)
    df = df.reset_index(drop=True)
df["batch"] = df.index + 1
df = df.melt("batch", var_name="name")
df["val"] = df.name.str.startswith("val")
df["type"] = df["val"]
df["metrics"] = df["val"]
df.loc[df.val == False, "type"] = "training"
df.loc[df.val == True, "type"] = "validation"
df.loc[df.val == False, "metrics"] = df.name
df.loc[df.val == True, "metrics"] = df.name.str.split("val_", expand=True)[1]
df = df.drop(["name", "val"], axis=1)
base = alt.Chart().encode(
x = "batch:Q",
y = "value:Q",
color = "type"
).properties(width = 300, height = 300)
layers = base.mark_circle(size = 50).encode(tooltip = ["batch", "value"]) + base.mark_line()
chart = layers.facet(column='metrics:N', data=df).resolve_scale(y='independent')
return chart
def InteractionVis(df):
vis = alt.Chart(df).mark_rect().encode(
alt.X(field="ITEM", type="nominal",
axis=alt.Axis(orient="top", labelAngle=0)),
alt.Y(field="USER", type="nominal",
axis=alt.Axis(orient="left")),
alt.Color(field="RATING", type="quantitative",
scale=alt.Scale(type="bin-ordinal", scheme='yellowgreenblue', nice=True),
legend=alt.Legend(titleOrient='top', orient="bottom",
direction= "horizontal", tickCount=5))
).properties(
width= 180,
height=300
).configure_axis(
grid=False
)
return vis
def TrainTestVis(train, test):
df = pd.concat([train, test])
maptt = {0: "train", 1: "test"}
df["SPLIT"] = df.split_index.apply(lambda x: maptt[x])
df.head()
vis = alt.Chart(df).mark_rect().encode(
alt.X(field="ITEM", type="nominal",
axis=alt.Axis(orient="top", labelAngle=0)),
alt.Y(field="USER", type="nominal",
axis=alt.Axis(orient="left")),
alt.Color(field="SPLIT", type="ordinal",
scale=alt.Scale(type="ordinal", scheme="darkred", nice=True),
legend=alt.Legend(titleOrient='top', orient="bottom",
direction= "horizontal", tickCount=5)),
alt.Opacity(value=1)
).properties(
width= 180,
height=300
).configure_axis(
grid=False
)
return vis
def EmbeddingVis(embedding, n_factors, name):
embedding_df_wide = pd.DataFrame(embedding)
embedding_df_wide[name]= embedding_df_wide.index
embedding_df = pd.melt(embedding_df_wide, id_vars=[name], value_vars=np.arange(n_factors).tolist(),
var_name='dim', value_name='value')
dim = n_factors
if name == "ITEM":
vis = alt.Chart(embedding_df).mark_rect().encode(
alt.X(field=name, type="nominal", axis=alt.Axis(orient="top", labelAngle=0)),
alt.Y(field="dim", type="nominal", axis=alt.Axis(orient="left")),
alt.Color(field="value", type="quantitative",
scale=alt.Scale(type="bin-ordinal", scheme='yellowgreenblue', nice=True),
legend=alt.Legend(titleOrient='top', orient="bottom",
direction= "horizontal", tickCount=5))
).properties(
width=180,
height=30*dim
)
else:
vis = alt.Chart(embedding_df).mark_rect().encode(
alt.X(field="dim", type="nominal", axis=alt.Axis(orient="top", labelAngle=0)),
alt.Y(field=name, type="nominal", axis=alt.Axis(orient="left")),
alt.Color(field="value", type="quantitative",
scale=alt.Scale(type="bin-ordinal", scheme='yellowgreenblue', nice=True),
legend=alt.Legend(titleOrient='top', orient="bottom",
direction= "horizontal", tickCount=5))
).properties(
width=30*dim,
height=300
)
return vis
def SimilarityVis(item_embedding, user_embedding):
item_embedding_df_wide = | pd.DataFrame(item_embedding) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
import itertools
from numpy import nan
import numpy as np
from pandas import (DataFrame, Series, Timestamp, date_range, compat,
option_context, Categorical)
from pandas.core.arrays import IntervalArray, integer_array
from pandas.compat import StringIO
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestDataFrameBlockInternals():
def test_cast_internals(self, float_frame):
casted = DataFrame(float_frame._data, dtype=int)
expected = DataFrame(float_frame._series, dtype=int)
assert_frame_equal(casted, expected)
casted = DataFrame(float_frame._data, dtype=np.int32)
expected = DataFrame(float_frame._series, dtype=np.int32)
assert_frame_equal(casted, expected)
def test_consolidate(self, float_frame):
float_frame['E'] = 7.
consolidated = float_frame._consolidate()
assert len(consolidated._data.blocks) == 1
# Ensure copy, do I want this?
recons = consolidated._consolidate()
assert recons is not consolidated
tm.assert_frame_equal(recons, consolidated)
float_frame['F'] = 8.
assert len(float_frame._data.blocks) == 3
float_frame._consolidate(inplace=True)
assert len(float_frame._data.blocks) == 1
def test_consolidate_inplace(self, float_frame):
frame = float_frame.copy() # noqa
# triggers in-place consolidation
for letter in range(ord('A'), ord('Z')):
float_frame[chr(letter)] = chr(letter)
def test_values_consolidate(self, float_frame):
float_frame['E'] = 7.
assert not float_frame._data.is_consolidated()
_ = float_frame.values # noqa
assert float_frame._data.is_consolidated()
def test_modify_values(self, float_frame):
float_frame.values[5] = 5
assert (float_frame.values[5] == 5).all()
# unconsolidated
float_frame['E'] = 7.
float_frame.values[6] = 6
assert (float_frame.values[6] == 6).all()
def test_boolean_set_uncons(self, float_frame):
float_frame['E'] = 7.
expected = float_frame.values.copy()
expected[expected > 1] = 2
float_frame[float_frame > 1] = 2
assert_almost_equal(expected, float_frame.values)
def test_values_numeric_cols(self, float_frame):
float_frame['foo'] = 'bar'
values = float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
def test_values_lcd(self, mixed_float_frame, mixed_int_frame):
# mixed lcd
values = mixed_float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_float_frame[['A', 'B', 'C']].values
assert values.dtype == np.float32
values = mixed_float_frame[['C']].values
assert values.dtype == np.float16
# GH 10364
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_int_frame[['A', 'D']].values
assert values.dtype == np.int64
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C']].values
assert values.dtype == np.float64
# as B and C are both unsigned, no forcing to float is needed
values = mixed_int_frame[['B', 'C']].values
assert values.dtype == np.uint64
values = mixed_int_frame[['A', 'C']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C', 'D']].values
assert values.dtype == np.int64
values = mixed_int_frame[['A']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C']].values
assert values.dtype == np.uint8
def test_constructor_with_convert(self):
# this is actually mostly a test of lib.maybe_convert_objects
# #2845
df = DataFrame({'A': [2 ** 63 - 1]})
result = df['A']
expected = Series(np.asarray([2 ** 63 - 1], np.int64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2 ** 63]})
result = df['A']
expected = Series(np.asarray([2 ** 63], np.uint64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [datetime(2005, 1, 1), True]})
result = df['A']
expected = Series(np.asarray([datetime(2005, 1, 1), True], np.object_),
name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [None, 1]})
result = df['A']
expected = Series(np.asarray([np.nan, 1], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0, 2]})
result = df['A']
expected = Series(np.asarray([1.0, 2], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, 3]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, 3.0]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, 3.0], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, True]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0, None]})
result = df['A']
expected = Series(np.asarray([1.0, np.nan], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, None]})
result = df['A']
expected = Series(np.asarray(
[1.0 + 2.0j, np.nan], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2.0, 1, True, None]})
result = df['A']
expected = Series(np.asarray(
[2.0, 1, True, None], np.object_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2.0, 1, datetime(2006, 1, 1), None]})
result = df['A']
expected = Series(np.asarray([2.0, 1, datetime(2006, 1, 1),
None], np.object_), name='A')
assert_series_equal(result, expected)
def test_construction_with_mixed(self, float_string_frame):
# test construction edge cases with mixed types
# f7u12, this does not work without extensive workaround
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check dtypes
result = df.get_dtype_counts().sort_values()
expected = Series({'datetime64[ns]': 3})
# mixed-type frames
float_string_frame['datetime'] = datetime.now()
float_string_frame['timedelta'] = timedelta(days=1, seconds=1)
assert float_string_frame['datetime'].dtype == 'M8[ns]'
assert float_string_frame['timedelta'].dtype == 'm8[ns]'
result = float_string_frame.get_dtype_counts().sort_values()
expected = Series({'float64': 4,
'object': 1,
'datetime64[ns]': 1,
'timedelta64[ns]': 1}).sort_values()
assert_series_equal(result, expected)
def test_construction_with_conversions(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype='timedelta64[s]')
df = DataFrame(index=range(3))
df['A'] = arr
expected = DataFrame({'A': pd.timedelta_range('00:00:01', periods=3,
freq='s')},
index=range(3))
assert_frame_equal(df, expected)
expected = DataFrame({
'dt1': Timestamp('20130101'),
'dt2': date_range('20130101', periods=3),
# 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
}, index=range(3))
df = DataFrame(index=range(3))
df['dt1'] = np.datetime64('2013-01-01')
df['dt2'] = np.array(['2013-01-01', '2013-01-02', '2013-01-03'],
dtype='datetime64[D]')
# df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01
# 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
assert_frame_equal(df, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
data = list(itertools.repeat((datetime(2001, 1, 1),
"aa", 20), 9))
return DataFrame(data=data,
columns=["A", "B", "C"],
dtype=dtype)
pytest.raises(NotImplementedError, f,
[("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
# 10822
# invalid error message on dt inference
if not compat.is_platform_windows():
f('M8[ns]')
def test_equals_different_blocks(self):
# GH 9330
df0 = pd.DataFrame({"A": ["x", "y"], "B": [1, 2],
"C": ["w", "z"]})
df1 = df0.reset_index()[["A", "B", "C"]]
# this assert verifies that the above operations have
# induced a block rearrangement
assert (df0._data.blocks[0].dtype != df1._data.blocks[0].dtype)
# do the real tests
assert_frame_equal(df0, df1)
assert df0.equals(df1)
assert df1.equals(df0)
def test_copy_blocks(self, float_frame):
# API/ENH 9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the default copy=True, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = df.as_blocks()
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did not change the original DataFrame
assert not _df[column].equals(df[column])
def test_no_copy_blocks(self, float_frame):
# API/ENH 9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the copy=False, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = df.as_blocks(copy=False)
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did change the original DataFrame
assert _df[column].equals(df[column])
def test_copy(self, float_frame, float_string_frame):
cop = float_frame.copy()
cop['E'] = cop['A']
assert 'E' not in float_frame
# copy objects
copy = float_string_frame.copy()
assert copy._data is not float_string_frame._data
def test_pickle(self, float_string_frame, empty_frame, timezone_frame):
unpickled = tm.round_trip_pickle(float_string_frame)
assert_frame_equal(float_string_frame, unpickled)
# buglet
float_string_frame._data.ndim
# empty
unpickled = tm.round_trip_pickle(empty_frame)
repr(unpickled)
# tz frame
unpickled = tm.round_trip_pickle(timezone_frame)
assert_frame_equal(timezone_frame, unpickled)
def test_consolidate_datetime64(self):
# numpy vstack bug
data = """\
starting,ending,measure
2012-06-21 00:00,2012-06-23 07:00,77
2012-06-23 07:00,2012-06-23 16:30,65
2012-06-23 16:30,2012-06-25 08:00,77
2012-06-25 08:00,2012-06-26 12:00,0
2012-06-26 12:00,2012-06-27 08:00,77
"""
df = pd.read_csv(StringIO(data), parse_dates=[0, 1])
ser_starting = df.starting
ser_starting.index = ser_starting.values
ser_starting = ser_starting.tz_localize('US/Eastern')
ser_starting = ser_starting.tz_convert('UTC')
ser_starting.index.name = 'starting'
ser_ending = df.ending
ser_ending.index = ser_ending.values
ser_ending = ser_ending.tz_localize('US/Eastern')
ser_ending = ser_ending.tz_convert('UTC')
ser_ending.index.name = 'ending'
df.starting = ser_starting.index
df.ending = ser_ending.index
tm.assert_index_equal(pd.DatetimeIndex(
df.starting), ser_starting.index)
tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)
def test_is_mixed_type(self, float_frame, float_string_frame):
assert not float_frame._is_mixed_type
assert float_string_frame._is_mixed_type
def test_get_numeric_data(self):
# TODO(wesm): unused?
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'f': Timestamp('20010102')},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, 'float64': 1,
datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
assert_series_equal(result, expected)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'd': np.array([1.] * 10, dtype='float32'),
'e': np.array([1] * 10, dtype='int32'),
'f': np.array([1] * 10, dtype='int16'),
'g': Timestamp('20010102')},
index=np.arange(10))
result = df._get_numeric_data()
expected = df.loc[:, ['a', 'b', 'd', 'e', 'f']]
assert_frame_equal(result, expected)
only_obj = df.loc[:, ['c', 'g']]
result = only_obj._get_numeric_data()
expected = df.loc[:, []]
assert_frame_equal(result, expected)
df = DataFrame.from_dict(
{'a': [1, 2], 'b': ['foo', 'bar'], 'c': [np.pi, np.e]})
result = df._get_numeric_data()
expected = DataFrame.from_dict({'a': [1, 2], 'c': [np.pi, np.e]})
assert_frame_equal(result, expected)
df = result.copy()
result = df._get_numeric_data()
expected = df
assert_frame_equal(result, expected)
def test_get_numeric_data_extension_dtype(self):
# GH 22290
df = DataFrame({
'A': integer_array([-10, np.nan, 0, 10, 20, 30], dtype='Int64'),
'B': Categorical(list('abcabc')),
'C': integer_array([0, 1, 2, 3, np.nan, 5], dtype='UInt8'),
'D': IntervalArray.from_breaks(range(7))})
result = df._get_numeric_data()
expected = df.loc[:, ['A', 'C']]
assert_frame_equal(result, expected)
def test_convert_objects(self, float_string_frame):
oops = float_string_frame.T.T
converted = oops._convert(datetime=True)
assert_frame_equal(converted, float_string_frame)
assert converted['A'].dtype == np.float64
# force numeric conversion
float_string_frame['H'] = '1.'
float_string_frame['I'] = '1'
# add in some items that will be nan
length = len(float_string_frame)
float_string_frame['J'] = '1.'
float_string_frame['K'] = '1'
float_string_frame.loc[0:5, ['J', 'K']] = 'garbled'
converted = float_string_frame._convert(datetime=True, numeric=True)
assert converted['H'].dtype == 'float64'
assert converted['I'].dtype == 'int64'
assert converted['J'].dtype == 'float64'
assert converted['K'].dtype == 'float64'
assert len(converted['J'].dropna()) == length - 5
assert len(converted['K'].dropna()) == length - 5
# via astype
converted = float_string_frame.copy()
converted['H'] = converted['H'].astype('float64')
converted['I'] = converted['I'].astype('int64')
assert converted['H'].dtype == 'float64'
assert converted['I'].dtype == 'int64'
# via astype, but errors
converted = float_string_frame.copy()
with tm.assert_raises_regex(ValueError, 'invalid literal'):
converted['H'].astype('int32')
# mixed in a single column
df = DataFrame(dict(s=Series([1, 'na', 3, 4])))
result = df._convert(datetime=True, numeric=True)
expected = DataFrame(dict(s=Series([1, np.nan, 3, 4])))
assert_frame_equal(result, expected)
def test_convert_objects_no_conversion(self):
mixed1 = DataFrame(
{'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']})
mixed2 = mixed1._convert(datetime=True)
assert_frame_equal(mixed1, mixed2)
def test_infer_objects(self):
# GH 11221
df = DataFrame({'a': ['a', 1, 2, 3],
'b': ['b', 2.0, 3.0, 4.1],
'c': ['c', datetime(2016, 1, 1),
datetime(2016, 1, 2),
datetime(2016, 1, 3)],
'd': [1, 2, 3, 'd']},
columns=['a', 'b', 'c', 'd'])
df = df.iloc[1:].infer_objects()
assert df['a'].dtype == 'int64'
assert df['b'].dtype == 'float64'
assert df['c'].dtype == 'M8[ns]'
assert df['d'].dtype == 'object'
expected = DataFrame({'a': [1, 2, 3],
'b': [2.0, 3.0, 4.1],
'c': [datetime(2016, 1, 1),
datetime(2016, 1, 2),
datetime(2016, 1, 3)],
'd': [2, 3, 'd']},
columns=['a', 'b', 'c', 'd'])
# reconstruct frame to verify inference is same
tm.assert_frame_equal(df.reset_index(drop=True), expected)
def test_stale_cached_series_bug_473(self):
# this is chained, but ok
with option_context('chained_assignment', None):
Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),
columns=('e', 'f', 'g', 'h'))
repr(Y)
Y['e'] = Y['e'].astype('object')
Y['g']['c'] = np.NaN
repr(Y)
result = Y.sum() # noqa
exp = Y['g'].sum() # noqa
assert | pd.isna(Y['g']['c']) | pandas.isna |
"""
Use the ``MNLDiscreteChoiceModel`` class to train a choice module using
multinomial logit and make subsequent choice predictions.
"""
from __future__ import print_function, division
import abc
import logging
import numpy as np
import pandas as pd
from patsy import dmatrix
from prettytable import PrettyTable
from zbox import toolz as tz
from . import util
from ..exceptions import ModelEvaluationError
from ..urbanchoice import interaction, mnl
from ..utils import yamlio
from ..utils.logutil import log_start_finish
from urbansim_defaults.randomfile import fixedrandomseed, seednum
logger = logging.getLogger(__name__)
def unit_choice(chooser_ids, alternative_ids, probabilities):
"""
Have a set of choosers choose from among alternatives according
to a probability distribution. Choice is binary: each
alternative can only be chosen once.
Parameters
----------
chooser_ids : 1d array_like
Array of IDs of the agents that are making choices.
alternative_ids : 1d array_like
Array of IDs of alternatives among which agents are making choices.
probabilities : 1d array_like
The probability that an agent will choose an alternative.
Must be the same shape as `alternative_ids`. Unavailable
alternatives should have a probability of 0.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
"""
chooser_ids = np.asanyarray(chooser_ids)
alternative_ids = np.asanyarray(alternative_ids)
probabilities = np.asanyarray(probabilities)
logger.debug(
'start: unit choice with {} choosers and {} alternatives'.format(
len(chooser_ids), len(alternative_ids)))
choices = pd.Series(index=chooser_ids)
if probabilities.sum() == 0:
# return all nan if there are no available units
return choices
# probabilities need to sum to 1 for np.random.choice
probabilities = probabilities / probabilities.sum()
# need to see if there are as many available alternatives as choosers
n_available = np.count_nonzero(probabilities)
n_choosers = len(chooser_ids)
n_to_choose = n_choosers if n_choosers < n_available else n_available
if fixedrandomseed==0: np.random.seed(seednum)
chosen = np.random.choice(
alternative_ids, size=n_to_choose, replace=False, p=probabilities)
# if there are fewer available units than choosers we need to pick
# which choosers get a unit
if n_to_choose == n_available:
if fixedrandomseed==0: np.random.seed(seednum)
chooser_ids = np.random.choice(
chooser_ids, size=n_to_choose, replace=False)
choices[chooser_ids] = chosen
logger.debug('finish: unit choice')
return choices
# define the minimum interface a class must have in order to
# look like we expect DCMs to look
class DiscreteChoiceModel(object):
"""
Abstract base class for discrete choice models.
"""
__metaclass__ = abc.ABCMeta
@staticmethod
def _check_prob_choice_mode_compat(probability_mode, choice_mode):
"""
        Check that the probability and choice modes are compatible with
each other. Currently 'single_chooser' must be paired with
'aggregate' and 'full_product' must be paired with 'individual'.
"""
if (probability_mode == 'full_product' and
choice_mode == 'aggregate'):
raise ValueError(
"'full_product' probability mode is not compatible with "
"'aggregate' choice mode")
if (probability_mode == 'single_chooser' and
choice_mode == 'individual'):
raise ValueError(
"'single_chooser' probability mode is not compatible with "
"'individual' choice mode")
@staticmethod
def _check_prob_mode_interaction_compat(
probability_mode, interaction_predict_filters):
"""
The 'full_product' probability mode is currently incompatible with
post-interaction prediction filters, so make sure we don't have
both of those.
"""
if (interaction_predict_filters is not None and
probability_mode == 'full_product'):
raise ValueError(
"interaction filters may not be used in "
"'full_product' mode")
@abc.abstractmethod
def apply_fit_filters(self, choosers, alternatives):
choosers = util.apply_filter_query(choosers, self.choosers_fit_filters)
alternatives = util.apply_filter_query(
alternatives, self.alts_fit_filters)
return choosers, alternatives
@abc.abstractmethod
def apply_predict_filters(self, choosers, alternatives):
choosers = util.apply_filter_query(
choosers, self.choosers_predict_filters)
alternatives = util.apply_filter_query(
alternatives, self.alts_predict_filters)
return choosers, alternatives
@abc.abstractproperty
def fitted(self):
pass
@abc.abstractmethod
def probabilities(self):
pass
@abc.abstractmethod
def summed_probabilities(self):
pass
@abc.abstractmethod
def fit(self):
pass
@abc.abstractmethod
def predict(self):
pass
@abc.abstractmethod
def choosers_columns_used(self):
pass
@abc.abstractmethod
def alts_columns_used(self):
pass
@abc.abstractmethod
def interaction_columns_used(self):
pass
@abc.abstractmethod
def columns_used(self):
pass
class MNLDiscreteChoiceModel(DiscreteChoiceModel):
"""
A discrete choice model with the ability to store an estimated
model and predict new data based on the model.
Based on multinomial logit.
Parameters
----------
model_expression : str, iterable, or dict
A patsy model expression. Should contain only a right-hand side.
sample_size : int
Number of choices to sample for estimating the model.
probability_mode : str, optional
Specify the method to use for calculating probabilities
during prediction.
Available string options are 'single_chooser' and 'full_product'.
In "single chooser" mode one agent is chosen for calculating
probabilities across all alternatives. In "full product" mode
probabilities are calculated for every chooser across all alternatives.
Currently "single chooser" mode must be used with a `choice_mode`
of 'aggregate' and "full product" mode must be used with a
`choice_mode` of 'individual'.
choice_mode : str, optional
Specify the method to use for making choices among alternatives.
Available string options are 'individual' and 'aggregate'.
In "individual" mode choices will be made separately for each chooser.
In "aggregate" mode choices are made for all choosers at once.
Aggregate mode implies that an alternative chosen by one agent
is unavailable to other agents and that the same probabilities
can be used for all choosers.
Currently "individual" mode must be used with a `probability_mode`
of 'full_product' and "aggregate" mode must be used with a
`probability_mode` of 'single_chooser'.
choosers_fit_filters : list of str, optional
Filters applied to choosers table before fitting the model.
choosers_predict_filters : list of str, optional
Filters applied to the choosers table before calculating
new data points.
alts_fit_filters : list of str, optional
Filters applied to the alternatives table before fitting the model.
alts_predict_filters : list of str, optional
Filters applied to the alternatives table before calculating
new data points.
interaction_predict_filters : list of str, optional
Filters applied to the merged choosers/alternatives table
before predicting agent choices.
estimation_sample_size : int, optional
Whether to sample choosers during estimation
(needs to be applied after choosers_fit_filters).
prediction_sample_size : int, optional
Whether (and how much) to sample alternatives during prediction.
Note that this can lead to multiple choosers picking the same
alternative.
choice_column : optional
Name of the column in the `alternatives` table that choosers
should choose. e.g. the 'building_id' column. If not provided
the alternatives index is used.
name : optional
Optional descriptive name for this model that may be used
in output.
"""
def __init__(
self, model_expression, sample_size,
probability_mode='full_product', choice_mode='individual',
choosers_fit_filters=None, choosers_predict_filters=None,
alts_fit_filters=None, alts_predict_filters=None,
interaction_predict_filters=None,
estimation_sample_size=None,
prediction_sample_size=None,
choice_column=None, name=None):
self._check_prob_choice_mode_compat(probability_mode, choice_mode)
self._check_prob_mode_interaction_compat(
probability_mode, interaction_predict_filters)
self.model_expression = model_expression
self.sample_size = sample_size
self.probability_mode = probability_mode
self.choice_mode = choice_mode
self.choosers_fit_filters = choosers_fit_filters
self.choosers_predict_filters = choosers_predict_filters
self.alts_fit_filters = alts_fit_filters
self.alts_predict_filters = alts_predict_filters
self.interaction_predict_filters = interaction_predict_filters
self.estimation_sample_size = estimation_sample_size
self.prediction_sample_size = prediction_sample_size
self.choice_column = choice_column
self.name = name if name is not None else 'MNLDiscreteChoiceModel'
self.sim_pdf = None
self.log_likelihoods = None
self.fit_parameters = None
@classmethod
def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a DiscreteChoiceModel instance from a saved YAML configuration.
        Arguments are mutually exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
MNLDiscreteChoiceModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
model = cls(
cfg['model_expression'],
cfg['sample_size'],
probability_mode=cfg.get('probability_mode', 'full_product'),
choice_mode=cfg.get('choice_mode', 'individual'),
choosers_fit_filters=cfg.get('choosers_fit_filters', None),
choosers_predict_filters=cfg.get('choosers_predict_filters', None),
alts_fit_filters=cfg.get('alts_fit_filters', None),
alts_predict_filters=cfg.get('alts_predict_filters', None),
interaction_predict_filters=cfg.get(
'interaction_predict_filters', None),
estimation_sample_size=cfg.get('estimation_sample_size', None),
prediction_sample_size=cfg.get('prediction_sample_size', None),
choice_column=cfg.get('choice_column', None),
name=cfg.get('name', None)
)
if cfg.get('log_likelihoods', None):
model.log_likelihoods = cfg['log_likelihoods']
if cfg.get('fit_parameters', None):
model.fit_parameters = pd.DataFrame(cfg['fit_parameters'])
logger.debug('loaded LCM model {} from YAML'.format(model.name))
return model
@property
def str_model_expression(self):
"""
Model expression as a string suitable for use with patsy/statsmodels.
"""
return util.str_model_expression(
self.model_expression, add_constant=False)
def apply_fit_filters(self, choosers, alternatives):
"""
Filter `choosers` and `alternatives` for fitting.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame
"""
return super(MNLDiscreteChoiceModel, self).apply_fit_filters(
choosers, alternatives)
def apply_predict_filters(self, choosers, alternatives):
"""
Filter `choosers` and `alternatives` for prediction.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame
"""
return super(MNLDiscreteChoiceModel, self).apply_predict_filters(
choosers, alternatives)
def fit(self, choosers, alternatives, current_choice):
"""
Fit and save model parameters based on given data.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
current_choice : pandas.Series or any
A Series describing the `alternatives` currently chosen
by the `choosers`. Should have an index matching `choosers`
and values matching the index of `alternatives`.
If a non-Series is given it should be a column in `choosers`.
Returns
-------
log_likelihoods : dict
            Dict of log-likelihood values describing the quality of the
model fit. Will have keys 'null', 'convergence', and 'ratio'.
"""
logger.debug('start: fit LCM model {}'.format(self.name))
if not isinstance(current_choice, pd.Series):
current_choice = choosers[current_choice]
choosers, alternatives = self.apply_fit_filters(choosers, alternatives)
if self.estimation_sample_size:
if fixedrandomseed==0: np.random.seed(seednum)
choosers = choosers.loc[np.random.choice(
choosers.index,
min(self.estimation_sample_size, len(choosers)),
replace=False)]
current_choice = current_choice.loc[choosers.index]
_, merged, chosen = interaction.mnl_interaction_dataset(
choosers, alternatives, self.sample_size, current_choice)
model_design = dmatrix(
self.str_model_expression, data=merged, return_type='dataframe')
if len(merged) != model_design.values.shape[0]:
raise ModelEvaluationError(
'Estimated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
self.log_likelihoods, self.fit_parameters = mnl.mnl_estimate(
model_design.values, chosen, self.sample_size)
self.fit_parameters.index = model_design.columns
logger.debug('finish: fit LCM model {}'.format(self.name))
return self.log_likelihoods
@property
def fitted(self):
"""
True if model is ready for prediction.
"""
return self.fit_parameters is not None
def assert_fitted(self):
"""
Raises `RuntimeError` if the model is not ready for prediction.
"""
if not self.fitted:
raise RuntimeError('Model has not been fit.')
def report_fit(self):
"""
Print a report of the fit results.
"""
if not self.fitted:
print('Model not yet fit.')
return
        print('Null Log-likelihood: {0:.3f}'.format(
            self.log_likelihoods['null']))
        print('Log-likelihood at convergence: {0:.3f}'.format(
            self.log_likelihoods['convergence']))
        print('Log-likelihood Ratio: {0:.3f}\n'.format(
            self.log_likelihoods['ratio']))
tbl = PrettyTable(
['Component', ])
tbl = PrettyTable()
tbl.add_column('Component', self.fit_parameters.index.values)
for col in ('Coefficient', 'Std. Error', 'T-Score'):
tbl.add_column(col, self.fit_parameters[col].values)
tbl.align['Component'] = 'l'
tbl.float_format = '.3'
print(tbl)
def probabilities(self, choosers, alternatives, filter_tables=True):
"""
Returns the probabilities for a set of choosers to choose
from among a set of alternatives.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
filter_tables : bool, optional
If True, filter `choosers` and `alternatives` with prediction
filters before calculating probabilities.
Returns
-------
probabilities : pandas.Series
Probability of selection associated with each chooser
and alternative. Index will be a MultiIndex with alternative
            IDs in the inner index and chooser IDs in the outer index.
"""
logger.debug('start: calculate probabilities for LCM model {}'.format(
self.name))
self.assert_fitted()
if filter_tables:
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if self.prediction_sample_size is not None:
sample_size = self.prediction_sample_size
else:
sample_size = len(alternatives)
if self.probability_mode == 'single_chooser':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers.head(1), alternatives, sample_size)
elif self.probability_mode == 'full_product':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers, alternatives, sample_size)
else:
raise ValueError(
'Unrecognized probability_mode option: {}'.format(
self.probability_mode))
merged = util.apply_filter_query(
merged, self.interaction_predict_filters)
model_design = dmatrix(
self.str_model_expression, data=merged, return_type='dataframe')
if len(merged) != model_design.values.shape[0]:
raise ModelEvaluationError(
'Simulated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
# get the order of the coefficients in the same order as the
# columns in the design matrix
coeffs = [self.fit_parameters['Coefficient'][x]
for x in model_design.columns]
# probabilities are returned from mnl_simulate as a 2d array
# with choosers along rows and alternatives along columns
if self.probability_mode == 'single_chooser':
numalts = len(merged)
else:
numalts = sample_size
probabilities = mnl.mnl_simulate(
model_design.values,
coeffs,
numalts=numalts, returnprobs=True)
# want to turn probabilities into a Series with a MultiIndex
# of chooser IDs and alternative IDs.
# indexing by chooser ID will get you the probabilities
# across alternatives for that chooser
mi = pd.MultiIndex.from_arrays(
[merged['join_index'].values, merged.index.values],
names=('chooser_id', 'alternative_id'))
probabilities = pd.Series(probabilities.flatten(), index=mi)
logger.debug('finish: calculate probabilities for LCM model {}'.format(
self.name))
return probabilities
def summed_probabilities(self, choosers, alternatives):
"""
Calculate total probability associated with each alternative.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Total probability associated with each alternative.
"""
def normalize(s):
return s / s.sum()
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
probs = self.probabilities(choosers, alternatives, filter_tables=False)
        # group by the alternatives ID and sum
if self.probability_mode == 'single_chooser':
return (
normalize(probs) * len(choosers)
).reset_index(level=0, drop=True)
elif self.probability_mode == 'full_product':
return probs.groupby(level=0).apply(normalize)\
.groupby(level=1).sum()
else:
raise ValueError(
'Unrecognized probability_mode option: {}'.format(
self.probability_mode))
def predict(self, choosers, alternatives, debug=False):
"""
Choose from among alternatives for a group of agents.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
debug : bool
If debug is set to true, will set the variable "sim_pdf" on
the object to store the probabilities for mapping of the
outcome.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
"""
self.assert_fitted()
logger.debug('start: predict LCM model {}'.format(self.name))
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if len(choosers) == 0:
return pd.Series()
if len(alternatives) == 0:
return pd.Series(index=choosers.index)
probabilities = self.probabilities(
choosers, alternatives, filter_tables=False)
if debug:
self.sim_pdf = probabilities
if self.choice_mode == 'aggregate':
choices = unit_choice(
choosers.index.values,
probabilities.index.get_level_values('alternative_id').values,
probabilities.values)
elif self.choice_mode == 'individual':
def mkchoice(probs):
probs.reset_index(0, drop=True, inplace=True)
if fixedrandomseed==0: np.random.seed(seednum)
return np.random.choice(
probs.index.values, p=probs.values / probs.sum())
choices = probabilities.groupby(level='chooser_id', sort=False)\
.apply(mkchoice)
else:
raise ValueError(
'Unrecognized choice_mode option: {}'.format(self.choice_mode))
logger.debug('finish: predict LCM model {}'.format(self.name))
return choices
def to_dict(self):
"""
        Return a dict representation of an MNLDiscreteChoiceModel
instance.
"""
return {
'model_type': 'discretechoice',
'model_expression': self.model_expression,
'sample_size': self.sample_size,
'name': self.name,
'probability_mode': self.probability_mode,
'choice_mode': self.choice_mode,
'choosers_fit_filters': self.choosers_fit_filters,
'choosers_predict_filters': self.choosers_predict_filters,
'alts_fit_filters': self.alts_fit_filters,
'alts_predict_filters': self.alts_predict_filters,
'interaction_predict_filters': self.interaction_predict_filters,
'estimation_sample_size': self.estimation_sample_size,
'prediction_sample_size': self.prediction_sample_size,
'choice_column': self.choice_column,
'fitted': self.fitted,
'log_likelihoods': self.log_likelihoods,
'fit_parameters': (yamlio.frame_to_yaml_safe(self.fit_parameters)
if self.fitted else None)
}
def to_yaml(self, str_or_buffer=None):
"""
        Save a model representation to YAML.
Parameters
----------
str_or_buffer : str or file like, optional
By default a YAML string is returned. If a string is
given here the YAML will be written to that file.
If an object with a ``.write`` method is given the
YAML will be written to that object.
Returns
-------
j : str
YAML is string if `str_or_buffer` is not given.
"""
logger.debug('serializing LCM model {} to YAML'.format(self.name))
if (not isinstance(self.probability_mode, str) or
not isinstance(self.choice_mode, str)):
raise TypeError(
'Cannot serialize model with non-string probability_mode '
'or choice_mode attributes.')
return yamlio.convert_to_yaml(self.to_dict(), str_or_buffer)
def choosers_columns_used(self):
"""
Columns from the choosers table that are used for filtering.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.choosers_predict_filters),
util.columns_in_filters(self.choosers_fit_filters))))
def alts_columns_used(self):
"""
Columns from the alternatives table that are used for filtering.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.alts_predict_filters),
util.columns_in_filters(self.alts_fit_filters))))
def interaction_columns_used(self):
"""
Columns from the interaction dataset used for filtering and in
the model. These may come originally from either the choosers or
alternatives tables.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.interaction_predict_filters),
util.columns_in_formula(self.model_expression))))
def columns_used(self):
"""
Columns from any table used in the model. May come from either
the choosers or alternatives tables.
"""
return list(tz.unique(tz.concatv(
self.choosers_columns_used(),
self.alts_columns_used(),
self.interaction_columns_used())))
@classmethod
def fit_from_cfg(cls, choosers, chosen_fname, alternatives, cfgname, outcfgname=None):
"""
Parameters
----------
choosers : DataFrame
A dataframe in which rows represent choosers.
chosen_fname : string
A string indicating the column in the choosers dataframe which
gives which alternatives the choosers have chosen.
alternatives : DataFrame
A table of alternatives. It should include the choices
from the choosers table as well as other alternatives from
which to sample. Values in choosers[chosen_fname] should index
into the alternatives dataframe.
cfgname : string
The name of the yaml config file from which to read the discrete
choice model.
outcfgname : string, optional (default cfgname)
The name of the output yaml config file where estimation results are written into.
Returns
-------
lcm : MNLDiscreteChoiceModel which was used to fit
"""
logger.debug('start: fit from configuration {}'.format(cfgname))
lcm = cls.from_yaml(str_or_buffer=cfgname)
lcm.fit(choosers, alternatives, choosers[chosen_fname])
lcm.report_fit()
outcfgname = outcfgname or cfgname
lcm.to_yaml(str_or_buffer=outcfgname)
logger.debug('finish: fit into configuration {}'.format(outcfgname))
return lcm
@classmethod
def predict_from_cfg(cls, choosers, alternatives, cfgname=None, cfg=None,
alternative_ratio=2.0, debug=False):
"""
Simulate choices for the specified choosers
Parameters
----------
choosers : DataFrame
A dataframe of agents doing the choosing.
alternatives : DataFrame
A dataframe of locations which the choosers are locating in and
which have a supply.
cfgname : string
The name of the yaml config file from which to read the discrete
choice model.
cfg: string
an ordered yaml string of the model discrete choice model configuration.
Used to read config from memory in lieu of loading cfgname from disk.
alternative_ratio : float, optional
Above the ratio of alternatives to choosers (default of 2.0),
the alternatives will be sampled to meet this ratio
(for performance reasons).
debug : boolean, optional (default False)
Whether to generate debug information on the model.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
lcm : MNLDiscreteChoiceModel which was used to predict
"""
logger.debug('start: predict from configuration {}'.format(cfgname))
if cfgname:
lcm = cls.from_yaml(str_or_buffer=cfgname)
elif cfg:
lcm = cls.from_yaml(yaml_str=cfg)
else:
msg = 'predict_from_cfg requires a configuration via the cfgname or cfg arguments'
logger.error(msg)
raise ValueError(msg)
if len(alternatives) > len(choosers) * alternative_ratio:
logger.info(
("Alternative ratio exceeded: %d alternatives "
"and only %d choosers") %
(len(alternatives), len(choosers)))
if fixedrandomseed==0: np.random.seed(seednum)
idxes = np.random.choice(
alternatives.index, size=int(len(choosers) *
alternative_ratio),
replace=False)
alternatives = alternatives.loc[idxes]
logger.info(
" after sampling %d alternatives are available\n" %
len(alternatives))
new_units = lcm.predict(choosers, alternatives, debug=debug)
print("Assigned %d choosers to new units" % len(new_units.dropna()))
logger.debug('finish: predict from configuration {}'.format(cfgname))
return new_units, lcm
class MNLDiscreteChoiceModelGroup(DiscreteChoiceModel):
"""
Manages a group of discrete choice models that refer to different
segments of choosers.
Model names must match the segment names after doing a pandas groupby.
Parameters
----------
segmentation_col : str
Name of a column in the table of choosers. Will be used to perform
a pandas groupby on the choosers table.
remove_alts : bool, optional
Specify how to handle alternatives between prediction for different
models. If False, the alternatives table is not modified between
predictions. If True, alternatives that have been chosen
are removed from the alternatives table before doing another
round of prediction.
name : str, optional
A name that may be used in places to identify this group.
"""
def __init__(self, segmentation_col, remove_alts=False, name=None):
self.segmentation_col = segmentation_col
self.remove_alts = remove_alts
self.name = name if name is not None else 'MNLDiscreteChoiceModelGroup'
self.models = {}
def add_model(self, model):
"""
Add an MNLDiscreteChoiceModel instance.
Parameters
----------
model : MNLDiscreteChoiceModel
Should have a ``.name`` attribute matching one of the segments
in the choosers table.
"""
logger.debug(
'adding model {} to LCM group {}'.format(model.name, self.name))
self.models[model.name] = model
def add_model_from_params(
self, name, model_expression, sample_size,
probability_mode='full_product', choice_mode='individual',
choosers_fit_filters=None, choosers_predict_filters=None,
alts_fit_filters=None, alts_predict_filters=None,
interaction_predict_filters=None, estimation_sample_size=None,
prediction_sample_size=None, choice_column=None):
"""
Add a model by passing parameters through to MNLDiscreteChoiceModel.
Parameters
----------
name
Must match a segment in the choosers table.
model_expression : str, iterable, or dict
A patsy model expression. Should contain only a right-hand side.
sample_size : int
Number of choices to sample for estimating the model.
probability_mode : str, optional
Specify the method to use for calculating probabilities
during prediction.
Available string options are 'single_chooser' and 'full_product'.
In "single chooser" mode one agent is chosen for calculating
probabilities across all alternatives. In "full product" mode
probabilities are calculated for every chooser across all
alternatives.
choice_mode : str or callable, optional
Specify the method to use for making choices among alternatives.
Available string options are 'individual' and 'aggregate'.
In "individual" mode choices will be made separately for each
chooser. In "aggregate" mode choices are made for all choosers at
once. Aggregate mode implies that an alternative chosen by one
agent is unavailable to other agents and that the same
probabilities can be used for all choosers.
choosers_fit_filters : list of str, optional
Filters applied to choosers table before fitting the model.
choosers_predict_filters : list of str, optional
Filters applied to the choosers table before calculating
new data points.
alts_fit_filters : list of str, optional
Filters applied to the alternatives table before fitting the model.
alts_predict_filters : list of str, optional
Filters applied to the alternatives table before calculating
new data points.
interaction_predict_filters : list of str, optional
Filters applied to the merged choosers/alternatives table
before predicting agent choices.
estimation_sample_size : int, optional
Whether to sample choosers during estimation
(needs to be applied after choosers_fit_filters)
prediction_sample_size : int, optional
Whether (and how much) to sample alternatives during prediction.
Note that this can lead to multiple choosers picking the same
alternative.
choice_column : optional
Name of the column in the `alternatives` table that choosers
should choose. e.g. the 'building_id' column. If not provided
the alternatives index is used.
"""
logger.debug('adding model {} to LCM group {}'.format(name, self.name))
self.models[name] = MNLDiscreteChoiceModel(
model_expression, sample_size,
probability_mode, choice_mode,
choosers_fit_filters, choosers_predict_filters,
alts_fit_filters, alts_predict_filters,
interaction_predict_filters, estimation_sample_size,
prediction_sample_size, choice_column, name)
def _iter_groups(self, data):
"""
Iterate over the groups in `data` after grouping by
`segmentation_col`. Skips any groups for which there
is no model stored.
Yields tuples of (name, df) where name is the group key
and df is the group DataFrame.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
"""
groups = data.groupby(self.segmentation_col)
for name, group in groups:
if name not in self.models:
continue
logger.debug(
'returning group {} in LCM group {}'.format(name, self.name))
yield name, group
def apply_fit_filters(self, choosers, alternatives):
"""
Filter `choosers` and `alternatives` for fitting.
This is done by filtering each submodel and concatenating
the results.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame
"""
ch = []
alts = []
for name, df in self._iter_groups(choosers):
filtered_choosers, filtered_alts = \
self.models[name].apply_fit_filters(df, alternatives)
ch.append(filtered_choosers)
alts.append(filtered_alts)
return pd.concat(ch), pd.concat(alts)
def apply_predict_filters(self, choosers, alternatives):
"""
Filter `choosers` and `alternatives` for prediction.
This is done by filtering each submodel and concatenating
the results.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame
"""
ch = []
alts = []
for name, df in self._iter_groups(choosers):
filtered_choosers, filtered_alts = \
self.models[name].apply_predict_filters(df, alternatives)
ch.append(filtered_choosers)
alts.append(filtered_alts)
filtered_choosers = pd.concat(ch)
filtered_alts = pd.concat(alts)
return filtered_choosers, filtered_alts.drop_duplicates()
def fit(self, choosers, alternatives, current_choice):
"""
Fit and save models based on given data after segmenting
the `choosers` table.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column with the same name as the .segmentation_col
attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
current_choice
Name of column in `choosers` that indicates which alternative
they have currently chosen.
Returns
-------
log_likelihoods : dict of dict
Keys will be model names and values will be dictionaries of
            log-likelihood values as returned by MNLDiscreteChoiceModel.fit.
"""
with log_start_finish(
'fit models in LCM group {}'.format(self.name), logger):
return {
name: self.models[name].fit(df, alternatives, current_choice)
for name, df in self._iter_groups(choosers)}
@property
def fitted(self):
"""
Whether all models in the group have been fitted.
"""
return (all(m.fitted for m in self.models.values())
if self.models else False)
def probabilities(self, choosers, alternatives):
"""
        Returns alternative probabilities for each chooser segment as
a dictionary keyed by segment name.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column matching the .segmentation_col attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
        probabilities : dict of pandas.Series
"""
logger.debug(
'start: calculate probabilities in LCM group {}'.format(self.name))
probs = {}
for name, df in self._iter_groups(choosers):
probs[name] = self.models[name].probabilities(df, alternatives)
logger.debug(
'finish: calculate probabilities in LCM group {}'.format(
self.name))
return probs
def summed_probabilities(self, choosers, alternatives):
"""
Returns the sum of probabilities for alternatives across all
chooser segments.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column matching the .segmentation_col attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Summed probabilities from each segment added together.
"""
if len(alternatives) == 0 or len(choosers) == 0:
return pd.Series()
logger.debug(
'start: calculate summed probabilities in LCM group {}'.format(
self.name))
probs = []
for name, df in self._iter_groups(choosers):
probs.append(
self.models[name].summed_probabilities(df, alternatives))
add = tz.curry(pd.Series.add, fill_value=0)
probs = tz.reduce(add, probs)
logger.debug(
'finish: calculate summed probabilities in LCM group {}'.format(
self.name))
return probs
def predict(self, choosers, alternatives, debug=False):
"""
Choose from among alternatives for a group of agents after
segmenting the `choosers` table.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column matching the .segmentation_col attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
debug : bool
If debug is set to true, will set the variable "sim_pdf" on
the object to store the probabilities for mapping of the
outcome.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
"""
logger.debug('start: predict models in LCM group {}'.format(self.name))
results = []
for name, df in self._iter_groups(choosers):
choices = self.models[name].predict(df, alternatives, debug=debug)
if self.remove_alts and len(alternatives) > 0:
alternatives = alternatives.loc[
~alternatives.index.isin(choices)]
results.append(choices)
logger.debug(
'finish: predict models in LCM group {}'.format(self.name))
return pd.concat(results) if results else | pd.Series() | pandas.Series |
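A self-contained sketch of the unit-choice idea implemented above, assuming only numpy and pandas; it mirrors the probability-weighted sampling without importing urbansim, and the IDs and probabilities are made up for illustration.

import numpy as np
import pandas as pd

chooser_ids = np.array([101, 102, 103, 104])
alternative_ids = np.array([1, 2, 3])
probabilities = np.array([0.5, 0.3, 0.2])

choices = pd.Series(index=chooser_ids, dtype=float)
n_to_choose = min(len(chooser_ids), np.count_nonzero(probabilities))
chosen = np.random.choice(alternative_ids, size=n_to_choose,
                          replace=False, p=probabilities / probabilities.sum())
# With fewer available alternatives than choosers, only a random subset of
# choosers receives a unit; the rest stay NaN, as in unit_choice above.
lucky = np.random.choice(chooser_ids, size=n_to_choose, replace=False)
choices[lucky] = chosen
print(choices)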
#%%
import pandas as pd
import numpy as np
import holoviews as hv
import hvplot.pandas
from scipy.sparse.linalg import svds
from scipy.stats import chisquare, chi2_contingency
from sklearn.decomposition import TruncatedSVD
from umoja.ca import CA
import logging
from pathlib import Path
import numpy as np
import hvplot.pandas
import holoviews as hv
import pandas as pd
import git
from sklearn.compose import ColumnTransformer, TransformedTargetRegressor
from sklearn.datasets import load_digits
from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import train_test_split, KFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer, PolynomialFeatures
import xgboost as xgb
from skopt import BayesSearchCV
from sklearn.metrics import f1_score, get_scorer
hv.extension('bokeh')
#%%
X = context.io.load('xente_train')
Y = context.io.load('xente_sample_submission')
# %%
# lets view a sadom customer
X_mode = X.where(lambda df: df.acc == df.acc.sample(1).item()).dropna()
category = pd.DataFrame({'pid': pd.np.argmax(pd.get_dummies(X_mode.PID).to_numpy(), axis=1),
'time': | pd.to_datetime(X_mode.date) | pandas.to_datetime |
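A small standalone illustration of the encoding pattern above, assuming a toy frame; `pd.np` is deprecated, so the sketch uses numpy directly, and the sample values are invented.

import numpy as np
import pandas as pd

toy = pd.DataFrame({'PID': ['a', 'b', 'a', 'c'],
                    'date': ['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04']})

# One-hot encode the product id, then take the argmax to get an integer code,
# and parse the timestamp column in one pass.
category = pd.DataFrame({'pid': np.argmax(pd.get_dummies(toy.PID).to_numpy(), axis=1),
                         'time': pd.to_datetime(toy.date)})
print(category)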
"""Expression Atlas."""
import logging
import os
import sys
from collections import OrderedDict
from typing import List, Tuple, Optional
import pandas as pd
from pandas.core.frame import DataFrame
import xmltodict
from pyorient import OrientDB
from tqdm import tqdm
from ebel.constants import DATA_DIR
from ebel.manager.orientdb import odb_meta, urls
from ebel.manager.rdbms.models import expression_atlas
from ebel.tools import get_standard_name
logger = logging.getLogger(__name__)
class ExpressionAtlas(odb_meta.Graph):
"""ExpressionAtlas."""
def __init__(self, client: OrientDB = None):
"""Init ExpressionAtlas."""
self.client = client
self.biodb_name = 'expression_atlas'
self.urls = {'latest_data': urls.EXPRESSION_ATLAS_EXPERIMENTS}
self.data_dir = os.path.join(DATA_DIR, self.biodb_name)
super().__init__(tables_base=expression_atlas.Base,
urls=self.urls,
biodb_name=self.biodb_name)
def __len__(self):
return self.number_of_generics
def __contains__(self, item):
# TODO: To be implemented
return True
def update(self):
"""Update ExpressionAtlas."""
logger.info("Update ExpressionAtlas")
downloaded = self.download()
if downloaded['latest_data']:
self.extract_files()
self.insert()
def extract_files(self):
"""Extract relevant files."""
os.chdir(self.data_dir)
cmd_temp = "tar -xzf atlas-latest-data.tar.gz --wildcards --no-anchored '{}'"
patterns = ['*.sdrf.txt',
'*.condensed-sdrf.tsv',
'*analytics.tsv',
'*-configuration.xml',
'*.idf.txt',
'*.go.gsea.tsv',
'*.interpro.gsea.tsv',
'*.reactome.gsea.tsv'
]
with tqdm(patterns) as t_patterns:
for pattern in t_patterns:
t_patterns.set_description(f"Extract files with pattern {pattern}")
command = cmd_temp.format(pattern)
os.system(command)
def insert_data(self):
"""Class method."""
pass
def update_interactions(self) -> int:
"""Class method."""
pass
def insert_experiment(self, experiment_name: str, title: str) -> int:
"""Insert individual experiment into SQL database.
Parameters
----------
experiment_name : str
Name of the Expression Atlas experiment.
title : str
Title of experiment.
Returns
-------
Table ID of newly inserted experiment.
"""
experiment = expression_atlas.Experiment(name=experiment_name, title=title)
self.session.add(experiment)
self.session.flush()
self.session.commit()
return experiment.id
def insert(self):
"""Override insert method for SQL data insertion."""
self.recreate_tables()
data_folder = os.scandir(self.data_dir)
for experiment_name in tqdm([(f.name) for f in data_folder if f.is_dir()]):
try:
df_configuration = self.get_configuration(experiment_name)
if isinstance(df_configuration, pd.DataFrame):
df_idf = self.get_idf(experiment_name)
title = df_idf[df_idf.key_name == 'investigation_title'].value.values[0]
experiment_id = self.insert_experiment(experiment_name, title)
groups_strs: Tuple[str, ...] = self.__insert_configuration(df_configuration, experiment_id)
self.__insert_idf(df_idf, experiment_id)
self.__insert_sdrf_condensed(experiment_id, experiment_name)
self.__insert_foldchange(experiment_id, experiment_name, groups_strs)
self.insert_gseas(experiment_id, experiment_name, groups_strs)
except Exception as e:
print(experiment_name)
print(e)
sys.exit()
def __insert_foldchange(self, experiment_id: int, experiment_name: str, groups_strs: Tuple[str, ...]):
df_log2foldchange = self.get_log2foldchange(experiment_name, groups_strs).set_index('group_comparison')
df_group_comparison = self.get_df_group_comparison(experiment_id, groups_strs).set_index('group_comparison')
df_log2foldchange.join(df_group_comparison).to_sql(expression_atlas.FoldChange.__tablename__,
self.engine, if_exists='append', index=False)
def get_df_group_comparison(self, experiment_id: int, groups_strs: Tuple[str, ...]) -> pd.DataFrame:
"""Get group comparison IDs and group comparison columns for pairs of group strings.
Parameters
----------
experiment_id : int
Experiment numerical ID.
groups_strs : tuple
Pairs of gene symbols.
Returns
-------
Pandas DataFrame of 'group_comparison_id' and 'group_comparison'.
"""
data = []
for groups_str in groups_strs:
group_comparison_id = self.session.query(expression_atlas.GroupComparison.id).filter_by(
experiment_id=experiment_id,
group_comparison=groups_str).first().id
data.append((group_comparison_id, groups_str))
return pd.DataFrame(data, columns=['group_comparison_id', 'group_comparison'])
def __insert_configuration(self, df_configuration, experiment_id: int) -> Tuple[str, ...]:
df_configuration['experiment_id'] = experiment_id
df_configuration.to_sql(expression_atlas.GroupComparison.__tablename__, self.engine, if_exists='append',
index=False)
groups_strs = tuple(df_configuration.group_comparison.values)
self.session.flush()
self.session.commit()
return groups_strs
def __insert_idf(self, df_idf: DataFrame, experiment_id: int):
df_idf['experiment_id'] = experiment_id
df_idf.to_sql(expression_atlas.Idf.__tablename__, self.engine, if_exists='append', index=False)
def __insert_sdrf_condensed(self, experiment_id: int, experiment_name: str):
df = self.get_sdrf_condensed(experiment_name)
# organisms = tuple(df[df.parameter == 'organism'].value.unique())
df['experiment_id'] = experiment_id
df.drop(columns=['experiment'], inplace=True)
df.to_sql(expression_atlas.SdrfCondensed.__tablename__, self.engine, if_exists='append', index=False)
def insert_gseas(self, experiment_id: int, experiment_name: str, groups_strs: Tuple[str, ...]):
"""Insert Gene set enrichment analysis.
For more information about parameters see https://www.bioconductor.org/packages/release/bioc/html/piano.html
Args:
experiment_id (int): [description]
experiment_name (str): [description]
groups_strs (Tuple[str, ...]): [description]
"""
df = self.get_gseas(experiment_name, experiment_id, groups_strs)
if isinstance(df, pd.DataFrame):
df.to_sql(expression_atlas.Gsea.__tablename__, self.engine, if_exists='append', index=False)
def get_gseas(self, experiment_name: str, experiment_id: int, groups_strs: Tuple[str]) -> Optional[pd.DataFrame]:
"""Get GSEA data.
Parameters
----------
experiment_name : str
Name of the Expression Atlas experiment.
experiment_id : int
Experiment numerical ID.
groups_strs : tuple
Pairs of gene symbols.
Returns
-------
        If GSEA information is found, returns a pandas DataFrame detailing the GSEAs associated with an experiment.
"""
dfs = []
for groups_str in groups_strs:
for gsea_type in ['go', 'reactome', 'interpro']:
df = self.get_gsea(experiment_name, groups_str, gsea_type)
if isinstance(df, pd.DataFrame):
df['gsea_type'] = gsea_type
df['group_comparison_id'] = self.__get_group_comparison_id(groups_str, experiment_id)
dfs.append(df[df.p_adj_non_dir <= 0.05])
if dfs:
return pd.concat(dfs)
def __get_group_comparison_id(self, groups_str: str, experiment_id: int):
query = self.session.query(expression_atlas.GroupComparison.id)
return query.filter_by(group_comparison=groups_str, experiment_id=experiment_id).first().id
def get_gsea(self, experiment_name: str, groups_str: str, gsea_type: str) -> Optional[pd.DataFrame]:
"""Generate a table of GSEA information for a pair of symbols for a given experiment.
Parameters
----------
experiment_name : str
Name of the Expression Atlas experiment.
groups_str : tuple
Pairs of gene symbol strings.
gsea_type : str
Type of GSEA.
Returns
-------
Returns a pandas DataFrame of GSEA information if file exists matching the passed parameters.
"""
file_path = os.path.join(self.data_dir,
experiment_name,
f"{experiment_name}.{groups_str}.{gsea_type}.gsea.tsv")
if not os.path.exists(file_path):
return
df = pd.read_csv(file_path, sep="\t")
df.columns = [get_standard_name(x) for x in df.columns]
if 'term' in df.columns:
return df
def get_log2foldchange(self, experiment_name: str, groups_strs: Tuple[str]) -> pd.DataFrame:
"""Generate a table of log2 fold changes between pairs of gene symbols.
Parameters
----------
experiment_name : str
Name of the Expression Atlas experiment.
groups_strs : tuple
Pairs of gene symbol strings.
Returns
-------
pandas DataFrame.
"""
dfs = []
df_analyticss = self.get_analyticss(experiment_name)
for df_analytics in df_analyticss:
for g in groups_strs:
col_p_value = f'{g}_p_value'
col_log2foldchange = f'{g}_log2foldchange'
if {col_p_value, col_log2foldchange}.issubset(df_analytics.columns):
cols = ['gene_id', 'gene_name', col_p_value, col_log2foldchange]
rename_map = {col_p_value: 'p_value', col_log2foldchange: 'log2foldchange'}
if f'{g}_t_statistic' in df_analytics.columns:
cols.append(f'{g}_t_statistic')
rename_map[f'{g}_t_statistic'] = 't_statistic'
df = df_analytics[cols].rename(columns=rename_map)
df['group_comparison'] = g
dfs.append(df)
df_concat = pd.concat(dfs)
return df_concat[(df_concat.p_value <= 0.05)
& df_concat.gene_name.notnull()
& ((df_concat.log2foldchange <= -1) | (df_concat.log2foldchange >= 1))]
def get_idf(self, experiment_name: str) -> Optional[pd.DataFrame]:
"""Get Data from IDF by experiment name.
Parameters
----------
experiment_name : str
Name of the Expression Atlas experiment.
Returns
-------
DataFrame with IDF values.
"""
file_path = os.path.join(self.data_dir, experiment_name, f"{experiment_name}.idf.txt")
if not os.path.exists(file_path):
return
rows = []
for line in open(file_path):
line_splitted = line.strip().split('\t')
if len(line_splitted) > 1:
key_name = get_standard_name(line_splitted[0])
values = [x.strip() for x in line_splitted[1:] if x.strip()]
rows.append((key_name, values))
df = pd.DataFrame(rows, columns=('key_name', 'value')).explode('value')
return df
def get_sdrf_condensed(self, experiment_name: str) -> Optional[pd.DataFrame]:
"""Generate a condensed version of an experiment's SDRF.
Parameters
----------
experiment_name : str
Name of the Expression Atlas experiment.
Returns
-------
pandas DataFrame.
"""
file_path = os.path.join(self.data_dir, experiment_name, f"{experiment_name}.condensed-sdrf.tsv")
if not os.path.exists(file_path):
return
names = ['experiment', 'method', 'sample', 'parameter_type', 'parameter', 'value', 'url']
df = | pd.read_csv(file_path, sep="\t", header=None, names=names) | pandas.read_csv |
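A minimal sketch of parsing a condensed SDRF-style table as done above, assuming an in-memory TSV via io.StringIO instead of a downloaded experiment file; the column names mirror the `names` list in get_sdrf_condensed, the values are placeholders.

import io
import pandas as pd

tsv = ("E-TEST-1\trnaseq\tsample1\tcharacteristic\torganism\tHomo sapiens\t\n"
       "E-TEST-1\trnaseq\tsample1\tcharacteristic\tage\t60\t\n")
names = ['experiment', 'method', 'sample', 'parameter_type', 'parameter', 'value', 'url']
df = pd.read_csv(io.StringIO(tsv), sep="\t", header=None, names=names)
print(df[['sample', 'parameter', 'value']])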
import os
from pathlib import Path
from random import shuffle
from itertools import product
import dotenv
import tensorflow as tf
import h5py
import pandas as pd
from src.models.fetch_data_from_hdf5 import get_tf_data
from src.models.models_2d import unet_model, CustomModel, custom_loss
from src.models.losses_2d import dice_coe_1_hard
from src.models.model_evaluation import evaluate_model
project_dir = Path(__file__).resolve().parents[2]
dotenv_path = project_dir / ".env"
dotenv.load_dotenv(str(dotenv_path))
path_data_nii = Path(os.environ["NII_PATH"])
path_mask_lung_nii = Path(os.environ["NII_LUNG_PATH"])
path_clinical_info = Path(os.environ["CLINIC_INFO_PATH"])
bs = 4
n_epochs = 50
n_prefetch = 20
image_size = (256, 256)
# alphas = [0.25, 0.5, 0.75, 1.0]
# ws_gtvl = [2, 4]
# ws_gtvt = [1, 2]
# ws_lung = [1]
alphas = [0.1, 0.25, 0.5, 0.75, 0.9]
ws_gtvl = [1]
ss_gtvl = [1]
ws_gtvt = [0]
ws_lung = [0]
reps = [0, 1, 2, 3]
def get_trainval_patient_list(df, patient_list):
id_list = [int(p.split('_')[1]) for p in patient_list]
df = df.loc[id_list, :]
id_patient_plc_neg_training = list(df[(df["is_chuv"] == 1)
& (df["plc_status"] == 0)].index)
id_patient_plc_pos_training = list(df[(df["is_chuv"] == 1)
& (df["plc_status"] == 1)].index)
shuffle(id_patient_plc_neg_training)
shuffle(id_patient_plc_pos_training)
id_patient_plc_neg_val = id_patient_plc_neg_training[:2]
id_patient_plc_pos_val = id_patient_plc_pos_training[:4]
id_val = id_patient_plc_neg_val + id_patient_plc_pos_val
id_patient_plc_neg_train = id_patient_plc_neg_training[2:]
id_patient_plc_pos_train = id_patient_plc_pos_training[4:]
id_train = id_patient_plc_neg_train + id_patient_plc_pos_train
patient_list_val = [f"PatientLC_{i}" for i in id_val]
patient_list_train = [f"PatientLC_{i}" for i in id_train]
return patient_list_train, patient_list_val
def main():
file_train = h5py.File(
"/home/val/python_wkspce/plc_seg/data/processed/2d_pet_normalized/train.hdf5",
"r")
# file_test = h5py.File(
# "/home/val/python_wkspce/plc_seg/data/processed/2d_pet_normalized/test.hdf5",
# "r")
clinical_df = pd.read_csv(path_clinical_info).set_index("patient_id")
patient_list = list(file_train.keys())
patient_list = [p for p in patient_list if p not in ["PatientLC_63"]]
patient_list_train, patient_list_val = get_trainval_patient_list(
clinical_df, patient_list)
data_val = get_tf_data(
file_train,
clinical_df,
output_shape_image=(256, 256),
random_slice=False,
centered_on_gtvt=True,
patient_list=patient_list_val,
).cache().batch(2)
data_train = get_tf_data(file_train,
clinical_df,
output_shape_image=(256, 256),
random_slice=True,
random_shift=20,
n_repeat=10,
num_parallel_calls='auto',
oversample_plc_neg=True,
patient_list=patient_list_train).batch(bs)
data_train_eval = get_tf_data(
file_train,
clinical_df,
output_shape_image=(256, 256),
random_slice=False,
oversample_plc_neg=False,
centered_on_gtvt=True,
patient_list=patient_list_train).cache().batch(bs)
results_df = | pd.DataFrame() | pandas.DataFrame |
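A standalone sketch of the patient-level split performed by get_trainval_patient_list above, assuming a toy clinical table; the column names follow the function, the patient IDs and hold-out sizes are invented.

import pandas as pd
from random import shuffle, seed

clinical = pd.DataFrame({'is_chuv': [1, 1, 1, 1, 1, 1, 1, 1],
                         'plc_status': [0, 0, 0, 1, 1, 1, 1, 1]},
                        index=range(1, 9))

seed(0)
neg = list(clinical[(clinical.is_chuv == 1) & (clinical.plc_status == 0)].index)
pos = list(clinical[(clinical.is_chuv == 1) & (clinical.plc_status == 1)].index)
shuffle(neg)
shuffle(pos)

# Hold out a few patients of each class for validation, keep the rest for training.
val = neg[:1] + pos[:2]
train = neg[1:] + pos[2:]
print('val:', val, 'train:', train)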
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import miditoolkit
import os
def getStats(folder_name,num_notes_dict={},channel=0):
if num_notes_dict=={}:
num_notes_dict=numNotes(folder_name,channel)
df=pd.DataFrame.from_dict(num_notes_dict, orient='index',columns=["Notes"])
df.index.names = ['Index']
print(f"Files with >=100 notes={len(df[df['Notes']>=100])}")
num_notes=list(num_notes_dict.values())
mean=int(np.mean(num_notes))
std=int(np.std(num_notes))
maximum=np.max(num_notes)
minimum=np.min(num_notes)
perc_10=int(np.percentile(num_notes,10))
perc_25=int(np.percentile(num_notes,25))
perc_50=int(np.percentile(num_notes,50))
perc_75=int(np.percentile(num_notes,75))
df= | pd.DataFrame(columns=["Metric","Value"]) | pandas.DataFrame |
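A short sketch of assembling the Metric/Value summary frame that getStats builds, assuming a made-up note-count dictionary rather than a parsed MIDI folder.

import numpy as np
import pandas as pd

num_notes = {'song_a.mid': 120, 'song_b.mid': 80, 'song_c.mid': 300}
values = list(num_notes.values())

stats = pd.DataFrame([("Mean", int(np.mean(values))),
                      ("Std", int(np.std(values))),
                      ("Min", int(np.min(values))),
                      ("Max", int(np.max(values))),
                      ("Median", int(np.percentile(values, 50)))],
                     columns=["Metric", "Value"])
print(stats)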
"""Requires installation of requirements-extras.txt"""
import pandas as pd
import os
import seaborn as sns
from absl import logging
from ._nlp_constants import PROMPTS_PATHS, PERSPECTIVE_API_MODELS
from credoai.data.utils import get_data_path
from credoai.modules.credo_module import CredoModule
from credoai.utils.common import NotRunError, ValidationError, wrap_list
from functools import partial
from googleapiclient import discovery
from time import sleep
class NLPGeneratorAnalyzer(CredoModule):
"""
This module assesses language generation models based on various prompts and assessment attributes
Parameters
----------
prompts : str
choices are builtin datasets, which include:
'bold_gender', 'bold_political_ideology', 'bold_profession',
'bold_race', 'bold_religious_ideology' (Dhamala et al. 2021)
'realtoxicityprompts_1000', 'realtoxicityprompts_challenging_20',
'realtoxicityprompts_challenging_100', 'realtoxicityprompts_challenging' (Gehman et al. 2020)
'conversationai_age', 'conversationai_disability', 'conversationai_gender', 'conversationai_race',
'conversationai_religious_ideology', 'conversationai_sexual_orientation' (Dixon et al. 2018)
or path of your own prompts csv file with columns 'group', 'subgroup', 'prompt'
generation_functions : dict
keys are names of the models and values are their callable generation functions
assessment_functions : dict
keys are names of the assessment functions and values could be custom callable assessment functions
or name of builtin assessment functions.
Current choices, all using Perspective API include:
'perspective_toxicity', 'perspective_severe_toxicity',
'perspective_identify_attack', 'perspective_insult',
'perspective_profanity', 'perspective_threat'
perspective_config : dict
if Perspective API is to be used, this must be passed with the following:
'api_key': your Perspective API key
'rpm_limit': request per minute limit of your Perspective API account
"""
def __init__(
self,
prompts,
generation_functions,
assessment_functions,
perspective_config=None,
):
super().__init__()
self.prompts = prompts
self.generation_functions = generation_functions
self.assessment_functions = assessment_functions
self.perspective_config = perspective_config
self.perspective_client = None
def prepare_results(self):
"""Generates summary statistics of raw assessment results generated by self.run
Returns
-------
pandas.dataframe
Summary statistics of assessment results
Schema: ['generation_model' 'assessment_attribute', 'group', 'mean', 'std']
Raises
------
NotRunError
Occurs if self.run is not called yet to generate the raw assessment results
"""
if self.results is not None:
# Calculate statistics across groups and assessment attributes
results = (
self.results['assessment_results'][
["generation_model", "group", "assessment_attribute", "value"]
]
.groupby(
["generation_model", "group", "assessment_attribute"],
as_index=False,
)
.agg(mean=("value", "mean"), std=("value", "std"))
)
results.sort_values(
by=["generation_model", "assessment_attribute", "group"], inplace=True
)
results = results[
["generation_model", "assessment_attribute", "group", "mean", "std"]
]
return results
else:
raise NotRunError(
"Results not created yet. Call 'run' with appropriate arguments before preparing results"
)
def run(self, n_iterations=1):
"""Run the generations and assessments
Parameters
----------
n_iterations : int, optional
Number of times to generate responses for a prompt, by default 1
Increase if your generation model is stochastic for a higher confidence
Returns
-------
self
"""
df = self._get_prompts(self.prompts)
logging.info("Loaded the prompts dataset " + self.prompts)
# Perform prerun checks
self._perform_prerun_checks()
logging.info(
"Performed prerun checks of generation and assessment functions"
)
# Generate and record responses for the prompts with all the generation models n_iterations times
dfruns_lst = []
for gen_name, gen_fun in self.generation_functions.items():
gen_fun = partial(gen_fun, num_sequences=n_iterations)
logging.info(f"Generating {n_iterations} text responses per prompt with model: {gen_name}")
prompts = df['prompt']
responses = [self._gen_fun_robust(p, gen_fun) for p in prompts]
temp = pd.concat([df, | pd.DataFrame(responses) | pandas.DataFrame |
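A small sketch of the summary aggregation performed in prepare_results above, assuming a toy results frame with the same columns; the model and attribute names are placeholders.

import pandas as pd

raw = pd.DataFrame({'generation_model': ['gpt2'] * 4,
                    'group': ['gender', 'gender', 'race', 'race'],
                    'assessment_attribute': ['perspective_toxicity'] * 4,
                    'value': [0.10, 0.20, 0.05, 0.15]})

summary = (raw.groupby(['generation_model', 'group', 'assessment_attribute'], as_index=False)
              .agg(mean=('value', 'mean'), std=('value', 'std')))
print(summary)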
#!/usr/bin/env python3
"""
Process raw data to get related disease pairs from Disease Ontology
"""
__author__ = "<NAME>"
__version__ = "0.1.0"
__license__ = "MIT"
import argparse
import logging
from funcs import utils
import pandas as pd
import numpy as np
from tqdm.autonotebook import trange
import random
from os.path import join
def main(args):
logging.basicConfig(level=logging.INFO,
format='%(module)s:%(levelname)s:%(asctime)s:%(message)s',
handlers=[logging.FileHandler("../logs/report.log"),logging.StreamHandler()])
logging.info(args)
gda = pd.read_csv(args.in_gda_path, sep='\t')
doid_sims = pd.read_csv(args.in_sims_path, sep='\t')
all_doids = doid_sims.index.tolist()
diseases = | pd.read_csv(args.in_diseases_path, sep='\t', index_col='diseaseId') | pandas.read_csv |
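A small sketch of pairing related diseases by a similarity threshold, in the spirit of the script above; the similarity matrix here is a tiny made-up frame, not the real Disease Ontology output, and the cut-off is arbitrary.

import pandas as pd

doid_sims = pd.DataFrame([[1.0, 0.8, 0.1],
                          [0.8, 1.0, 0.2],
                          [0.1, 0.2, 1.0]],
                         index=['DOID:1', 'DOID:2', 'DOID:3'],
                         columns=['DOID:1', 'DOID:2', 'DOID:3'])

# Keep off-diagonal pairs whose similarity exceeds a cut-off.
pairs = (doid_sims.stack()
         .rename_axis(['doid_a', 'doid_b'])
         .reset_index(name='sim'))
related = pairs[(pairs.doid_a < pairs.doid_b) & (pairs.sim >= 0.5)]
print(related)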
from Calculatefunction import k,dl,seita
import csv
import pandas as pd
import numpy as np
sourcenamelist=csv.reader(open('/Users/dingding/Desktop/sample5.9.csv','r'))
GRBname=[column[0]for column in sourcenamelist]
Znamelist=csv.reader(open('/Users/dingding/Desktop/sample5.9.csv','r'))
z=[column[1]for column in Znamelist]
Epeaknamelist=csv.reader(open('/Users/dingding/Desktop/sample5.9.csv','r'))
ep=[column[2]for column in Epeaknamelist]
Enamelist=csv.reader(open('/Users/dingding/Desktop/sample5.9.csv','r'))
s=[column[3]for column in Enamelist]
i=0
seitalist=[]
for i in range(296):
seitalist=np.append(seitalist,seita(float(z[i]),149.3,float(s[i]),-1,-2.5,15,350))
i=i+1
print(seitalist)
dataframeseita= | pd.DataFrame(seitalist) | pandas.DataFrame |
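A sketch of the same per-burst loop using a single pandas read and apply, assuming a placeholder for the external seita() function (its real signature lives in Calculatefunction and is not reproduced here); the sample values are invented.

import pandas as pd

def seita_placeholder(z, radius, fluence, alpha, beta, e_min, e_max):
    # Stand-in for Calculatefunction.seita; returns a dummy value.
    return z * fluence

sample = pd.DataFrame({'z': [0.5, 1.2], 'ep': [200.0, 350.0], 's': [1e-6, 3e-6]})
sample['seita'] = sample.apply(
    lambda row: seita_placeholder(row.z, 149.3, row.s, -1, -2.5, 15, 350), axis=1)
print(sample)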
import pytest
from pandas.tests.series.common import TestData
@pytest.fixture(scope="module")
def test_data():
return | TestData() | pandas.tests.series.common.TestData |
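A hedged sketch of the module-scoped fixture pattern above, using a plain DataFrame instead of pandas' internal TestData helper (which is private test scaffolding, not a public API); the fixture and test names are invented.

import pandas as pd
import pytest

@pytest.fixture(scope="module")
def sample_frame():
    # Built once per test module and shared across its tests.
    return pd.DataFrame({'x': range(5), 'y': list('abcde')})

def test_columns(sample_frame):
    assert list(sample_frame.columns) == ['x', 'y']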