prompt | completion | api
---|---|---
stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90
import pandas as pd
import numpy as np
def run_fengxian(path_1, path_2):
    """
    name: "risk value function"
    function: merge the grouped-and-summed output (sales) invoice data with the
    input (purchase) invoice data, then compute the net profit margin on sales
    path_1: output (sales) invoice file
    path_2: input (purchase) invoice file
    """
    df_1 = pd.read_csv(path_1, encoding='UTF-8')  # output (sales) invoices
    df_2 = pd.read_csv(path_2, encoding='UTF-8')  # input (purchase) invoices
    # Drop columns that are not needed (invoice number, issue date, counterparty code)
    del df_1['发票号码']
    del df_1['开票日期']
    del df_1['购方单位代号']
    del df_2['发票号码']
    del df_2['开票日期']
    del df_2['销方单位代号']
    # Keep only rows whose invoice status is "valid invoice"
    df_1 = df_1[(df_1['发票状态'].isin(['有效发票']))]
    df_2 = df_2[(df_2['发票状态'].isin(['有效发票']))]
    # Drop every row that contains a negative amount or tax value
    df_1 = df_1[df_1['金额'].apply(lambda x: x >= 0)]
    df_1 = df_1[df_1['税额'].apply(lambda x: x >= 0)]
    df_2 = df_2[df_2['金额'].apply(lambda x: x >= 0)]
    df_2 = df_2[df_2['税额'].apply(lambda x: x >= 0)]
    # Drop the invoice-status column so the groupby-sum below works (text cannot be summed)
    del df_1['发票状态']
    del df_2['发票状态']
    # Group by company code and sum
    df_1 = df_1.groupby('企业代号').sum()
    df_1 = df_1.rename(columns={'金额': '金额_销项', '税额': '税额_销项', '价税合计': '价税合计_销项'})
    df_2 = df_2.groupby('企业代号').sum()
    df_2 = df_2.rename(columns={'金额': '金额_进项', '税额': '税额_进项', '价税合计': '价税合计_进项'})
    # Merge the two tables
    result = pd.concat([df_1, df_2], axis=1)
    # Net profit margin on sales
    result['净利润'] = result['金额_销项'] - result['金额_进项'] + result['税额_进项'] - result['税额_销项']
    result['销售收入'] = result['金额_销项']
    result['销售净利率'] = result['净利润'] / result['销售收入']
    # Risk value (pd.np was removed in pandas 2.0, so use numpy's exp directly)
    result["exp_销售净利率"] = result["销售净利率"].apply(np.exp)
    result['风险值'] = 1 / (result['exp_销售净利率'])
    return result
def run_fenxian_p3(path):
    """
    name: "risk value for problem three"
    path: path of the risk file computed above
    """
    df = | pd.read_csv(path, encoding='GBK') | pandas.read_csv |
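# A minimal usage sketch of run_fengxian (not part of the original row): the two CSV
# paths and the output file name below are hypothetical placeholders.
risk = run_fengxian("output_invoices.csv", "input_invoices.csv")
print(risk[['销售净利率', '风险值']].head())  # net profit margin on sales and risk value per company code
risk.to_csv("risk_values.csv", encoding='UTF-8')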
import pandas as pd
import pytest
import plotly.graph_objects as go
from easyplotly import Sankey
@pytest.fixture()
def sankey_a_b_target():
    return go.Sankey(
        node=dict(label=['Source A', 'Source B', 'Target']),
        link=dict(
            source=[0, 1],
            target=[2, 2],
            value=[1, 1]
        ))
def test_sankey_df(sankey_a_b_target, df=pd.DataFrame(1, index=['Source A', 'Source B'], columns=['Target'])):
    assert Sankey(df) == sankey_a_b_target
def test_sankey_dict(sankey_a_b_target, links={('Source A', 'Target'): 1, ('Source B', 'Target'): 1}):
    assert Sankey(links) == sankey_a_b_target
def test_sankey_two_series():
    x = pd.Series({('A', 'B'): 1})
    y = | pd.Series({('B', 'C'): 1}) | pandas.Series |
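# A small self-contained sketch (an assumption about why the fixtures above work, not
# part of the test file): pandas builds a two-level MultiIndex from tuple keys, which
# is presumably how easyplotly reads the (source, target) pair of each link.
import pandas as pd
links = pd.Series({('Source A', 'Target'): 1, ('Source B', 'Target'): 1})
print(links.index.nlevels)                       # 2 -> (source, target)
print(links.index.get_level_values(0).tolist())  # ['Source A', 'Source B']
print(links.sum())                               # total flow across all links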
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import os
import xgboost as xgb
from constants import VARS, IDS
from sklearn.metrics import mean_absolute_error
from utils import train_test_split
from src.constants import DATA_DIR, LOGS_DIR
def best_params(path):
    if os.path.exists(path):
        df_params = pd.read_json(path, lines=True)
        df_params = df_params.sort_values(
            'target', ascending=False).reset_index(drop=True)
        best_params = df_params.loc[0, 'params']
        min_max_params = ['feature_fraction', 'bagging_fraction']
        non_zero_params = ['lambda_l1', 'lambda_l2']
        int_params = ['max_depth', 'num_leaves', 'n_estimators']
        for param in min_max_params:
            best_params[param] = max(min(best_params[param], 1), 0)
        for param in non_zero_params:
            best_params[param] = max(best_params[param], 0)
        for param in int_params:
            best_params[param] = int(round(best_params[param]))
        return best_params
    else:
        print('No Logs Found')
def run_model(X_train, y_train, X_test, y_test, params):
    matrice = xgb.DMatrix(X_train, y_train)
    model = xgb.train(params, matrice)
    preds = model.predict(xgb.DMatrix(X_test))
    print('MAE:', mean_absolute_error(preds, y_test))
    return preds
def save_preds(X_test, y_test, preds, ids_test, suffix):
    arrays = [ids_test, X_test, y_test, preds]
    arrays = [pd.DataFrame(array).reset_index(drop=True) for array in arrays]
    df_test = | pd.concat(arrays, axis=1) | pandas.concat |
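# A minimal illustration, with made-up data, of the pattern save_preds relies on:
# resetting each piece's index before pd.concat(axis=1) aligns rows by position
# rather than by their original (possibly shuffled) indices.
import pandas as pd
a = pd.DataFrame({'id': [10, 11]}, index=[5, 9])
b = pd.DataFrame({'pred': [0.2, 0.7]}, index=[0, 1])
aligned = pd.concat([x.reset_index(drop=True) for x in (a, b)], axis=1)
print(aligned)  # two rows and both columns; without reset_index the join on index would introduce NaNs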
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
na_value_for_dtype)
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
class TestIsNull(object):
def test_0d_array(self):
assert isnull(np.array(np.nan))
assert not isnull(np.array(0.0))
assert not isnull(np.array(0))
# test object dtype
assert isnull(np.array(np.nan, dtype=object))
assert not isnull(np.array(0.0, dtype=object))
assert not isnull(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isnull(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_isnull(self):
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert float('nan')
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert isinstance(isnull(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
with catch_warnings(record=True):
for p in [tm.makePanel(), tm.makePeriodPanel(),
tm.add_nans(tm.makePanel())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
with catch_warnings(record=True):
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists(self):
result = isnull([[False]])
exp = np.array([[False]])
tm.assert_numpy_array_equal(result, exp)
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
tm.assert_numpy_array_equal(result, exp)
# list of strings / unicode
result = isnull(['foo', 'bar'])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
result = isnull([u('foo'), u('bar')])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_nat(self):
result = isnull([NaT])
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_numpy_nat(self):
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isnull(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isnull_datetime(self):
assert not isnull(datetime.now())
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
exp = np.ones(len(idx), dtype=bool)
tm.assert_numpy_array_equal(notnull(idx), exp)
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
mask = isnull(pidx[1:])
exp = np.zeros(len(mask), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
def test_datetime_other_units(self):
idx = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-02'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',
'datetime64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(values), exp)
tm.assert_numpy_array_equal(notnull(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
def test_timedelta_other_units(self):
idx = pd.TimedeltaIndex(['1 days', 'NaT', '2 days'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['timedelta64[D]', 'timedelta64[h]', 'timedelta64[m]',
'timedelta64[s]', 'timedelta64[ms]', 'timedelta64[us]',
'timedelta64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(values), exp)
tm.assert_numpy_array_equal( | notnull(values) | pandas.core.dtypes.missing.notnull |
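# A small standalone sketch of the behaviour these tests exercise, using the public
# pd.isna/pd.notna names (isnull/notnull are aliases); the output shown is the default
# behaviour, without the older use_inf_as_null option toggled above.
import numpy as np
import pandas as pd
vals = np.array([1.5, np.nan, np.inf, np.datetime64('NaT')], dtype=object)
print(pd.isna(vals))     # [False  True False  True] -> NaN and NaT count as missing
print(pd.notna(np.inf))  # True -> infinities are not missing by default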
import os
import pandas as pd
import numpy as np
import uproot
import h5py
from twaml.data import dataset
from twaml.data import scale_weight_sum
from twaml.data import from_root, from_pytables, from_h5
branches = ["pT_lep1", "pT_lep2", "eta_lep1", "eta_lep2"]
ds = from_root(
    ["tests/data/test_file.root"], name="myds", branches=branches, TeXlabel=r"$t\bar{t}$"
)
def test_name():
    assert ds.name == "myds"
    assert ds.TeXlabel == "$t\\bar{t}$"
def test_no_name():
    dst = from_root(["tests/data/test_file.root"], branches=branches)
    assert dst.name == "test_file.root"
def test_content():
    ts = [uproot.open(f)[ds.tree_name] for f in ds.files]
    raws = [t.array("pT_lep1") for t in ts]
    raw = np.concatenate([raws])
    bins = np.linspace(0, 800, 21)
    n1, bins1 = np.histogram(raw, bins=bins)
    n2, bins2 = np.histogram(ds.df.pT_lep1.to_numpy(), bins=bins)
    np.testing.assert_array_equal(n1, n2)
def test_nothing():
    dst = from_root(["tests/data/test_file.root"], branches=branches)
    assert dst.files[0].exists()
def test_with_executor():
    lds = from_root(["tests/data/test_file.root"], branches=branches, nthreads=4)
    np.testing.assert_array_almost_equal(lds.weights, ds.weights, 8)
def test_weight():
    ts = [uproot.open(f)[ds.tree_name] for f in ds.files]
    raws = [t.array("weight_nominal") for t in ts]
    raw = np.concatenate(raws)
    raw = raw * 150.0
    ds.weights = ds.weights * 150.0
    np.testing.assert_array_almost_equal(raw, ds.weights, 6)
def test_add():
    ds2 = from_root(["tests/data/test_file.root"], name="ds2", branches=branches)
    ds2.weights = ds2.weights * 22
    combined = ds + ds2
    comb_w = np.concatenate([ds.weights, ds2.weights])
    comb_df = pd.concat([ds.df, ds2.df])
    np.testing.assert_array_almost_equal(comb_w, combined.weights, 5)
    np.testing.assert_array_almost_equal(comb_df.get_values(), combined.df.get_values(), 5)
    assert ds.name == combined.name
    assert ds.tree_name == combined.tree_name
    assert ds.label == combined.label
def test_selection():
    ds2 = from_root(
        ["tests/data/test_file.root"],
        name="ds2",
        selection="(reg2j2b==True) & (OS == True) & (pT_lep1 > 50)",
    )
    upt = uproot.open("tests/data/test_file.root")["WtLoop_nominal"]
    reg2j2b = upt.array("reg2j2b")
    OS = upt.array("OS")
    pT_lep1 = upt.array("pT_lep1")
    sel = np.logical_and(np.logical_and(reg2j2b, OS), pT_lep1 > 50)
    w = upt.array("weight_nominal")[sel]
    assert np.allclose(w, ds2.weights)
    # np.testing.assert_array_almost_equal(w, ds2.weights)
def test_append():
    branches = ["pT_lep1", "pT_lep2", "eta_lep1", "eta_lep2"]
    ds1 = from_root(["tests/data/test_file.root"], name="myds", branches=branches)
    ds2 = from_root(["tests/data/test_file.root"], name="ds2", branches=branches)
    ds2.weights = ds2.weights * 5
    # raw
    comb_w = np.concatenate([ds1.weights, ds2.weights])
    comb_df = | pd.concat([ds1.df, ds2.df]) | pandas.concat |
#kMeans
import random
import pandas as pd
import numpy as np
from pandas.util.testing import assert_frame_equal
import matplotlib.pyplot as plt
def random_sample(df, k):
    # Python 3: xrange is gone, and .iloc replaces the removed .ix indexer
    rindex = np.array(random.sample(range(len(df)), k))
    return df.iloc[rindex]
def distance(e1, e2):
    return np.linalg.norm(e1 - e2)
def create_clusters(centers):
    clusters = []
    for i in range(len(centers)):
        clusters.append(pd.DataFrame())
    return clusters
def closest(element, centers):
    smallest = None
    for i in range(len(centers)):
        center = centers.iloc[i]
        if smallest is None or distance(element, center) < smallest:
            smallest = distance(element, center)
            closest_center = center
    return closest_center
def assign_clusters(df, clusters, centers):
    for i in range(len(df)):
        element = df.iloc[i]
        center = closest(element, centers)
        # Yes, this is ugly, but damn pandas is hard to grok.
        for c in range(len(centers)):
            if (centers.iloc[c] == center).all():
                clusters[c] = clusters[c].append(element)
    return clusters
def find_center(df):
    return df.mean(axis=0)
def find_centers(clusters):
    centers = pd.DataFrame()
    for cluster in clusters:
        center = find_center(cluster)
        centers = centers.append(center, ignore_index=True)
    return centers
def train(df, k, loop_n):
    init_centers = random_sample(df, k)
    return train_loop(df, k, init_centers, loop_n)
def train_loop(df, k, centers, loop_n):
    step = 1
    for i in range(loop_n):
        clusters = create_clusters(centers)
        clusters = assign_clusters(df, clusters, centers)
        old_centers = centers
        centers = find_centers(clusters)
        # return if we have not moved
        if frames_equal(centers, old_centers):
            return centers
        step += 1
    return centers
def classify(element):
    return 2
def frames_equal(f1, f2):
    try:
        | assert_frame_equal(f1, f2) | pandas.util.testing.assert_frame_equal |
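# A sketch of how the truncated frames_equal helper is typically completed (an
# assumption, not the author's exact code): assert_frame_equal raises AssertionError
# on any mismatch, so the try/except turns that into a boolean.
def frames_equal_sketch(f1, f2):
    try:
        assert_frame_equal(f1, f2)
        return True
    except AssertionError:
        return False
# For exact equality checks, f1.equals(f2) is a simpler alternative.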
import pandas as pd
#Summary null values
def summary(X):
    '''Return, per column: the null-value count, the count of a specified character such as '?', its percentage of occurrence, and the number of unique values, with the column names as the index.'''
    # Each lambda works on the individual column x (not the whole frame X), so the counts are per column
    null_values = X.apply(lambda x: x.isnull().sum())
    blank_char = X.apply(lambda x: x.isin(['?']).sum())
    percent_blank_char = X.apply(lambda x: round((x.isin(['?']).sum() / X.shape[0]) * 100, 2))
    unique_values = X.apply(lambda x: len(x.unique()))
    return pd.DataFrame({'null_values': null_values,
                         '? Values': blank_char, '% ? Values': percent_blank_char,
                         'unique_values': unique_values})
#Date split
def date_split(X, date):
    X = X.copy()
    X['day'] = | pd.to_datetime(X[date]) | pandas.to_datetime |
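# A standalone sketch of the pd.to_datetime + .dt pattern that date_split appears to
# be building toward; the 'order_date' column and the month/year fields are assumptions.
import pandas as pd
frame = pd.DataFrame({'order_date': ['2021-01-05', '2021-02-10']})
parsed = pd.to_datetime(frame['order_date'])
frame['day'] = parsed.dt.day
frame['month'] = parsed.dt.month
frame['year'] = parsed.dt.year
print(frame)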
import pandas as pd,requests, plotly.graph_objects as go, plotly.express as px, os
from dotenv import load_dotenv
from plotly.subplots import make_subplots
# Using requests library to create urls
def req(series: str, start: str, end: str, json: str):
    '''
    {param} series: The series we are looking at (PAYEMS, GDPC1, and CPIAUCSL)
    {param} start: Observation start date (default: 1776-07-04)
    {param} end: Observation end date (default: 9999-12-31)
    {param} json: File type to send
    {type} str, str, str, str
    {return} JSON of what we get back from requests.get
    '''
    payload = {'series_id': series, 'observation_start': start, 'observation_end': end, 'file_type': json}
    load_dotenv()  # Searches for a .env file in the root directory
    api_key = os.environ.get("Api_Key", None)  # Extract the API key from the .env file; returns None if nothing with this name is there
    payload["api_key"] = api_key
    r = requests.get('https://api.stlouisfed.org/fred/series/observations', params=payload)  # Retrieve data from this endpoint using the parameters specified above
    return r.json()
# Gathering series from FRED
PAYEMS = req("PAYEMS","2000-01-01","2020-12-31", "json") # Calling function from above to create a url then json file with these parameters
GDPC1 = req("GDPC1", "2000-01-01", "2020-12-31","json") # Because we want to look at data from 2000 to 2020,
# I chose the range to be the first day of 2000 to the last day of 2020
CPIAUCSL=req("CPIAUCSL", "2000-01-01", "2020-12-31", "json")
# Joining the series together into one dataframe
df1 = pd.json_normalize(PAYEMS, record_path=['observations'])
df1.rename(columns={'value':'Total Nonfarm Employment Value'}, inplace=True) # Source: https://www.geeksforgeeks.org/how-to-rename-columns-in-pandas-dataframe/
df2 = pd.json_normalize(GDPC1, record_path=['observations'])
df2.rename(columns={'value':'Real Gross Domestic Product Value'}, inplace=True)
df3 = pd.json_normalize(CPIAUCSL, record_path=['observations'])
"""
Source: https://towardsdatascience.com/how-to-convert-json-into-a-pandas-dataframe-100b2ae1e0d8
Because the data frame had a nested list, I wanted to first extract the data from the "observations".
record_path=['observations'] tells me I'm looking into the observation column at the dictionary inside of it
The pd.json_normalize then takes the realtime_start, realtime_end, date, and value within each observation and creates a column for each
I decided not to set meta = anything because the observation start, end, and file type parameters were the same for the three and I didn't want to confuse the dates with the actual date.
"""
df3.rename(columns={'value':'Consumer Price Index Value'}, inplace=True) # Renaming "value" column to "Consumer Price Index Value" in my third dataframe so I know what I am working with
merged_df=pd.merge(df1, df3, how="outer") # Creating a new variable to store my joined first dataframe and second dataframe
merged_df1=pd.merge(merged_df, df2, how="outer") # Using the new variable I just created w/ my first and second dataframes to join it with my third dataframe
merged_df1.drop("realtime_start", axis=1, inplace=True)
merged_df1.drop("realtime_end", axis=1, inplace=True) # Deleting the columns named realtime_start and realtime_end since we don't need it, source: https://www.nbshare.io/notebook/199139718/How-To-Drop-One-Or-More-Columns-In-Pandas-Dataframe/
# Saving dataframe as a .csv file
merged_df1.to_csv(r"/Users/sophia/Desktop/Lowe/FRED_DF1.csv") # Source: https://stackoverflow.com/questions/16923281/writing-a-pandas-dataframe-to-csv-file
# Specifying which folder I want to place my csv file with the name "Data_FRED.csv" in
# Plotting two of the time series on the same plot, with time as the horizontal axis
df = pd.read_csv("/Users/sophia/Desktop/Lowe/FRED_DF1.csv") #Source: https://plotly.com/python/plot-data-from-csv/
# Using pandas to read in the new csv I just created
fig=make_subplots(specs=[[{"secondary_y": True}]]) # Source: https://plotly.com/python/multiple-axes/#two-y-axes
# Indicating that we want two y-axes in our plot
fig.add_trace(
go.Scatter(x=df["date"], y=df["Consumer Price Index Value"], name="Consumer Price Index", line=dict(color="rgb(160, 97, 119)")),
secondary_y=False
) # Plotting this data using date and CPI, changing the line color, and signifying that this is not the one associated with our second y-axis
fig.add_trace(
go.Scatter(x=df["date"], y=df["Total Nonfarm Employment Value"], name="Total Nonfarm Employment", line=dict(color="rgb(104, 133, 92)")), # Changing color of line, Source: https://stackoverflow.com/questions/58188816/plotly-how-to-set-line-color
secondary_y=True
) # Adding this data to our plot and signifying that it is associated with our second y-axis
fig.update_xaxes(title_text="Year<br><br>Source: FRED Economic Research")  # Creating X axis label, breaking to a new line, citing the source
fig.update_yaxes(title_text="Consumer Price Index Value", secondary_y=False, linecolor="rgb(160, 97, 119)", title_font=dict(color="rgb(160, 97, 119)"), linewidth = 4.5) # Creating Y axis label for first variable and changing color/boldness of axis
fig.update_yaxes(title_text="Thousands of Persons", secondary_y=True, linecolor="rgb(104, 133, 92)", title_font=dict(color="rgb(104, 133, 92)"), linewidth = 4.5) # Creating Y axis label for second variable and changing color/boldness of axis
fig.update_layout(legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01),
title_text="How CPI and Total Nonfarm Employment Change Over Time")
# Source: https://plotly.com/python/legend/
# Moving legend from the top rightt corner to the top left corner within the graph and creating a graph title
fig.update_xaxes(
dtick="M24",
tickformat="%B <br> %Y")
# Source: https://plotly.com/python/time-series/
# Changing x-axis tick labels, has a label for every 24 months and displays the month and the year
fig.show()
# Plotting a histogram
fig=px.histogram(df, x="date", y="Real Gross Domestic Product Value", nbins= 21, color_discrete_sequence=["darkseagreen"])
# Source: https://plotly.com/python/histograms/
# Creating histogram from my dataframe (df), choosing number of bins I want in my histogram, setting color of bins to "darkseagreen"
fig.update_xaxes(showgrid=True, title_text="Date")
fig.update_yaxes(title_text="GDP in Billions of Chained 2012 Dollars")
fig.update_traces(marker_line_width=0.75,marker_line_color="white") # Source: https://stackoverflow.com/questions/67255139/how-to-change-the-edgecolor-of-an-histogram-in-plotly
# Creating a white border around the bins of my histogram to show each bin individually
fig.update_layout(title="GDP from 2000-2020")
fig.update_xaxes(
dtick="M24",
tickformat="%B <br> %Y")
fig.show()
# Plotting a scatterplot
fig=px.scatter(df, x=df["Total Nonfarm Employment Value"], y=df["Consumer Price Index Value"], color_discrete_sequence=["salmon"], title="CPI vs Nonfarm Employment Value")
# Creating a scatterplot through our dataframe with the x-axis data from Total Nonfarm Employment Value and the y-axis data from CPI
# Changing color of scatterplot points to be "salmon"
# Source: https://plotly.com/python/line-and-scatter/
fig.show()
# Pulling data from two more series
RPM = req("RAILPMD11","2000-01-01","2020-12-31", "json") # https://fred.stlouisfed.org/series/RAILPMD11
ARPM = req("AIRRPMTSID11","2000-01-01","2020-12-31", "json") # https://fred.stlouisfed.org/series/AIRRPMTSID11
df_2 = | pd.json_normalize(RPM, record_path=['observations']) | pandas.json_normalize |
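# A self-contained sketch, with a fabricated FRED-shaped payload, of the
# pd.json_normalize(record_path=['observations']) call used throughout this script.
import pandas as pd
payload = {
    'units': 'lin',
    'observations': [
        {'realtime_start': '2024-01-01', 'realtime_end': '2024-01-01', 'date': '2000-01-01', 'value': '130.8'},
        {'realtime_start': '2024-01-01', 'realtime_end': '2024-01-01', 'date': '2000-02-01', 'value': '132.1'},
    ],
}
obs = pd.json_normalize(payload, record_path=['observations'])
print(obs.columns.tolist())  # ['realtime_start', 'realtime_end', 'date', 'value']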
import torch
from torchtext.legacy import data
from torchtext.legacy.data import Field, BucketIterator
import pandas as pd
import os
from .NLPClassificationDataset import NLPClassificationDataset
class SSTDataset(NLPClassificationDataset):
    def __init__(self, data_path, seed, batch_size, device, split_ratio=[0.7, 0.3]):
        # super(QuoraDataset, self).__init__(data_path, seed, batch_size, device, split_ratio)
        self.split_ratio = split_ratio
        self.data_path = data_path
        self.seed = seed
        self.device = device
        self.batch_size = batch_size
        self.ranges = [0, 0.2, 0.4, 0.6, 0.8, 1.0]
        self.labels = ['very negative', 'negative', 'neutral', 'positive', 'very positive']
        self.label = [0, 1, 2, 3, 4]
        self.seq_data = self.load_data(self.data_path)
    def get_labels(self):
        return self.labels
    def load_data(self, sst_path):
        sst_sents = pd.read_csv(os.path.join(sst_path, 'datasetSentences.txt'), delimiter='\t')
        sst_phrases = pd.read_csv(os.path.join(sst_path, 'dictionary.txt'), delimiter='|', names=['phrase', 'phrase_id'])
        sst_labels = pd.read_csv(os.path.join(sst_path, 'sentiment_labels.txt'), delimiter='|')
        sst_sentences_phrases = | pd.merge(sst_sents, sst_phrases, how='inner', left_on=['sentence'], right_on=['phrase']) | pandas.merge |
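# A tiny standalone illustration of the pd.merge call the loader performs: an inner
# join where the key column has a different name in each frame (toy data, not SST).
import pandas as pd
sents = pd.DataFrame({'sentence_index': [1, 2], 'sentence': ['good film', 'bad film']})
phrases = pd.DataFrame({'phrase': ['good film', 'so-so film'], 'phrase_id': [10, 11]})
joined = pd.merge(sents, phrases, how='inner', left_on=['sentence'], right_on=['phrase'])
print(joined)  # one row: only 'good film' appears in both frames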
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# seee GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
store.get_storer(key).table.description, name
).itemsize == size
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them thought)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
| read_hdf(path, "dfq", where="A>0 or C>0") | pandas.io.pytables.read_hdf |
import numpy as np
import pandas as pd
from astropy.table import Table
from astropy.io.fits import getdata
from astropy.time import Time
from astropy.io import fits
import sys
from astroquery.simbad import Simbad
from astropy.coordinates import SkyCoord
import astropy.units as u
# Read base CSV from the Google drive
df = pd.read_csv('csv/straycats2_merged.csv')
df['SEQID'] = pd.to_numeric(df['SEQID'])
df['Module'] = [mod.strip() for mod in df['Module']]
# Greenlist the columns that we want
greenlist = ['SL Target', 'SEQID', 'Module', 'Primary Target', 'Exposure (s)', 'RA',
'DEC']
for col in df.columns:
if col not in greenlist:
df = df.drop(axis=1, labels=col)
# Drop everything with NaN in the SL Target column
df = df.dropna(subset=['SL Target'])
df = df.rename(columns={"Exposure (s)": "Exposure"})
df['SL Target'] = df['SL Target'].str.strip()
df['RA'] = pd.to_numeric(df['RA'])
df['DEC'] = | pd.to_numeric(df['DEC']) | pandas.to_numeric |
#test dataset model
from deepforest import get_data
from deepforest import dataset
from deepforest import utilities
import os
import pytest
import torch
import pandas as pd
import numpy as np
import tempfile
def single_class():
csv_file = get_data("example.csv")
return csv_file
def multi_class():
csv_file = get_data("testfile_multi.csv")
return csv_file
@pytest.mark.parametrize("csv_file,label_dict",[(single_class(), {"Tree":0}), (multi_class(),{"Alive":0,"Dead":1})])
def test_TreeDataset(csv_file, label_dict):
root_dir = os.path.dirname(get_data("OSBS_029.png"))
ds = dataset.TreeDataset(csv_file=csv_file,
root_dir=root_dir,
label_dict=label_dict)
raw_data = pd.read_csv(csv_file)
assert len(ds) == len(raw_data.image_path.unique())
for i in range(len(ds)):
#Between 0 and 1
path, image, targets = ds[i]
assert image.max() <= 1
assert image.min() >= 0
assert targets["boxes"].shape == (raw_data.shape[0],4)
assert targets["labels"].shape == (raw_data.shape[0],)
assert len(np.unique(targets["labels"])) == len(raw_data.label.unique())
def test_single_class_with_empty(tmpdir):
"""Add fake empty annotations to test parsing """
csv_file1 = get_data("example.csv")
csv_file2 = get_data("OSBS_029.csv")
df1 = pd.read_csv(csv_file1)
df2 = | pd.read_csv(csv_file2) | pandas.read_csv |
#coding=utf-8
from sklearn.metrics import roc_auc_score
import pandas as pd
import os
val = pd.read_csv('../data/validation/validation_set.csv')
"""
for i in range(30):
xgb = pd.read_csv('./val/svm_{0}.csv'.format(i))
tmp = pd.merge(xgb,val,on='Idx')
auc = roc_auc_score(tmp.target.values,tmp.score.values)
xgb.to_csv('./val/svm{0}_{1}.csv'.format(i,auc),index=None,encoding='utf-8')
"""
files = os.listdir('./val')
pred = pd.read_csv('./val/'+files[0])
Idx = pred.Idx
score = pred.score
for f in files[1:]:
pred = pd.read_csv('./val/'+f)
score += pred.score
score /= len(files)
pred = | pd.DataFrame(Idx,columns=['Idx']) | pandas.DataFrame |
import logging
import os
import sys
import pandas as pd
import pytest
import handy as hd
log: logging.Logger
@pytest.fixture
def setup_logging():
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
global log
log = logging.getLogger('handy test')
log.setLevel(logging.INFO)
return log
def test_nothing(setup_logging):
global log
# this is to show how to use logging with pycharm + pytest
# it will be printed if pytest is run with options `-p no:logging -s`
# Add them to "Additional Arguments" in your Run Configuration or
# Run Configuration Template for pytest
print('\nignore the communicates from this method. This is test.')
print('this is how to print to console, without logging')
log.warning('Just a test warning message. Ignore it.')
log.warning('This is how to print a warning with logging')
log.info('This is how to print an info with logging')
assert True, "dummy assertion"
def test_round_up():
assert hd.round_up(98) == 100
assert hd.round_up(55) == 60
assert hd.round_up(10) == 10
assert hd.round_up(345) == 400
def test_tidy_bins():
assert hd.tidy_bins([21,35,92], 4).tolist() == [0,25,50,75,100]
assert hd.tidy_bins([21,35,92]).tolist() == [0,10,20,30,40,50,60,70,80,90,100]
assert hd.tidy_bins([21,35,92], 5).tolist() == [0,20,40,60,80,100]
def test_to_datetime():
days = ['2021-04-05 00:00', # Mon
'2021-04-10 11:46', # Sat
'2021-04-11 23:59' # Sun
]
df = pd.DataFrame({'input': days})
df = hd.to_datetime(df, input_column='input', output_column='output')
assert df.output[2] == pd.to_datetime(days[2])
def test_monday_before_and_after():
days = ['2021-04-05 00:00', # Mon
'2021-04-10 11:46', # Sat
'2021-04-11-23:59' # Sun
]
days_dt = pd.to_datetime(days)
mon_before = pd.to_datetime('2021-04-05 00:00')
mon_after = pd.to_datetime('2021-04-12 00:00')
for d in days_dt:
assert (hd.monday_before(d) == mon_before)
assert (hd.monday_after(d) == mon_after)
log.info('test_monday_before_after: PASS')
@pytest.fixture
def load_testdata_5m():
# load the 5-months data set to be used for tests
data_dir = '../data'
src_file = 'sample01.csv'
f = os.path.join(data_dir, src_file)
df = pd.read_csv(f, encoding='latin_1', sep=';', error_bad_lines=False)
df['created'] = pd.to_datetime(df['created'], format=hd.format_dash, errors='coerce')
df['resolved'] = pd.to_datetime(df['resolved'], format=hd.format_dash, errors='coerce')
df = hd.augment_columns(df)
return df
# expect the data as loaded by load_testdata_5m
def test_week_boundaries(setup_logging, load_testdata_5m):
df = load_testdata_5m
# correct results
inner_boundaries = (pd.Timestamp('2020-09-07 00:00:00'), pd.Timestamp('2021-01-25 00:00:00'), 20.0)
outer_boundaries = ( | pd.Timestamp('2020-08-31 00:00:00') | pandas.Timestamp |
# *****************************************************************************
# © Copyright IBM Corp. 2018. All Rights Reserved.
#
# This program and the accompanying materials
# are made available under the terms of the Apache V2.0 license
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# *****************************************************************************
"""
The Built In Functions module contains preinstalled functions
"""
import itertools as it
import datetime as dt
import importlib
import logging
import time
import numpy as np
import pandas as pd
import scipy as sp
from pyod.models.cblof import CBLOF
import ruptures as rpt
# for Spectral Analysis
from scipy import signal, fftpack
import skimage as ski
from skimage import util as skiutil # for nifty windowing
# for KMeans
from sklearn import ensemble
from sklearn import linear_model
from sklearn import metrics
from sklearn.covariance import MinCovDet
from sklearn.neighbors import (KernelDensity, LocalOutlierFactor)
from sklearn.pipeline import Pipeline, TransformerMixin
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import (StandardScaler, RobustScaler, MinMaxScaler,
minmax_scale, PolynomialFeatures)
from sklearn.utils import check_array
# for Matrix Profile
import stumpy
# for KDEAnomalyScorer
import statsmodels.api as sm
from statsmodels.nonparametric.kernel_density import KDEMultivariate
from statsmodels.tsa.arima.model import ARIMA
# EXCLUDED until we upgrade to statsmodels 0.12
#from statsmodels.tsa.forecasting.stl import STLForecast
from .base import (BaseTransformer, BaseRegressor, BaseEstimatorFunction, BaseSimpleAggregator)
from .bif import (AlertHighValue)
from .ui import (UISingle, UIMulti, UIMultiItem, UIFunctionOutSingle, UISingleItem, UIFunctionOutMulti)
# VAE
import torch
import torch.autograd
import torch.nn as nn
logger = logging.getLogger(__name__)
try:
# for gradient boosting
import lightgbm
except (AttributeError, ImportError):
logger.exception('')
logger.debug(f'Could not import lightgbm package. Might have issues when using GBMRegressor catalog function')
PACKAGE_URL = 'git+https://github.com/ibm-watson-iot/functions.git@'
_IS_PREINSTALLED = True
Error_SmallWindowsize = 0.0001
Error_Generic = 0.0002
FrequencySplit = 0.3
DefaultWindowSize = 12
SmallEnergy = 1e-20
KMeans_normalizer = 1
Spectral_normalizer = 100 / 2.8
FFT_normalizer = 1
Saliency_normalizer = 1
Generalized_normalizer = 1 / 300
# from
# https://stackoverflow.com/questions/44790072/sliding-window-on-time-series-data
def view_as_windows1(temperature, length, step):
logger.info('VIEW ' + str(temperature.shape) + ' ' + str(length) + ' ' + str(step))
def moving_window(x, length, _step=1):
if not isinstance(_step, int) or _step < 1:
logger.info('MOVE ' + str(_step))
_step = 1
streams = it.tee(x, length)
return zip(*[it.islice(stream, i, None, _step) for stream, i in zip(streams, it.count(step=1))])
x_ = list(moving_window(temperature, length, step))
return np.asarray(x_)
def view_as_windows(temperature, length, step):
return skiutil.view_as_windows(temperature, window_shape=(length,), step=step)
def custom_resampler(array_like):
# initialize
if 'gap' not in dir():
gap = 0
if array_like.values.size > 0:
gap = 0
return 0
else:
gap += 1
return gap
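# Illustrative sketch, not part of the original module: custom_resampler flags empty resample
# bins. It returns 0 when the bin holds data and 1 when it is empty; the 'gap' variable is
# re-initialized on every call, so it acts as a flag rather than a running counter.
def _example_custom_resampler():
    non_empty = pd.Series([1.0, 2.0])
    empty = pd.Series([], dtype=float)
    return custom_resampler(non_empty), custom_resampler(empty)   # -> (0, 1)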
def min_delta(df):
# minimal time delta for merging
if df is None:
return pd.Timedelta('5 seconds'), df
elif len(df.index.names) > 1:
df2 = df.reset_index(level=df.index.names[1:], drop=True)
else:
df2 = df
try:
mindelta = df2.index.to_series().diff().min()
except Exception as e:
logger.debug('Min Delta error: ' + str(e))
mindelta = pd.Timedelta('5 seconds')
if mindelta == dt.timedelta(seconds=0) or pd.isnull(mindelta):
mindelta = pd.Timedelta('5 seconds')
return mindelta, df2
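# Minimal sketch (assumption, not from the original module): the essential step inside
# min_delta is the smallest difference between consecutive timestamps; the rest is fallback
# handling for missing frames, extra index levels and zero/NaT deltas.
def _example_min_delta():
    idx = pd.date_range('2021-01-01', periods=4, freq='30s')
    toy = pd.DataFrame({'value': range(4)}, index=idx)
    mindelta, _ = min_delta(toy)
    return mindelta   # -> Timedelta('0 days 00:00:30')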
def set_window_size_and_overlap(windowsize, trim_value=2 * DefaultWindowSize):
# make sure it exists
if windowsize is None:
windowsize = DefaultWindowSize
# make sure it is positive and not too large
trimmed_ws = np.minimum(np.maximum(windowsize, 1), trim_value)
# overlap
if trimmed_ws == 1:
ws_overlap = 0
else:
# larger overlap - half the window
ws_overlap = trimmed_ws // 2
return trimmed_ws, ws_overlap
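# Illustrative values, not part of the original module: the helper clips the window size to
# [1, 2 * DefaultWindowSize] by default and uses half of the clipped window as the overlap.
def _example_window_sizes():
    return (set_window_size_and_overlap(None),   # -> (12, 6), falls back to DefaultWindowSize
            set_window_size_and_overlap(1),      # -> (1, 0), no overlap for a single point
            set_window_size_and_overlap(100))    # -> (24, 12), clipped to 2 * DefaultWindowSize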
def dampen_anomaly_score(array, dampening):
if dampening is None:
dampening = 0.9 # gradient dampening
if dampening >= 1:
return array
if dampening < 0.01:
return array
if array.size <= 1:
return array
gradient = np.gradient(array)
# dampened
grad_damp = np.float_power(abs(gradient), dampening) * np.sign(gradient)
# reconstruct (dampened) anomaly score by discrete integration
integral = []
x = array[0]
for x_el in np.nditer(grad_damp):
x = x + x_el
integral.append(x)
# shift array slightly to the right to position anomaly score
array_damp = np.roll(np.asarray(integral), 1)
array_damp[0] = array_damp[1]
# normalize
return array_damp / dampening / 2
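# Minimal sketch (assumption): dampening compresses the gradient of the score with a power
# below one and re-integrates it, which softens sharp spikes while keeping their position.
def _example_dampen_anomaly_score():
    raw = np.array([0.0, 0.0, 10.0, 0.0, 0.0])
    return dampen_anomaly_score(raw, 0.5)   # same length as raw, the spike at index 2 is flattened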
# Saliency helper functions
# copied from https://github.com/y-bar/ml-based-anomaly-detection
# remove the boring part from an image resp. time series
def series_filter(values, kernel_size=3):
"""
Filter a time series. Practically, this calculates the mean value inside the kernel window.
For the math, see https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html.
:param values: input series values
:param kernel_size: size of the averaging window
:return: the filtered (moving average) values
"""
filter_values = np.cumsum(values, dtype=float)
logger.info('SERIES_FILTER: ' + str(values.shape) + ',' + str(filter_values.shape) + ',' + str(kernel_size))
filter_values[kernel_size:] = filter_values[kernel_size:] - filter_values[:-kernel_size]
filter_values[kernel_size:] = filter_values[kernel_size:] / kernel_size
for i in range(1, kernel_size):
filter_values[i] /= i + 1
return filter_values
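# Example, not part of the original module: series_filter is a causal moving average whose
# window grows from one sample up to kernel_size over the first few elements.
def _example_series_filter():
    values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    return series_filter(values, kernel_size=3)   # -> array([1. , 1.5, 2. , 3. , 4. ])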
# Saliency class
# see https://www.inf.uni-hamburg.de/en/inst/ab/cv/research/research1-visual-attention.html
class Saliency(object):
def __init__(self, amp_window_size, series_window_size, score_window_size):
self.amp_window_size = amp_window_size
self.series_window_size = series_window_size
self.score_window_size = score_window_size
def transform_saliency_map(self, values):
"""
Transform a time series into its saliency map via the spectral residual, a method from computer vision.
For example, see https://docs.opencv.org/master/d8/d65/group__saliency.html
:param values: a list or numpy array of float values.
:return: the saliency map
"""
freq = np.fft.fft(values)
mag = np.sqrt(freq.real ** 2 + freq.imag ** 2)
# remove the boring part of a timeseries
spectral_residual = np.exp(np.log(mag) - series_filter(np.log(mag), self.amp_window_size))
freq.real = freq.real * spectral_residual / mag
freq.imag = freq.imag * spectral_residual / mag
# and apply inverse fourier transform
saliency_map = np.fft.ifft(freq)
return saliency_map
def transform_spectral_residual(self, values):
saliency_map = self.transform_saliency_map(values)
spectral_residual = np.sqrt(saliency_map.real ** 2 + saliency_map.imag ** 2)
return spectral_residual
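# Usage sketch (assumption, not part of the original module): the spectral residual of a noisy
# series tends to peak where the series is most surprising, for example at an injected spike.
def _example_spectral_residual():
    sal = Saliency(amp_window_size=3, series_window_size=3, score_window_size=3)
    rng = np.random.default_rng(0)
    toy = rng.normal(size=128)
    toy[64] += 10.0                                  # inject a spike
    return sal.transform_spectral_residual(toy)      # same length as toy, large around index 64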
def merge_score(dfEntity, dfEntityOrig, column_name, score, mindelta):
"""
Fit interpolated score to original entity slice of the full dataframe
"""
# equip score with time values, make sure it's positive
score[score < 0] = 0
dfEntity[column_name] = score
# merge
dfEntityOrig = pd.merge_asof(dfEntityOrig, dfEntity[column_name], left_index=True, right_index=True,
direction='nearest', tolerance=mindelta)
if column_name + '_y' in dfEntityOrig:
merged_score = dfEntityOrig[column_name + '_y'].to_numpy()
else:
merged_score = dfEntityOrig[column_name].to_numpy()
return merged_score
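# Background sketch (assumption): merge_score relies on pandas.merge_asof with a time tolerance
# to re-align the interpolated score with the original, possibly irregular index.
def _example_merge_asof():
    left = pd.DataFrame({'value': [1.0, 2.0]},
                        index=pd.to_datetime(['2021-01-01 00:00:00', '2021-01-01 00:00:10']))
    right = pd.DataFrame({'score': [0.1, 0.9]},
                         index=pd.to_datetime(['2021-01-01 00:00:01', '2021-01-01 00:00:09']))
    return pd.merge_asof(left, right, left_index=True, right_index=True,
                         direction='nearest', tolerance=pd.Timedelta('5 seconds'))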
#######################################################################################
# Scalers
#######################################################################################
class Standard_Scaler(BaseEstimatorFunction):
"""
Learns and applies standard scaling
"""
eval_metric = staticmethod(metrics.r2_score)
# class variables
train_if_no_model = True
def set_estimators(self):
self.estimators['standard_scaler'] = (StandardScaler, self.params)
logger.info('Standard Scaler initialized')
def __init__(self, features=None, targets=None, predictions=None):
super().__init__(features=features, targets=targets, predictions=predictions, keep_current_models=True)
# do not run score and call transform instead of predict
self.is_scaler = True
self.experiments_per_execution = 1
self.normalize = True # support for optional scaling in subclasses
self.prediction = self.predictions[0] # support for subclasses with univariate focus
self.params = {}
self.whoami = 'Standard_Scaler'
# used by all the anomaly scorers based on it
def prepare_data(self, dfEntity):
logger.debug(self.whoami + ': prepare Data for ' + self.prediction + ' column')
# operate on simple timestamp index
# needed for aggregated data with 3 or more indices
if len(dfEntity.index.names) > 1:
index_names = dfEntity.index.names
dfe = dfEntity.reset_index(index_names[1:])
else:
dfe = dfEntity
# interpolate gaps - data imputation
try:
dfe = dfe.interpolate(method="time")
except Exception as e:
logger.error('Prepare data error: ' + str(e))
# one dimensional time series - named temperature for catchyness
temperature = dfe[self.prediction].fillna(0).to_numpy(dtype=np.float64)
return dfe, temperature
# dummy function for scaler, can be replaced with anomaly functions
def kexecute(self, entity, df_copy):
return df_copy
def execute(self, df):
df_copy = df.copy()
entities = np.unique(df_copy.index.levels[0])
logger.debug(str(entities))
missing_cols = [x for x in self.predictions if x not in df_copy.columns]
for m in missing_cols:
df_copy[m] = None
for entity in entities:
normalize_entity = self.normalize
try:
check_array(df_copy.loc[[entity]][self.features].values, allow_nd=True)
except Exception as e:
normalize_entity = False
logger.error(
'Found Nan or infinite value in feature columns for entity ' + str(entity) + ' error: ' + str(e))
# support for optional scaling in subclasses
if normalize_entity:
dfe = super()._execute(df_copy.loc[[entity]], entity)
df_copy.loc[entity, self.predictions] = dfe[self.predictions]
else:
self.prediction = self.features[0]
df_copy = self.kexecute(entity, df_copy)
self.prediction = self.predictions[0]
logger.info('Standard_Scaler: Found columns ' + str(df_copy.columns))
return df_copy
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem(name='features', datatype=float, required=True))
inputs.append(UIMultiItem(name='targets', datatype=float, required=True, output_item='predictions',
is_output_datatype_derived=True))
# define arguments that behave as function outputs
outputs = []
return inputs, outputs
class Robust_Scaler(BaseEstimatorFunction):
"""
Learns and applies robust scaling, scaling after outlier removal
"""
eval_metric = staticmethod(metrics.r2_score)
# class variables
train_if_no_model = True
def set_estimators(self):
self.estimators['robust_scaler'] = (RobustScaler, self.params)
logger.info('Robust Scaler initialized')
def __init__(self, features=None, targets=None, predictions=None):
super().__init__(features=features, targets=targets, predictions=predictions, keep_current_models=True)
# do not run score and call transform instead of predict
self.is_scaler = True
self.experiments_per_execution = 1
self.params = {}
def execute(self, df):
df_copy = df.copy()
entities = np.unique(df_copy.index.levels[0])
logger.debug(str(entities))
missing_cols = [x for x in self.predictions if x not in df_copy.columns]
for m in missing_cols:
df_copy[m] = None
for entity in entities:
# per entity - copy for later inplace operations
try:
check_array(df_copy.loc[[entity]][self.features].values, allow_nd=True)
except Exception as e:
logger.error(
'Found Nan or infinite value in feature columns for entity ' + str(entity) + ' error: ' + str(e))
continue
dfe = super()._execute(df_copy.loc[[entity]], entity)
df_copy.loc[entity, self.predictions] = dfe[self.predictions]
return df_copy
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem(name='features', datatype=float, required=True))
inputs.append(UIMultiItem(name='targets', datatype=float, required=True, output_item='predictions',
is_output_datatype_derived=True))
# define arguments that behave as function outputs
outputs = []
return inputs, outputs
class MinMax_Scaler(BaseEstimatorFunction):
"""
Learns and applies minmax scaling
"""
eval_metric = staticmethod(metrics.r2_score)
# class variables
train_if_no_model = True
def set_estimators(self):
self.estimators['minmax_scaler'] = (MinMaxScaler, self.params)
logger.info('MinMax Scaler initialized')
def __init__(self, features=None, targets=None, predictions=None):
super().__init__(features=features, targets=targets, predictions=predictions, keep_current_models=True)
# do not run score and call transform instead of predict
self.is_scaler = True
self.experiments_per_execution = 1
self.params = {}
def execute(self, df):
df_copy = df.copy()
entities = np.unique(df_copy.index.levels[0])
logger.debug(str(entities))
missing_cols = [x for x in self.predictions if x not in df_copy.columns]
for m in missing_cols:
df_copy[m] = None
for entity in entities:
try:
check_array(df_copy.loc[[entity]][self.features].values, allow_nd=True)
except Exception as e:
logger.error(
'Found Nan or infinite value in feature columns for entity ' + str(entity) + ' error: ' + str(e))
continue
dfe = super()._execute(df_copy.loc[[entity]], entity)
df_copy.loc[entity, self.predictions] = dfe[self.predictions]
return df_copy
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem(name='features', datatype=float, required=True))
inputs.append(UIMultiItem(name='targets', datatype=float, required=True, output_item='predictions',
is_output_datatype_derived=True))
# define arguments that behave as function outputs
outputs = []
return inputs, outputs
#######################################################################################
# Anomaly Scorers
#######################################################################################
class AnomalyScorer(BaseTransformer):
"""
Superclass of all unsupervised anomaly detection functions.
"""
def __init__(self, input_item, windowsize, output_items):
super().__init__()
logger.debug(input_item)
self.input_item = input_item
# use 12 by default
self.windowsize, self.windowoverlap = set_window_size_and_overlap(windowsize)
# assume 1 per sec for now
self.frame_rate = 1
# step
self.step = self.windowsize - self.windowoverlap
self.output_items = output_items
self.normalize = False
self.whoami = 'Anomaly'
def get_model_name(self, prefix='model', suffix=None):
name = []
if prefix is not None:
name.append(prefix)
name.extend([self._entity_type.name, self.whoami])
name.append(self.output_items[0])
if suffix is not None:
name.append(suffix)
name = '.'.join(name)
return name
# make sure data is evenly spaced
def prepare_data(self, dfEntity):
logger.debug(self.whoami + ': prepare Data')
# operate on simple timestamp index
if len(dfEntity.index.names) > 1:
index_names = dfEntity.index.names
dfe = dfEntity.reset_index(index_names[1:])
else:
dfe = dfEntity
# interpolate gaps - data imputation
try:
dfe = dfe.dropna(subset=[self.input_item]).interpolate(method="time")
except Exception as e:
logger.error('Prepare data error: ' + str(e))
# one dimensional time series - named temperature for catchyness
temperature = dfe[self.input_item].fillna(0).to_numpy(dtype=np.float64)
return dfe, temperature
def execute(self, df):
logger.debug('Execute ' + self.whoami)
df_copy = df # no copy
# check data type
if not pd.api.types.is_numeric_dtype(df_copy[self.input_item].dtype):
logger.error('Anomaly scoring on non-numeric feature:' + str(self.input_item))
return df_copy
# set output columns to zero
for output_item in self.output_items:
df_copy[output_item] = 0
# delegate to _calc
logger.debug('Execute ' + self.whoami + ' enter per entity execution')
# group over entities
group_base = [pd.Grouper(axis=0, level=0)]
df_copy = df_copy.groupby(group_base).apply(self._calc)
logger.debug('Scoring done')
return df_copy
def _calc(self, df):
entity = df.index.levels[0][0]
# get rid of entity id as part of the index
df = df.droplevel(0)
# Get new data frame with sorted index
dfe_orig = df.sort_index()
# remove all rows with only null entries
dfe = dfe_orig.dropna(how='all')
# minimal time delta for merging
mindelta, dfe_orig = min_delta(dfe_orig)
logger.debug('Timedelta:' + str(mindelta) + ' Index: ' + str(dfe_orig.index))
# one dimensional time series - named temperature for catchyness
# interpolate gaps - data imputation by default
# for missing data detection we look at the timestamp gradient instead
dfe, temperature = self.prepare_data(dfe)
logger.debug(
self.whoami + ', Entity: ' + str(entity) + ', Input: ' + str(self.input_item) + ', Windowsize: ' + str(
self.windowsize) + ', Output: ' + str(self.output_items) + ', Overlap: ' + str(
self.windowoverlap) + ', Inputsize: ' + str(temperature.size))
if temperature.size <= self.windowsize:
logger.debug(str(temperature.size) + ' <= ' + str(self.windowsize))
for output_item in self.output_items:
dfe[output_item] = Error_SmallWindowsize
else:
logger.debug(str(temperature.size) + ' > ' + str(self.windowsize))
for output_item in self.output_items:
dfe[output_item] = Error_Generic
temperature = self.scale(temperature, entity)
scores = self.score(temperature)
# length of time_series_temperature, signal_energy and ets_zscore is smaller than half the original
# extend it to cover the full original length
logger.debug('->')
try:
for i,output_item in enumerate(self.output_items):
# check for fast path, no interpolation required
diff = temperature.size - scores[i].size
# slow path - interpolate result score to stretch it to the size of the input data
if diff > 0:
dfe[output_item] = 0.0006
time_series_temperature = np.linspace(self.windowsize // 2, temperature.size - self.windowsize // 2 + 1,
temperature.size - diff)
linear_interpolate = sp.interpolate.interp1d(time_series_temperature, scores[i], kind='linear',
fill_value='extrapolate')
zScoreII = merge_score(dfe, dfe_orig, output_item,
abs(linear_interpolate(np.arange(0, temperature.size, 1))), mindelta)
# fast path - either cut off or just copy
elif diff < 0:
zScoreII = scores[i][0:temperature.size]
else:
zScoreII = scores[i]
df[output_item] = zScoreII
except Exception as e:
logger.error(self.whoami + ' score integration failed with ' + str(e))
logger.debug('--->')
return df
def score(self, temperature):
#scores = np.zeros((len(self.output_items), ) + temperature.shape)
scores = []
for output_item in self.output_items:
scores.append(np.zeros(temperature.shape))
try:
# super simple 1-dimensional z-score
ets_zscore = abs(sp.stats.zscore(temperature))
scores[0] = ets_zscore
# 2nd argument to return the modified input argument (for no data)
if len(self.output_items) > 1:
scores[1] = temperature
except Exception as e:
logger.error(self.whoami + ' failed with ' + str(e))
return scores
def scale(self, temperature, entity):
normalize_entity = self.normalize
if not normalize_entity:
return temperature
temp = temperature.reshape(-1, 1)
logger.info(self.whoami + ' scaling ' + str(temperature.shape))
try:
check_array(temp, allow_nd=True)
except Exception as e:
logger.error('Found Nan or infinite value in input data, error: ' + str(e))
return temperature
db = self._entity_type.db
scaler_model = None
# per entity - copy for later inplace operations
model_name = self.get_model_name(suffix=entity)
try:
scaler_model = db.model_store.retrieve_model(model_name)
logger.info('load model %s' % str(scaler_model))
except Exception as e:
logger.error('Model retrieval failed with ' + str(e))
# failed to load a model, so train it
if scaler_model is None:
# all variables should be continuous
scaler_model = StandardScaler().fit(temp)
logger.debug('Created Scaler ' + str(scaler_model))
try:
db.model_store.store_model(model_name, scaler_model)
except Exception as e:
logger.error('Model store failed with ' + str(e))
if scaler_model is not None:
temp = scaler_model.transform(temp)
return temp.reshape(temperature.shape)
return temperature
#####
# experimental function to interpolate over larger gaps
####
class Interpolator(AnomalyScorer):
"""
Interpolates NaN and data to be interpreted as NaN (for example 0 as an invalid sensor reading).
The window size is typically set large enough to allow for "bridging" gaps.
Missing indicates sensor readings to be interpreted as invalid.
"""
def __init__(self, input_item, windowsize, missing, output_item):
super().__init__(input_item, windowsize, [output_item])
logger.debug(input_item)
self.missing = missing
self.whoami = 'Interpolator'
def prepare_data(self, dfEntity):
logger.debug(self.whoami + ': prepare Data')
# operate on simple timestamp index
if len(dfEntity.index.names) > 1:
index_names = dfEntity.index.names
dfe = dfEntity.reset_index(index_names[1:])
else:
dfe = dfEntity
# remove Nan
dfe = dfe[dfe[self.input_item].notna()]
# remove self.missing
dfe = dfe[dfe[self.input_item] != self.missing]
# interpolate gaps - data imputation
try:
dfe = dfe.interpolate(method="time")
except Exception as e:
logger.error('Prepare data error: ' + str(e))
# one dimensional time series - named temperature for catchyness
# replace NaN with self.missing
temperature = dfe[self.input_item].fillna(0).to_numpy(dtype=np.float64)
return dfe, temperature
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to interpolate'))
inputs.append(
UISingle(name='windowsize', datatype=int, description='Minimal size of the window for interpolating data.'))
inputs.append(UISingle(name='missing', datatype=int, description='Data to be interpreted as not-a-number.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=float, description='Interpolated data'))
return (inputs, outputs)
class NoDataAnomalyScoreExt(AnomalyScorer):
"""
An unsupervised anomaly detection function.
Uses z-score AnomalyScorer to find gaps in data.
The function moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
"""
def __init__(self, input_item, windowsize, output_item):
super().__init__(input_item, windowsize, [output_item])
self.whoami = 'NoDataExt'
self.normalizer = 1
logger.debug('NoDataExt')
def prepare_data(self, dfEntity):
logger.debug(self.whoami + ': prepare Data')
# operate on simple timestamp index
if len(dfEntity.index.names) > 1:
index_names = dfEntity.index.names
dfe = dfEntity.reset_index(index_names[1:])
else:
dfe = dfEntity
# count the timedelta in seconds between two events
timeSeq = (dfe.index.values - dfe.index[0].to_datetime64()) / np.timedelta64(1, 's')
#dfe = dfEntity.copy()
# one dimensional time series - named temperature for catchyness
# we look at the gradient of the time series timestamps for anomaly detection
# might throw an exception - we catch it in the super class !!
try:
temperature = np.gradient(timeSeq)
dfe[[self.input_item]] = temperature
except Exception as pe:
logger.info("NoData Gradient failed with " + str(pe))
dfe[[self.input_item]] = 0
temperature = dfe[[self.input_item]].values
temperature[0] = 10 ** 10
temperature = temperature.astype('float64').reshape(-1)
return dfe, temperature
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
inputs.append(UISingle(name='windowsize', datatype=int,
description='Size of each sliding window in data points. Typically set to 12.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=float, description='No data anomaly score'))
return inputs, outputs
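# Illustrative sketch (assumption): the "no data" scorer replaces the sensor value with the
# gradient of the timestamps, so long gaps between readings stand out as large values that the
# downstream z-score then flags.
def _example_timestamp_gradient():
    stamps = pd.to_datetime(['2021-01-01 00:00', '2021-01-01 00:01',
                             '2021-01-01 00:02', '2021-01-01 01:02'])
    seconds = (stamps.values - stamps.values[0]) / np.timedelta64(1, 's')
    return np.gradient(seconds)   # -> array([  60.,   60., 1830., 3600.])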
class ChangePointDetector(AnomalyScorer):
'''
An unsupervised anomaly detection function.
Detects changes in the underlying data distribution by applying bottom-up change point detection (via the ruptures library) and converting the detected change points into a continuous score.
Moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
Try several anomaly detectors on your data and use the one that fits your data best.
'''
def __init__(self, input_item, windowsize, chg_pts):
super().__init__(input_item, windowsize, [chg_pts])
logger.debug(input_item)
self.whoami = 'ChangePointDetector'
def score(self, temperature):
scores = []
sc = np.zeros(temperature.shape)
try:
algo = rpt.BottomUp(model="l2", jump=2).fit(temperature)
chg_pts = algo.predict(n_bkps=15)
for j in chg_pts:
x = np.arange(0, temperature.shape[0], 1)
Gaussian = sp.stats.norm(j-1, temperature.shape[0]/20) # high precision
y = Gaussian.pdf(x) * temperature.shape[0]/8 # max is ~1
sc += y
except Exception as e:
logger.error(self.whoami + ' failed with ' + str(e))
scores.append(sc)
return scores
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='chg_pts', datatype=float, description='Change points'))
return inputs, outputs
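# Minimal sketch (assumption): the detector converts the discrete change points reported by
# ruptures into a continuous score by adding a Gaussian bump with a peak value of roughly one
# at every change point, mirroring the loop in ChangePointDetector.score.
def _example_change_point_score(change_points=(50, 120), n=200):
    x = np.arange(0, n, 1)
    score = np.zeros(n)
    for j in change_points:   # hypothetical change point locations
        score += sp.stats.norm(j - 1, n / 20).pdf(x) * n / 8
    return score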
ENSEMBLE = '_ensemble_'
SPECTRALEXT = 'SpectralAnomalyScoreExt'
class EnsembleAnomalyScore(BaseTransformer):
'''
Call a set of anomaly detectors and return an joint vote along with the individual results
'''
def __init__(self, input_item, windowsize, scorers, thresholds, output_item):
super().__init__()
self.input_item = input_item
self.windowsize = windowsize
self.output_item = output_item
logger.debug(input_item)
self.whoami = 'EnsembleAnomalyScore'
self.list_of_scorers = scorers.split(',')
self.thresholds = list(map(int, thresholds.split(',')))
self.klasses = []
self.instances = []
self.output_items = []
module = importlib.import_module('mmfunctions.anomaly')
for m in self.list_of_scorers:
klass = getattr(module, m)
self.klasses.append(klass)
print(klass.__name__)
if klass.__name__ == SPECTRALEXT:
inst = klass(input_item, windowsize, output_item + ENSEMBLE + klass.__name__,
output_item + ENSEMBLE + klass.__name__ + '_inv')
else:
inst = klass(input_item, windowsize, output_item + ENSEMBLE + klass.__name__)
self.output_items.append(output_item + ENSEMBLE + klass.__name__)
self.instances.append(inst)
def execute(self, df):
logger.debug('Execute ' + self.whoami)
df_copy = df # no copy
binned_indices_list = []
for inst, output, threshold in zip(self.instances, self.output_items, self.thresholds):
logger.info('Execute anomaly scorer ' + str(inst.__class__.__name__) + ' with threshold ' + str(threshold))
tic = time.perf_counter_ns()
df_copy = inst.execute(df_copy)
toc = time.perf_counter_ns()
logger.info('Executed anomaly scorer ' + str(inst.__class__.__name__) + ' in ' +\
str((toc-tic)//1000000) + ' milliseconds')
arr = df_copy[output]
# sort results into bins that depend on the thresholds
# 0 - below 3/4 threshold, 1 - up to the threshold, 2 - crossed the threshold,
# 3 - very high, 4 - extreme
if inst.__class__.__name__ == SPECTRALEXT and isinstance(threshold, int):
# hard coded threshold for inverted values
threshold_ = 5
bins = [threshold * 0.75, threshold, threshold * 1.5, threshold * 2]
binned_indices_list.append(np.searchsorted(bins, arr, side='left'))
if inst.__class__.__name__ == SPECTRALEXT:
bins = [threshold_ * 0.75, threshold_, threshold_ * 1.5, threshold_ * 2]
arr = df_copy[output + '_inv']
binned_indices_list.append(np.searchsorted(bins, arr, side='left'))
binned_indices = np.vstack(binned_indices_list).mean(axis=0)
# should we explicitly drop the columns generated by the ensemble members
#df[self.output_item] = binned_indices
df_copy[self.output_item] = binned_indices
return df_copy
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
inputs.append(UISingle(name='windowsize', datatype=int,
description='Size of each sliding window in data points. Typically set to 12.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutSingle(name='output_item', datatype=float, description='Ensemble anomaly score (joint vote)'))
return inputs, outputs
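# Illustrative worked example of the binning used in EnsembleAnomalyScore.execute
# above: each scorer's raw scores are sorted into five bins relative to its
# threshold and the per-scorer bin indices are averaged into the joint vote.
# The scores and the threshold below are invented for illustration.
def _demo_ensemble_binning():
    import numpy as np
    threshold = 4
    scores = np.array([0.5, 2.9, 3.5, 4.2, 6.5, 9.0])
    bins = [threshold * 0.75, threshold, threshold * 1.5, threshold * 2]
    return np.searchsorted(bins, scores, side='left')  # -> array([0, 0, 1, 2, 3, 4])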
class SpectralAnomalyScore(AnomalyScorer):
'''
An unsupervised anomaly detection function.
Applies a spectral analysis clustering technique to extract features from time series data and to create z scores.
Moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
Try several anomaly detectors on your data and use the one that fits your data best.
'''
def __init__(self, input_item, windowsize, output_item):
if isinstance(output_item, list):
super().__init__(input_item, windowsize, output_item)
else:
super().__init__(input_item, windowsize, [output_item])
logger.debug(input_item)
self.whoami = 'SpectralAnomalyScore'
def score(self, temperature):
scores = []
for output_item in self.output_items:
scores.append(np.zeros(temperature.shape))
try:
# Fourier transform:
# frequency, time, spectral density
frequency_temperature, time_series_temperature, spectral_density_temperature = signal.spectrogram(
temperature, fs=self.frame_rate, window='hanning', nperseg=self.windowsize,
noverlap=self.windowoverlap, detrend='l', scaling='spectrum')
# cut off freqencies too low to fit into the window
frequency_temperatureb = (frequency_temperature > 2 / self.windowsize).astype(int)
frequency_temperature = frequency_temperature * frequency_temperatureb
frequency_temperature[frequency_temperature == 0] = 1 / self.windowsize
signal_energy = np.dot(spectral_density_temperature.T, frequency_temperature)
signal_energy[signal_energy < SmallEnergy] = SmallEnergy
inv_signal_energy = np.divide(np.ones(signal_energy.size), signal_energy)
ets_zscore = abs(sp.stats.zscore(signal_energy)) * Spectral_normalizer
inv_zscore = abs(sp.stats.zscore(inv_signal_energy))
scores[0] = ets_zscore
if len(self.output_items) > 1:
scores[1] = inv_zscore
# 3rd argument to return the raw windowed signal energy
if len(self.output_items) > 2:
scores[2] = signal_energy
# 4th argument to return the modified input argument (for no data)
if len(self.output_items) > 3:
scores[3] = temperature.copy()
logger.debug(
'Spectral z-score max: ' + str(ets_zscore.max()) + ', Spectral inv z-score max: ' + str(
inv_zscore.max()))
except Exception as e:
logger.error(self.whoami + ' failed with ' + str(e))
return scores
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
inputs.append(UISingle(name='windowsize', datatype=int,
description='Size of each sliding window in data points. Typically set to 12.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutSingle(name='output_item', datatype=float, description='Spectral anomaly score (z-score)'))
return inputs, outputs
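# Illustrative sketch of the scoring chain in SpectralAnomalyScore.score above:
# spectrogram -> windowed signal energy -> z-score. The synthetic series,
# sampling rate and injected spike are assumptions for illustration.
def _demo_spectral_zscore(windowsize=12):
    import numpy as np
    from scipy import signal, stats
    rng = np.random.default_rng(0)
    temperature = np.sin(np.linspace(0, 20 * np.pi, 600)) + 0.1 * rng.standard_normal(600)
    temperature[300:312] += 5  # injected anomaly (synthetic)
    freq, _, Sxx = signal.spectrogram(temperature, fs=1, window='hann', nperseg=windowsize,
                                      noverlap=windowsize - 1, detrend='linear', scaling='spectrum')
    energy = np.dot(Sxx.T, freq)  # signal energy per window
    return np.abs(stats.zscore(energy))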
class SpectralAnomalyScoreExt(SpectralAnomalyScore):
'''
An unsupervised anomaly detection function.
Applies a spectral analysis clustering technique to extract features from time series data and to create z scores.
Moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
Try several anomaly detectors on your data and use the one that fits your data best.
'''
def __init__(self, input_item, windowsize, output_item, inv_zscore, signal_energy=None):
if signal_energy is None:
super().__init__(input_item, windowsize, [output_item, inv_zscore])
else:
super().__init__(input_item, windowsize, [output_item, inv_zscore, signal_energy])
logger.debug(input_item)
self.whoami = 'SpectralAnomalyScoreExt'
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
inputs.append(UISingle(name='windowsize', datatype=int,
description='Size of each sliding window in data points. Typically set to 12.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutSingle(name='output_item', datatype=float, description='Spectral anomaly score (z-score)'))
outputs.append(UIFunctionOutSingle(name='inv_zscore', datatype=float,
description='z-score of inverted signal energy - detects unusually low activity'))
outputs.append(UIFunctionOutSingle(name='signal_energy', datatype=float,
description='signal energy'))
return inputs, outputs
class KMeansAnomalyScore(AnomalyScorer):
"""
An unsupervised anomaly detection function.
Applies a k-means analysis clustering technique to time series data.
Moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
Try several anomaly models on your data and use the one that fits your data best.
"""
def __init__(self, input_item, windowsize, output_item, expr=None):
super().__init__(input_item, windowsize, [output_item])
logger.debug(input_item)
self.whoami = 'KMeans'
def score(self, temperature):
scores = []
for output_item in self.output_items:
scores.append(np.zeros(temperature.shape))
try:
# Chop into overlapping windows
slices = view_as_windows(temperature, self.windowsize, self.step)
if self.windowsize > 1:
n_cluster = 40
else:
n_cluster = 20
n_cluster = np.minimum(n_cluster, slices.shape[0] // 2)
logger.debug(self.whoami + ' params, Clusters: ' + str(n_cluster) + ', Slices: ' + str(slices.shape))
cblofwin = CBLOF(n_clusters=n_cluster, n_jobs=-1)
try:
cblofwin.fit(slices)
except Exception as e:
logger.info('KMeans failed with ' + str(e))
self.trace_append('KMeans failed with' + str(e))
return scores
pred_score = cblofwin.decision_scores_.copy() * KMeans_normalizer
scores[0] = pred_score
logger.debug('KMeans score max: ' + str(pred_score.max()))
except Exception as e:
logger.error(self.whoami + ' failed with ' + str(e))
return scores
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
inputs.append(UISingle(name='windowsize', datatype=int,
description='Size of each sliding window in data points. Typically set to 12.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=float, description='Anomaly score (kmeans)'))
return inputs, outputs
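# Illustrative sketch of the approach in KMeansAnomalyScore.score above: the
# series is chopped into overlapping windows and each window is scored with
# pyod's CBLOF clustering detector. Requires pyod and scikit-image; the
# synthetic series and the cluster count are assumptions and may need tuning.
def _demo_cblof_window_scores(windowsize=12, step=1):
    import numpy as np
    from pyod.models.cblof import CBLOF
    from skimage.util import view_as_windows
    rng = np.random.default_rng(0)
    temperature = np.sin(np.linspace(0, 20 * np.pi, 400)) + 0.1 * rng.standard_normal(400)
    temperature[200:212] += 4  # injected anomaly (synthetic)
    slices = view_as_windows(temperature, windowsize, step)
    detector = CBLOF(n_clusters=min(20, slices.shape[0] // 2), n_jobs=-1)
    detector.fit(slices)
    return detector.decision_scores_  # one score per window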
class GeneralizedAnomalyScore(AnomalyScorer):
"""
An unsupervised anomaly detection function.
Applies the Minimum Covariance Determinant (FastMCD) technique to detect outliers.
Moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
Try several anomaly detectors on your data and use the one that fits your data best.
"""
def __init__(self, input_item, windowsize, output_item):
super().__init__(input_item, windowsize, [output_item])
logger.debug(input_item)
self.whoami = 'GAM'
self.normalizer = Generalized_normalizer
def feature_extract(self, temperature):
logger.debug(self.whoami + ': feature extract')
slices = view_as_windows(temperature, self.windowsize, self.step)
return slices
def score(self, temperature):
scores = []
for output_item in self.output_items:
scores.append(np.zeros(temperature.shape))
logger.debug(str(temperature.size) + "," + str(self.windowsize))
temperature -= np.mean(temperature.astype(np.float64), axis=0)
mcd = MinCovDet()
# Chop into overlapping windows (default) or run through FFT first
slices = self.feature_extract(temperature)
try:
mcd.fit(slices)
pred_score = mcd.mahalanobis(slices).copy() * self.normalizer
except ValueError as ve:
pred_score = np.zeros(temperature.shape)
logger.info(self.whoami + ", Input: " + str(
self.input_item) + ", WindowSize: " + str(self.windowsize) + ", Output: " + str(
self.output_items[0]) + ", Step: " + str(self.step) + ", InputSize: " + str(
slices.shape) + " failed in the fitting step with \"" + str(ve) + "\" - scoring zero")
except Exception as e:
pred_score = np.zeros(temperature.shape)
logger.error(self.whoami + ", Input: " + str(
self.input_item) + ", WindowSize: " + str(self.windowsize) + ", Output: " + str(
self.output_items[0]) + ", Step: " + str(self.step) + ", InputSize: " + str(
slices.shape) + " failed in the fitting step with " + str(e))
scores[0] = pred_score
logger.debug(self.whoami + ' score max: ' + str(pred_score.max()))
return scores
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name="input_item", datatype=float, description="Data item to analyze", ))
inputs.append(UISingle(name="windowsize", datatype=int,
description="Size of each sliding window in data points. Typically set to 12."))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutSingle(name="output_item", datatype=float, description="Anomaly score (GeneralizedAnomaly)", ))
return inputs, outputs
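# Illustrative sketch of GeneralizedAnomalyScore.score above: overlapping
# windows are scored by their Mahalanobis distance under a robust FastMCD
# covariance estimate. The synthetic series is an assumption for illustration.
def _demo_mcd_window_scores(windowsize=12, step=1):
    import numpy as np
    from skimage.util import view_as_windows
    from sklearn.covariance import MinCovDet
    rng = np.random.default_rng(0)
    temperature = np.sin(np.linspace(0, 20 * np.pi, 400)) + 0.1 * rng.standard_normal(400)
    temperature[200:212] += 4  # injected anomaly (synthetic)
    temperature = temperature - np.mean(temperature)
    slices = view_as_windows(temperature, windowsize, step)
    mcd = MinCovDet().fit(slices)
    return mcd.mahalanobis(slices)  # one distance per window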
class NoDataAnomalyScore(GeneralizedAnomalyScore):
"""
An unsupervised anomaly detection function.
Uses FastMCD to find gaps in data.
The function moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
"""
def __init__(self, input_item, windowsize, output_item):
super().__init__(input_item, windowsize, output_item)
self.whoami = 'NoData'
self.normalizer = 1
logger.debug('NoData')
def prepare_data(self, dfEntity):
logger.debug(self.whoami + ': prepare Data')
# operate on simple timestamp index
if len(dfEntity.index.names) > 1:
index_names = dfEntity.index.names[1:]
dfe = dfEntity.reset_index(index_names)
else:
dfe = dfEntity
# count the timedelta in seconds between two events
logger.debug('type of index[0] is ' + str(type(dfEntity.index[0])))
try:
timeSeq = (dfe.index.values - dfe.index[0].to_datetime64()) / np.timedelta64(1, 's')
except Exception:
try:
time_to_numpy = np.array(dfe.index[0], dtype='datetime64')
logger.debug('Fallback timestamp conversion, type: ' + str(type(time_to_numpy)))
timeSeq = (time_to_numpy - dfe.index[0][0].to_datetime64()) / np.timedelta64(1, 's')
except Exception:
logger.debug('Could not compute timestamp differences - falling back to a constant')
timeSeq = 1.0
#dfe = dfEntity.copy()
# one dimensional time series - named temperature for catchiness
# we look at the gradient of the time series timestamps for anomaly detection
# might throw an exception - we catch it in the super class !!
try:
temperature = np.gradient(timeSeq)
dfe[[self.input_item]] = temperature
except Exception as pe:
logger.info("NoData Gradient failed with " + str(pe))
dfe[[self.input_item]] = 0
temperature = dfe[[self.input_item]].values
temperature[0] = 10 ** 10
temperature = temperature.astype('float64').reshape(-1)
return dfe, temperature
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
inputs.append(UISingle(name='windowsize', datatype=int,
description='Size of each sliding window in data points. Typically set to 12.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=float, description='No data anomaly score'))
return inputs, outputs
class FFTbasedGeneralizedAnomalyScore(GeneralizedAnomalyScore):
"""
An unsupervised and robust anomaly detection function.
Extracts temporal features from time series data using Fast Fourier Transforms.
Applies the GeneralizedAnomalyScore to the features to detect outliers.
Moves a sliding window across the data signal and applies the anomaly models to each window.
The window size is typically set to 12 data points.
Try several anomaly detectors on your data and use the one that fits your data best.
"""
def __init__(self, input_item, windowsize, output_item):
super().__init__(input_item, windowsize, output_item)
self.whoami = 'FFT'
self.normalizer = FFT_normalizer
logger.debug('FFT')
def feature_extract(self, temperature):
logger.debug(self.whoami + ': feature extract')
slices_ = view_as_windows(temperature, self.windowsize, self.step)
slicelist = []
for slice in slices_:
slicelist.append(fftpack.rfft(slice))
return np.stack(slicelist, axis=0)
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name="input_item", datatype=float, description="Data item to analyze", ))
inputs.append(UISingle(name="windowsize", datatype=int,
description="Size of each sliding window in data points. Typically set to 12."))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name="output_item", datatype=float,
description="Anomaly score (FFTbasedGeneralizedAnomalyScore)", ))
return inputs, outputs
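# Illustrative sketch of the feature_extract step in
# FFTbasedGeneralizedAnomalyScore above: each overlapping window is replaced by
# its real FFT before the FastMCD scoring. The helper name and default window
# size are assumptions for illustration.
def _demo_fft_features(temperature, windowsize=12, step=1):
    import numpy as np
    from scipy import fftpack
    from skimage.util import view_as_windows
    slices = view_as_windows(np.asarray(temperature, dtype=float), windowsize, step)
    return np.stack([fftpack.rfft(window) for window in slices], axis=0)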
class MatrixProfileAnomalyScore(AnomalyScorer):
"""
An unsupervised anomaly detection function.
Applies matrix profile analysis on time series data.
Moves a sliding window across the data signal to calculate the euclidean distance from one window to all others to build a distance profile.
The window size is typically set to 12 data points.
Try several anomaly models on your data and use the one that fits your data best.
"""
DATAPOINTS_AFTER_LAST_WINDOW = 1e-15
INIT_SCORES = 1e-20
ERROR_SCORES = 1e-16
def __init__(self, input_item, window_size, output_item):
super().__init__(input_item, window_size, [output_item])
logger.debug(f'Input item: {input_item}')
self.whoami = 'MatrixProfile'
def score(self, temperature):
scores = []
for output_item in self.output_items:
scores.append(np.zeros(temperature.shape))
try: # calculate scores
matrix_profile = stumpy.aamp(temperature, m=self.windowsize)[:, 0]
# fill in a small value for newer data points outside the last possible window
fillers = np.array([self.DATAPOINTS_AFTER_LAST_WINDOW] * (self.windowsize - 1))
matrix_profile = np.append(matrix_profile, fillers)
except Exception as er:
logger.warning(f' Error in calculating Matrix Profile Scores. {er}')
matrix_profile = np.array([self.ERROR_SCORES] * temperature.shape[0])
scores[0] = matrix_profile
logger.debug('Matrix Profile score max: ' + str(matrix_profile.max()))
return scores
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = [UISingleItem(name="input_item", datatype=float, description="Time series data item to analyze", ),
UISingle(name="window_size", datatype=int,
description="Size of each sliding window in data points. Typically set to 12.")]
# define arguments that behave as function outputs
outputs = [UIFunctionOutSingle(name="output_item", datatype=float,
description="Anomaly score (MatrixProfileAnomalyScore)", )]
return inputs, outputs
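# Illustrative sketch of MatrixProfileAnomalyScore.score above, using stumpy's
# non-normalized matrix profile and padding the tail so the score vector
# matches the input length. Requires stumpy; the synthetic series is an
# assumption for illustration.
def _demo_matrix_profile_scores(windowsize=12):
    import numpy as np
    import stumpy
    rng = np.random.default_rng(0)
    temperature = np.sin(np.linspace(0, 20 * np.pi, 400)) + 0.1 * rng.standard_normal(400)
    temperature[200:212] += 4  # injected anomaly (synthetic)
    profile = stumpy.aamp(temperature, m=windowsize)[:, 0]
    fillers = np.full(windowsize - 1, 1e-15)  # data points after the last full window
    return np.append(profile, fillers)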
class SaliencybasedGeneralizedAnomalyScore(GeneralizedAnomalyScore):
"""
An unsupervised anomaly detection function.
Based on salient region detection models,
it uses the fast Fourier transform to reconstruct a signal from the salient features of the signal.
It applies GeneralizedAnomalyScore to the reconstructed signal.
The function moves a sliding window across the data signal and applies its analysis to each window.
The window size is typically set to 12 data points.
Try several anomaly detectors on your data and use the one that fits your data best.
"""
def __init__(self, input_item, windowsize, output_item):
super().__init__(input_item, windowsize, output_item)
self.whoami = 'Saliency'
self.saliency = Saliency(windowsize, 0, 0)
self.normalizer = Saliency_normalizer
logger.debug('Saliency')
def feature_extract(self, temperature):
logger.debug(self.whoami + ': feature extract')
slices = view_as_windows(temperature, self.windowsize, self.step)
return slices
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name="input_item", datatype=float, description="Data item to analyze"))
inputs.append(UISingle(name="windowsize", datatype=int,
description="Size of each sliding window in data points. Typically set to 12.", ))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name="output_item", datatype=float,
description="Anomaly score (SaliencybasedGeneralizedAnomalyScore)", ))
return (inputs, outputs)
#######################################################################################
# Anomaly detectors with scaling
#######################################################################################
class KMeansAnomalyScoreV2(KMeansAnomalyScore):
def __init__(self, input_item, windowsize, normalize, output_item, expr=None):
super().__init__(input_item, windowsize, output_item)
logger.debug(input_item)
self.normalize = normalize
self.whoami = 'KMeansV2'
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
inputs.append(UISingle(name='windowsize', datatype=int,
description='Size of each sliding window in data points. Typically set to 12.'))
inputs.append(UISingle(name='normalize', datatype=bool, description='Flag for normalizing data.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=float, description='Anomaly score (kmeans)'))
return (inputs, outputs)
class GeneralizedAnomalyScoreV2(GeneralizedAnomalyScore):
"""
An unsupervised anomaly detection function.
Applies the Minimum Covariance Determinant (FastMCD) technique to detect outliers.
Moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
The normalize switch allows learning and applying a standard scaler prior to computing the anomaly score.
Try several anomaly detectors on your data and use the one that fits your data best.
"""
def __init__(self, input_item, windowsize, normalize, output_item, expr=None):
super().__init__(input_item, windowsize, output_item)
logger.debug(input_item)
# do not run score and call transform instead of predict
self.normalize = normalize
self.whoami = 'GAMV2'
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name="input_item", datatype=float, description="Data item to analyze", ))
inputs.append(UISingle(name="windowsize", datatype=int,
description="Size of each sliding window in data points. Typically set to 12."))
inputs.append(UISingle(name='normalize', datatype=bool, description='Flag for normalizing data.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutSingle(name="output_item", datatype=float, description="Anomaly score (GeneralizedAnomaly)", ))
return inputs, outputs
class FFTbasedGeneralizedAnomalyScoreV2(GeneralizedAnomalyScoreV2):
"""
An unsupervised and robust anomaly detection function.
Extracts temporal features from time series data using Fast Fourier Transforms.
Applies the GeneralizedAnomalyScore to the features to detect outliers.
Moves a sliding window across the data signal and applies the anomaly models to each window.
The window size is typically set to 12 data points.
The normalize switch allows learning and applying a standard scaler prior to computing the anomaly score.
Try several anomaly detectors on your data and use the one that fits your data best.
"""
def __init__(self, input_item, windowsize, normalize, output_item):
super().__init__(input_item, windowsize, normalize, output_item)
self.normalize = normalize
self.whoami = 'FFTV2'
self.normalizer = FFT_normalizer
logger.debug('FFT')
def feature_extract(self, temperature):
logger.debug(self.whoami + ': feature extract')
slices_ = view_as_windows(temperature, self.windowsize, self.step)
slicelist = []
for slice in slices_:
slicelist.append(fftpack.rfft(slice))
return np.stack(slicelist, axis=0)
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name="input_item", datatype=float, description="Data item to analyze", ))
inputs.append(UISingle(name="windowsize", datatype=int,
description="Size of each sliding window in data points. Typically set to 12."))
inputs.append(UISingle(name='normalize', datatype=bool, description='Flag for normalizing data.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name="output_item", datatype=float,
description="Anomaly score (FFTbasedGeneralizedAnomalyScore)", ))
return inputs, outputs
class SaliencybasedGeneralizedAnomalyScoreV2(SaliencybasedGeneralizedAnomalyScore):
"""
An unsupervised anomaly detection function.
Based on salient region detection models,
it uses the fast Fourier transform to reconstruct a signal from the salient features of the signal.
It applies GeneralizedAnomalyScore to the reconstructed signal.
The function moves a sliding window across the data signal and applies its analysis to each window.
The window size is typically set to 12 data points.
The normalize switch allows learning and applying a standard scaler prior to computing the anomaly score.
Try several anomaly detectors on your data and use the one that fits your data best.
"""
def __init__(self, input_item, windowsize, normalize, output_item):
super().__init__(input_item, windowsize, output_item)
self.whoami = 'SaliencyV2'
self.normalize = normalize
logger.debug('SaliencyV2')
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name="input_item", datatype=float, description="Data item to analyze"))
inputs.append(UISingle(name="windowsize", datatype=int,
description="Size of each sliding window in data points. Typically set to 12.", ))
inputs.append(UISingle(name='normalize', datatype=bool, description='Flag for normalizing data.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name="output_item", datatype=float,
description="Anomaly score (SaliencybasedGeneralizedAnomalyScore)", ))
return inputs, outputs
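# The V2 scorers above describe a 'normalize' switch that learns and applies a
# standard scaler before scoring. An illustrative sketch of that preprocessing
# step, assuming scikit-learn's StandardScaler is the scaler in question:
def _demo_normalize(temperature):
    import numpy as np
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler()
    scaled = scaler.fit_transform(np.asarray(temperature, dtype=float).reshape(-1, 1))
    return scaled.reshape(-1), scaler  # keep the scaler to re-apply it at inference time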
KMeansAnomalyScorev2 = KMeansAnomalyScoreV2
FFTbasedGeneralizedAnomalyScorev2 = FFTbasedGeneralizedAnomalyScoreV2
SaliencybasedGeneralizedAnomalyScorev2 = SaliencybasedGeneralizedAnomalyScoreV2
GeneralizedAnomalyScorev2 = GeneralizedAnomalyScoreV2
#######################################################################################
# Base class to handle models
#######################################################################################
class SupervisedLearningTransformer(BaseTransformer):
name = 'SupervisedLearningTransformer'
"""
Base class for anomaly scorers that can be trained with historic data in a notebook
and automatically store a trained model in the tenant database.
Inferencing is run in the pipeline.
"""
def __init__(self, features, targets):
super().__init__()
logging.debug("__init__" + self.name)
# do NOT automatically train if no model is found (subclasses)
self.auto_train = False
self.delete_model = False
self.features = features
self.targets = targets
parms = []
if features is not None:
parms.extend(features)
if targets is not None:
parms.extend(targets)
parms = '.'.join(parms)
logging.debug("__init__ done with parameters: " + parms)
'''
Generate unique model name from entity, optionally features and target for consistency checks
'''
def get_model_name(self, prefix='model', features=None, targets=None, suffix=None):
name = []
if prefix is not None:
name.append(prefix)
name.extend([self._entity_type.name, self.name])
if features is not None:
name.extend(features)
if targets is not None:
name.extend(targets)
if suffix is not None:
name.append(suffix)
name = '.'.join(name)
return name
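# For example, with prefix 'model', an entity type named 'Equipment' (invented
# for illustration), a target list ['temperature'] and suffix 'device4711', the
# generated name would be
# 'model.Equipment.SupervisedLearningTransformer.temperature.device4711'.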
def load_model(self, suffix=None):
model_name = self.get_model_name(targets=self.targets, suffix=suffix)
my_model = None
try:
my_model = self._entity_type.db.model_store.retrieve_model(model_name)
logger.info('load model %s' % str(my_model))
except Exception as e:
logger.error('Model retrieval failed with ' + str(e))
pass
# ditch old model
version = 1
if self.delete_model:
if my_model is not None:
if hasattr(my_model, 'version'):
version = my_model.version + 1
logger.debug('Deleting robust model ' + str(version-1) + ' for entity: ' + str(suffix))
my_model = None
return model_name, my_model, version
def execute(self, df):
logger.debug('Execute ' + self.whoami)
df_copy = df # no copy
# check data type
#if df[self.input_item].dtype != np.float64:
for feature in self.features:
if not | pd.api.types.is_numeric_dtype(df_copy[feature].dtype) | pandas.api.types.is_numeric_dtype |
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import xarray as xr
from pandas.api.types import (
is_datetime64_any_dtype,
is_numeric_dtype,
is_string_dtype,
is_timedelta64_dtype,
)
def to_1d(value, unique=False, flat=True, get=None):
# pd.Series converts datetime to Timestamps
if isinstance(value, xr.DataArray):
value = value.values
array = np.atleast_1d(value)
if is_datetime(value):
array = pd.to_datetime(array).values
elif is_timedelta(value):
array = pd.to_timedelta(array).values
if array.ndim > 1 and get is not None:
array = array[get]
if unique:
try:
array = | pd.unique(array) | pandas.unique |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from tqdm import tqdm as pb
import datetime
import re
import warnings
import matplotlib.pyplot as plt
import pylab as mpl
from docx import Document
from docx.shared import Pt
from data_source import local_source
def concat_ts_codes(df):  # concatenate all TS_CODE values in df into a 'TS_CODE = "..." or ...' query condition string
result = ''
for code in df["TS_CODE"]:
result = result + 'TS_CODE = "' + code + '" or '
result = result[:-4]
return result
def drop_duplicates_keep_nonnan(df,subset):  # for each duplicate group keep the row with the fewest NaNs; currently unused
warnings.filterwarnings("ignore")
subset_values = []
df_result = pd.DataFrame(columns=df.columns)
for i in range(len(df)):
subset_value = list(df[subset].iloc[i,:])
if subset_value not in subset_values: subset_values.append(subset_value)
for subset_value in subset_values:
df_sub = df[(df[subset]==subset_value).product(axis=1)==1]
df_sub["nan_count"] = 0
df_sub.loc[:,"nan_count"] = df_sub.isnull().sum(axis=1)
df_sub.sort_values(by='nan_count',ascending=True, inplace=True)
df_sub = | pd.DataFrame(df_sub.iloc[0,:]) | pandas.DataFrame |
# Copyright (C) 2014-2017 <NAME>, <NAME>, <NAME>, <NAME> (in alphabetic order)
#
# This file is part of OpenModal.
#
# OpenModal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# OpenModal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenModal. If not, see <http://www.gnu.org/licenses/>.
from OpenModal.gui.widgets.animation import TableModel,Model
from PyQt5 import QtCore, QtGui,QtWidgets
import pyqtgraph as pg
import numpy as np
from numpy.core.umath_tests import inner1d
import time
import pandas as pd
from pyqtgraph.parametertree import Parameter, ParameterTree
from OpenModal.anim_tools import AnimWidgBase
import os
from OpenModal.keys import keys
import qtawesome as qta
from OpenGL.GL import *
from functools import partial
from OpenModal.gui.templates import COLOR_PALETTE, LIST_FONT_FAMILY, LIST_FONT_SIZE, MENUBAR_WIDTH
from string import Template
SHADER='OpenModal'
GLOPTS= {
GL_DEPTH_TEST: True,
GL_BLEND: False,
GL_ALPHA_TEST: False,
GL_CULL_FACE: False}
#'glLightModeli':(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)}
SMOOTH=True
COMPUTENORMALS=True
DRAW_EDGES_NODES=False
DRAW_EDGES_ELEMENTS=True
DRAW_EDGES_GCS=False
# ## Switch to using white background and black foreground
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
class CustomQTableView(QtWidgets.QTableView):
def __init__(self,parent):
super(self.__class__, self).__init__(parent)
self.catch=False #for catching right/left arrow keypress events in editor mode
self.keys = [QtCore.Qt.Key_Left,
QtCore.Qt.Key_Right]
def focusInEvent(self, event):
self.catch = False
return QtWidgets.QTableView.focusInEvent(self, event)
def focusOutEvent(self, event):
self.catch = True
return QtWidgets.QTableView.focusOutEvent(self, event)
def event(self, event):
if self.catch and event.type() == QtCore.QEvent.KeyRelease and event.key() in self.keys:
self._moveCursor(event.key())
return QtWidgets.QTableView.event(self,event)
def keyPressEvent(self, event):
if not self.catch:
return QtWidgets.QTableView.keyPressEvent(self, event)
self._moveCursor(event.key())
def _moveCursor(self, key):
row = self.currentIndex().row()
col = self.currentIndex().column()
if key == QtCore.Qt.Key_Left and col > 0:
col -= 1
elif key == QtCore.Qt.Key_Right and col < (self.model().columnCount()-1):
col += 1
elif key == QtCore.Qt.Key_Up and row > 0:
row -= 1
elif key == QtCore.Qt.Key_Down and row < (self.model().rowCount()-1):
row += 1
else:
return
self.setCurrentIndex(self.model().createIndex(row,col))
self.edit(self.currentIndex())
def mousePressEvent(self,event):
"""
Reimplement mousePressEvent in order to deselect rows when clicking into blank space
"""
if self.indexAt(event.pos()).isValid():
super(self.__class__, self).mousePressEvent(event)
else:
#user has clicked into blank space...clear selection and send signal
self.selectionModel().clearSelection()
class GeometryWidget(AnimWidgBase):
# def __init__(self, modaldata_object,status_bar,language, preferences=dict(), desktop_widget=None, parent=None):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.setContentsMargins(0, 0, 0, 0)
self.selection=[] # list of nodes clicked with mouse in 3d view
self.selection_ind=[] # list of indicies of selected nodes
self.selection_color=[] # original model color
self.selected_elem_ids=[] #element ids of elements selected in element table view
self.selected_elem_col=None #color of selected elements
self.activated_models=[] # model_ids of currently activated models
#default widget mode!
self.widget_mode = 'nodes'
#connect cliked signal from 3d view
self.model_view.clicked_signal.clicked.connect(self.mouse_clicked)
def color_selected_node(self,test,nodes_index,nodes):
#index of the node in the original dataframe
ind=nodes_index[test]
#check if node was already selected, if it was...deselect it
if ind in self.selection_ind:
#index already selected -> deselect it
loc=self.selection_ind.index(ind)
del self.selection_ind[loc]
del self.selection[loc]
#if ind already in selection -> set default color
self.modaldata.tables['geometry'].ix[ind, 'clr_r']=self.selection_color[0]
self.modaldata.tables['geometry'].ix[ind, 'clr_g']=self.selection_color[1]
self.modaldata.tables['geometry'].ix[ind, 'clr_b']=self.selection_color[2]
self.modaldata.tables['geometry'].ix[ind, 'clr_a']=self.selection_color[3]
else:
#index not yet selected -> add it to selection
self.selection.append(nodes.iloc[test][['node_nums','x','y','z','model_id','color']].values[0])
self.selection_ind.append(ind)
self.selection_color=[self.modaldata.tables['geometry'].ix[ind, 'clr_r'].values[0],
self.modaldata.tables['geometry'].ix[ind, 'clr_g'].values[0],
self.modaldata.tables['geometry'].ix[ind, 'clr_b'].values[0],
self.modaldata.tables['geometry'].ix[ind, 'clr_a'].values[0]]
self.modaldata.tables['geometry'].ix[ind, 'clr_r']=1
self.modaldata.tables['geometry'].ix[ind, 'clr_g']=0
self.modaldata.tables['geometry'].ix[ind, 'clr_b']=0
self.modaldata.tables['geometry'].ix[ind, 'clr_a']=1
def handle_node_clicked(self):
'''
Check whether the click was near a node; if so, add it to the selection. If the clicked node was
already selected, deselect it. Selected nodes are highlighted with the selection color.
:return:
'''
#get cube size for determening selection sphere size
for model_id, model_obj in self.models.items():
if model_obj.activated:
cube_scale, lcs_scale=model_obj.get_cube_and_lcs_scale()
#look only among activated models
act_mod=[]
for model_id, model_obj in self.models.items():
if model_obj.activated:
act_mod.append(model_obj.model_id)
nodes=self.modaldata.tables['geometry'][self.modaldata.tables['geometry']['x'].notnull()]
nodes=nodes[nodes['model_id'].isin(act_mod)]
nodes_index=nodes.index.values
ind=-1
node_data=nodes.ix[:,['x','y','z']].values
# CHECK if nodes are near clicked point
start_point=self.model_view.ray[0] #get ray data from 3d view widget
ray_dir=self.model_view.ray[1]
#sel_sph_r=0.05 # selection sphere radius
sel_sph_r=cube_scale*3
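# The test below is a standard ray-sphere intersection check: with the ray
# p(t) = start_point + t * ray_dir (ray_dir assumed to be unit length) and a
# sphere of radius sel_sph_r around each node, substituting p(t) into the
# sphere equation gives a quadratic in t whose (quarter) discriminant is
# b**2 - c; a node counts as hit when that discriminant is non-negative.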
aux_1=-node_data+start_point
aux_1=aux_1.astype(np.float64)
b=inner1d(ray_dir,aux_1)
c=inner1d(aux_1,aux_1)-sel_sph_r**2
#get boolean array - true means that node is under mouse
test=(b**2-c)>=0
#check for coincident nodes!
coincident_nodes=np.sum(test)-1 # =0 if only one node clicked, >0 if multiple nodes clicked
if coincident_nodes==0:
self.color_selected_node(test,nodes_index,nodes)
self.plot_activated_models()
elif coincident_nodes>0:
#TODO: handle this!
print('multiple nodes clicked! - NOT HANDLED YET')
elif coincident_nodes==-1:
#TODO: handle this!
print('blank space clicked')
def clear_node_selection(self):
"""
Clear selected nodes and restore node colors to default
:return:
"""
self.selection=[]
for ind in self.selection_ind:
self.modaldata.tables['geometry'].ix[ind, 'clr_r']=self.selection_color[0]
self.modaldata.tables['geometry'].ix[ind, 'clr_g']=self.selection_color[1]
self.modaldata.tables['geometry'].ix[ind, 'clr_b']=self.selection_color[2]
self.modaldata.tables['geometry'].ix[ind, 'clr_a']=self.selection_color[3]
self.selection_ind=[]
self.selection_color=[]
self.plot_activated_models()
def mouse_clicked(self):
"""
For 2D plot cross hair selection
:param evt:
:return:
"""
#only select nodes if widget is not in geometry mode
if self.widget_mode!='nodes':
self.handle_node_clicked()
if self.widget_mode=='lines':
nr_of_nodes=2
if len(self.selection)==nr_of_nodes:
self.addElement(nr_of_nodes)
self.clear_node_selection()
if self.widget_mode=='elements':
nr_of_nodes=3
if len(self.selection)==nr_of_nodes:
self.addElement(nr_of_nodes)
self.clear_node_selection()
def addElement(self,nr_of_nodes):
"""
Add selection data to modal_data object as new element
:return:
"""
#get next pandas index
if len(self.modaldata.tables['elements_index'].index)==0:
ind=0
node_ind=0
element_id=1
else:
ind= self.modaldata.tables['elements_index'].index.max() + 1
element_id= self.modaldata.tables['elements_index']['element_id'].max() + 1
node_ind= self.modaldata.tables['elements_values'].index.max() + 1
#store data data from selection
#store element
model_id=self.selection[0][4]
def get_color(id,elem_type):
for model_id, model_obj in self.models.items():
if model_id==id:
if elem_type=='triangle':
color=model_obj.cur_triangle_color
elif elem_type=='line':
color=model_obj.cur_line_color
return color
aux_color=self.selection[0][5]
if nr_of_nodes==3:
element_descriptor='triangle'
color=get_color(model_id,element_descriptor)
self.modaldata.tables['elements_index'].loc[ind]=[model_id, element_id, element_descriptor, aux_color, nr_of_nodes,
color.red() / 255., color.green() / 255., color.blue() / 255., color.alpha() / 255.]
#store nodes
for i in range(nr_of_nodes):
node_id=self.selection[i][0]
node_pos=i
self.modaldata.tables['elements_values'].loc[node_ind]=[model_id, element_id, node_id, node_pos]
node_ind=node_ind+1
if nr_of_nodes==2:
element_descriptor='line'
color=get_color(model_id,element_descriptor)
self.modaldata.tables['elements_index'].loc[ind]=[model_id, element_id, element_descriptor, aux_color, nr_of_nodes,
color.red() / 255., color.green() / 255., color.blue() / 255., color.alpha() / 255.]
#store nodes
for i in range(nr_of_nodes):
node_id=self.selection[i][0]
node_pos=i
self.modaldata.tables['elements_values'].loc[node_ind]=[model_id, element_id, node_id, node_pos]
node_ind=node_ind+1
## for line the third node is same as second
#update table model
self.populate_elem_table_view([model_id])
def delete_selection_aux(self):
"""
Delete selection in table view via context menu
:return:
"""
if self.gcs_type==0:
self.delete_selection(self.geom_table_view,self.geom_table_model)
if self.gcs_type==1:
self.delete_selection(self.cyl_geom_table_view,self.cyl_geom_table_model)
def delete_selection(self,geom_table_view,geom_table_model):
if self.widget_mode=='nodes':
cells=geom_table_view.selectedIndexes()
cells.sort()
# start index is where first cell is selected (caution: table view shows only a view into table model,
# selections indexes are relative to current view!)
curr_row=cells[0].model().datatable.index.values[0]
cols=[]
rows=[]
for cell in cells:
rows.append(curr_row+cell.row())
cols.append(cells[0].model().datatable.columns[cell.column()])
geom_table_model.datatable.ix[rows,cols]=np.nan
geom_table_model.dataIn.ix[rows,cols]=np.nan # this is necessary as update method does not work with NANs
geom_table_model.dataIn.update(geom_table_model.datatable)
geom_table_model.dataChanged.emit(geom_table_model.createIndex(0, 0),
geom_table_model.createIndex(geom_table_model.rowCount(0),
geom_table_model.columnCount(0)))
geom_table_model.layoutChanged.emit()
if self.widget_mode=='lines' or self.widget_mode=='elements':
rows=self.elem_table_view.selectionModel().selectedRows()
rows.sort()
el_id_list=[]
for row in rows:
el_id_list.append(self.elem_table_model.datatable['element_id'].iloc[[row.row()]].values[0])
element_id_mask=self.modaldata.tables['elements_values']['element_id'].isin(el_id_list)
self.modaldata.tables['elements_values'].drop(self.modaldata.tables['elements_values']['element_id'].index[element_id_mask], inplace=True)
element_id_mask=self.elem_table_model.datatable['element_id'].isin(el_id_list)
self.elem_table_model.datatable.drop(self.elem_table_model.datatable['element_id'].index[element_id_mask],inplace=True) # change stuff in GUI
self.elem_table_model.dataIn.update(self.elem_table_model.datatable)
element_id_mask=self.elem_table_model.dataIn['element_id'].isin(el_id_list)
self.elem_table_model.dataIn.drop(self.elem_table_model.dataIn['element_id'].index[element_id_mask],inplace=True) # change stuff directly in modal data obj
#PyQt
self.elem_table_model.dataChanged.emit(self.elem_table_model.createIndex(0, 0),
self.elem_table_model.createIndex(self.elem_table_model.rowCount(0),
self.elem_table_model.columnCount(0)))
self.elem_table_model.layoutChanged.emit()
def copy_selection(self):
if self.gcs_type==0:
cells=self.geom_table_view.selectedIndexes()
elif self.gcs_type==1:
cells=self.cyl_geom_table_view.selectedIndexes()
cells.sort()
curr_row=cells[0].row()
text=''
for cell in cells:
if len(text)==0:
text=str(cell.data())
else:
if cell.row()!=curr_row:
#text=text+' \\n '
text=text+os.linesep # os independent newline seperator
curr_row=curr_row+1
else:
text=text+'\t'
text=text+str(cell.data())
QtCore.QCoreApplication.instance().clipboard().setText(text)
def paste_selection(self):
text=QtCore.QCoreApplication.instance().clipboard().text()
lines=text.splitlines()
if self.gcs_type==0:
cells=self.geom_table_view.selectedIndexes()
elif self.gcs_type==1:
cells=self.cyl_geom_table_view.selectedIndexes()
cells.sort()
# start index is where first cell is selected (caution: table view shows only a view into table model,
# selections indexes are relative to current view!)
curr_row=cells[0].model().datatable.index.values[0]+cells[0].row()
curr_col=cells[0].column()
# get selection dimensions
num_of_cols=len(lines[0].split('\t'))
num_of_rows=len(lines)
# expand table if number of rows in clipboard is larger than current table size
for model_id in self.activated_models:
#get node index corresponding with existing geomtry table
model_mask=self.modaldata.tables['geometry'].ix[:,'model_id']==model_id
node_index=self.modaldata.tables['geometry'].ix[model_mask].index
if (curr_row+num_of_rows)>len(node_index):
# add rows for selected model
rows_to_add=curr_row+num_of_rows-len(node_index)
self.add_geom_rows(rows_to_add=rows_to_add)
# duplicate stuff from clipboard based on the selection size
# we want to copy rows
if num_of_cols==(cells[-1].column()-cells[0].column()+1):
copy_rows=(cells[-1].row()-cells[0].row()+1)/num_of_rows
if copy_rows>1:
lines=lines*np.floor(copy_rows)
# we want to copy columns
elif num_of_rows==(cells[-1].row()-cells[0].row()+1):
copy_cols=(cells[-1].column()-cells[0].column()-num_of_cols+1)/num_of_cols
if copy_cols>0:
lines=[(i+('\t'+i)*np.floor(copy_cols)) for i in lines]
for line in lines:
data=line.split('\t')
for val in data:
if val=='':
#skip empty cell
curr_col=curr_col+1
else:
try:
if self.gcs_type==0:
self.geom_table_model.datatable.set_value(curr_row, cells[0].model().datatable.columns[curr_col], float(val))
if self.gcs_type==1:
self.cyl_geom_table_model.datatable.set_value(curr_row, cells[0].model().datatable.columns[curr_col], float(val))
except ValueError:
if self.gcs_type==0:
self.geom_table_model.datatable.set_value(curr_row, cells[0].model().datatable.columns[curr_col], float(val.replace(',', '.')))
if self.gcs_type==1:
self.cyl_geom_table_model.datatable.set_value(curr_row, cells[0].model().datatable.columns[curr_col], float(val.replace(',', '.')))
curr_col=curr_col+1
curr_col=cells[0].column() #restart column index
curr_row=curr_row+1
if self.gcs_type==0:
self.geom_table_model.dataIn.update(self.geom_table_model.datatable)
self.geom_table_model.dataChanged.emit(self.geom_table_model.createIndex(0, 0),
self.geom_table_model.createIndex(self.geom_table_model.rowCount(0),
self.geom_table_model.columnCount(0)))
self.geom_table_model.layoutChanged.emit()
if self.gcs_type==1:
self.cyl_geom_table_model.dataIn.update(self.geom_table_model.datatable)
self.cyl_geom_table_model.dataChanged.emit(self.cyl_geom_table_model.createIndex(0, 0),
self.cyl_geom_table_model.createIndex(self.cyl_geom_table_model.rowCount(0),
self.cyl_geom_table_model.columnCount(0)))
self.cyl_geom_table_model.layoutChanged.emit()
def keyPressEvent(self,evt):
"""
Catch Ctrl+C and Ctrl+V to handle copying from clipboard
Catch Delete to delete values in selected cells
:param evt:
:return:
"""
if evt.key()==QtCore.Qt.Key_C and evt.modifiers()==QtCore.Qt.ControlModifier:
self.copy_selection()
if evt.key()==QtCore.Qt.Key_V and evt.modifiers()==QtCore.Qt.ControlModifier:
self.paste_selection()
if evt.key()==QtCore.Qt.Key_Delete:
self.delete_selection_aux()
super(self.__class__,self).keyPressEvent(evt)
def create_toolbar_actions(self):
super(self.__class__,self).create_toolbar_actions()
self.act_new_model = QtWidgets.QAction('New model', self,
statusTip='Create new model', triggered=self.new_model)
self.act_delete_model = QtWidgets.QAction('Delete model', self,
statusTip='Delete model', triggered=self.delete_model_dialog)
self.act_nodes_mode = QtWidgets.QAction('Nodes', self,
statusTip='Geometry input mode', triggered=self.nodes_data_mode)
self.act_lines_mode = QtWidgets.QAction('Lines', self,
statusTip='Lines input mode', triggered=self.lines_data_mode)
self.act_elements_mode = QtWidgets.QAction('Elements', self,
statusTip='Elements input mode', triggered=self.elements_data_mode)
def create_model_view_actions(self):
super(self.__class__,self).create_model_view_actions()
self.elem_desel_act = QtWidgets.QAction('Deselect elements', self, checkable=False,
statusTip='Clear element selection', triggered=partial(self.handle_elem_select,True))
def nodes_data_mode(self):
self.elem_table_view.hide()
if self.gcs_type==0:
self.geom_table_view.show()
self.cyl_geom_table_view.hide()
#cartesian gcs
self.geom_table_model.update(self.modaldata.tables['geometry'], self.activated_models, self.fields)
elif self.gcs_type==1:
self.cyl_geom_table_view.show()
self.geom_table_view.hide()
#cylindrical csys
self.cyl_geom_table_model.update(self.modaldata.tables['geometry'], self.activated_models, self.cyl_fields)
self.widget_mode = 'nodes'
self._button3.setChecked(True)
self._button6.setChecked(False)
self._button4.setChecked(False)
def lines_data_mode(self):
self.elem_table_view.show()
self.geom_table_view.hide()
self.cyl_geom_table_view.hide()
self.widget_mode = 'lines'
self._button3.setChecked(False)
self._button6.setChecked(True)
self._button4.setChecked(False)
def elements_data_mode(self):
self.elem_table_view.show()
self.geom_table_view.hide()
self.cyl_geom_table_view.hide()
self.widget_mode = 'elements'
self._button3.setChecked(False)
self._button6.setChecked(False)
self._button4.setChecked(True)
def model_view_context_menu(self, pos):
menu = QtWidgets.QMenu()
menu.addAction(self.act_fit_view)
menu.addAction(self.elem_desel_act)
display_menu = menu.addMenu('Display')
display_menu.addAction(self.plot_nodes_act)
display_menu.addAction(self.plot_lines_act)
display_menu.addAction(self.plot_elements_act)
display_menu.addAction(self.plot_node_lcs_act)
display_menu.addAction(self.plot_node_labels_act)
display_menu.addAction(self.plot_gcs_act)
#display_menu.addMenu('Trace lines')
color_menu = menu.addMenu('Colors')
color_menu.addAction(self.node_color_act)
color_menu.addAction(self.line_color_act)
color_menu.addAction(self.elem_color_act)
csys_menu = menu.addMenu('Change csys')
csys_menu.addAction(self.cart_csys_act)
csys_menu.addAction(self.cyl_csys_act)
menu.exec_(QtGui.QCursor.pos())
def paintEvent(self, event):
# button sizes
w = 140 #this is overridden by css
h = 33
border_thk=1
# app window size
window_width=self.rect().width()
window_height=self.rect().height()
# global positioning of buttons
x_margin=20
x = (window_width - w - x_margin-2*border_thk)
y = 0.2*window_height
offset=h+5
# relative positioning of buttons
self._button.setGeometry(x,y,w,h)
self._button5.setGeometry(x,y+offset,w,h)
self._button2.setGeometry(x,y+2*offset,w,h)
self._b_geom_prim.setGeometry(x,y+3*offset,w,h)
# positioning of elements/geometry table
table_width=window_width*0.6
table_height=window_height*0.3
table_x=window_width/2-table_width/2
table_y=0.68*window_height
x_btn=window_width/2-1.5*w-5
y_btn=40
self._button3.setGeometry(x_btn,table_y-y_btn,w,h)
self._button6.setGeometry(x_btn+w+5,table_y-y_btn,w,h)
self._button4.setGeometry(x_btn+2*w+10,table_y-y_btn,w,h)
self.cyl_geom_table_view.setGeometry(table_x,table_y,table_width,table_height)
self.cyl_geom_table_view.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self.geom_table_view.setGeometry(table_x,table_y,table_width,table_height)
self.geom_table_view.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self.elem_table_view.setGeometry(table_x,table_y,table_width,table_height)
self.elem_table_view.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
# selected model label
#self._label.setGeometry(window_width/2-self._label.width()/2,0.1*window_height,200,20)
# create buttons for available models
offset=0
x = x_margin
y = 0.2*window_height
for model_id,button in self.model_buttons.items():
width = self._button.width()+20
height = self._button.height()
button.setGeometry(x,y+offset,width,height)
offset=offset+height+5
def table_view_context_menu(self, pos):
menu = QtWidgets.QMenu()
menu.addAction(self.act_delete)
menu.addAction(self.act_copy)
menu.addAction(self.act_paste)
menu.addAction(self.elem_desel_act)
menu.addAction(self.add_rows_act)
menu.exec_(QtGui.QCursor.pos())
def model_btn_context_menu(self, pos):
#get model button which was right clicked
self.sending_button = self.sender()
menu = QtWidgets.QMenu()
menu.addAction(self.act_model_rename)
menu.exec_(QtGui.QCursor.pos())
def model_btn_context_menu_act(self):
self.act_model_rename = QtWidgets.QAction('Rename', self, statusTip='Rename model', triggered=self.rename_model)
def context_menu_act(self):
self.act_delete = QtWidgets.QAction('Delete', self, statusTip='Delete selection', triggered=self.delete_selection_aux)
self.act_copy = QtWidgets.QAction('Copy', self, statusTip='Copy selection', triggered=self.copy_selection)
self.act_paste = QtWidgets.QAction('Paste', self, statusTip='Paste selection', triggered=self.paste_selection)
self.add_rows_act = QtWidgets.QAction('Add 100 rows', self, checkable=False,
statusTip='Add 100 blank rows', triggered=partial(self.add_geom_rows,rows_to_add=100))
def create_layout(self):
"""
Create layout of the central Qwidget and add widgets
:return:
"""
super(self.__class__,self).create_layout()
self._button = QtWidgets.QPushButton(qta.icon('fa.plus-circle', color='white'),'New model', self)
self._button.setObjectName('medium')
self._button.clicked.connect(self.new_model)
self._button5 = QtWidgets.QPushButton(qta.icon('fa.trash-o', color='white'),'Delete model', self)
self._button5.setObjectName('medium')
self._button5.clicked.connect(self.delete_model_dialog)
self._button2 = QtWidgets.QPushButton(qta.icon('fa.search', color='white'),'Fit view', self)
self._button2.setObjectName('medium')
self._button2.clicked.connect(self.autofit_3d_view)
self._b_geom_prim= QtWidgets.QPushButton(qta.icon('fa.industry', color='white'),'Create geometry', self)
self._b_geom_prim.setObjectName('medium')
self._b_geom_prim.clicked.connect(self.create_geom_primitive)
self._button3 = QtWidgets.QPushButton('Add nodes', self)
self._button3.setObjectName('table_button')
self._button3.setCheckable(True)
self._button3.setChecked(True)
self._button3.clicked.connect(self.nodes_data_mode)
self._button6 = QtWidgets.QPushButton('Add lines', self)
self._button6.setObjectName('table_button')
self._button6.setCheckable(True)
self._button6.clicked.connect(self.lines_data_mode)
self._button4 = QtWidgets.QPushButton('Add triangles', self)
self._button4.setObjectName('table_button')
self._button4.setCheckable(True)
self._button4.clicked.connect(self.elements_data_mode)
# common for both tables
self.context_menu_act() #create actions for table context menu
# Context menu actions for model buttons
self.model_btn_context_menu_act()
# geometry Table (cartesian coordinate system)
self.geom_table_model = TableModel(self)
self.fields = ['node_nums','x', 'y', 'z','thz', 'thy', 'thx' , 'model_id']
self.geom_table_model.update(self.modaldata.tables['geometry'], [0], self.fields) # show some data
self.geom_table_view = CustomQTableView(self)
self.geom_table_view.setModel(self.geom_table_model)
self.geom_table_model.dataChanged.connect(self.geometry_changed)
self.geom_table_view.setSortingEnabled(False)
self.geom_table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.geom_table_view.customContextMenuRequested.connect(self.table_view_context_menu)
#replace header from dataframe with custom one
self.geom_table_model.header_labels=[keys['node_nums']['15'],
keys['x']['15'],
keys['y']['15'],
keys['z']['15'],
keys['thz']['15'],
keys['thy']['15'],
keys['thx']['15'] ,
keys['model_id']['15']]
# geometry Table (cylindrical coordinate system)
self.cyl_geom_table_model = TableModel(self)
self.cyl_fields = ['node_nums','r', 'phi', 'z','cyl_thz', 'thy', 'thx' , 'model_id']
self.cyl_geom_table_model.update(self.modaldata.tables['geometry'], [0], self.cyl_fields) # show some data
self.cyl_geom_table_view = CustomQTableView(self)
self.cyl_geom_table_view.setModel(self.cyl_geom_table_model)
self.cyl_geom_table_model.dataChanged.connect(self.geometry_changed)
self.cyl_geom_table_view.hide()
self.cyl_geom_table_view.setSortingEnabled(False)
self.cyl_geom_table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.cyl_geom_table_view.customContextMenuRequested.connect(self.table_view_context_menu)
#replace header from dataframe with custom one
self.cyl_geom_table_model.header_labels=[keys['node_nums']['15'],
keys['r']['15'],
keys['phi']['15'],
keys['z']['15'],
keys['cyl_thz']['15'],
keys['thy']['15'],
keys['thx']['15'] ,
keys['model_id']['15']]
# elements Table
self.elem_table_model = TableModel(self)
#print(self.modal_data.tables['analysis_index'])
self.elem_fields = ['model_id', 'element_id', 'element_descriptor', 'color',
'nr_of_nodes']
self.elem_table_model.update(self.modaldata.tables['elements_index'], [0], self.elem_fields) # show some data
self.elem_table_view = CustomQTableView(self)
self.elem_table_view.setModel(self.elem_table_model)
self.elem_table_view.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.elem_table_model.dataChanged.connect(self.plot_activated_models)
self.elem_table_view.setMinimumHeight(150)
self.elem_table_view.setSortingEnabled(True)
self.elem_table_view.hide()
self.elem_table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.elem_table_view.customContextMenuRequested.connect(self.table_view_context_menu)
#replace header from dataframe with custom one
self.elem_table_model.header_labels=[keys['model_id']['15'],
keys['element_id']['15'],
keys['element_descriptor']['15'],
keys['color']['15'],
keys['nr_of_nodes']['15']]
self.elem_table_view.setColumnHidden(3,True) #hide color
self.elem_table_view.setColumnHidden(4,True) #hide nr_of_nodes
selection = self.elem_table_view.selectionModel()
selection.selectionChanged.connect(self.handle_elem_select)
def restore_elem_color(self,elem_ids):
"""
Change element color to original (before selection)
:param elem_ids:
:return:
"""
#restore color
element_id_mask=self.modaldata.tables['elements_index']['element_id'].isin(elem_ids)
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_r']=self.selected_elem_col[0] # rbg values 0-1
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_g']=self.selected_elem_col[1] # rbg values 0-1
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_b']=self.selected_elem_col[2] # rbg values 0-1
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_a']=self.selected_elem_col[3] # alpha values 0-1
def change_elem_color(self,elem_ids,selection_color):
#change color of selected elements
element_id_mask=self.modaldata.tables['elements_index']['element_id'].isin(elem_ids)
#store current element color
self.selected_elem_col= self.modaldata.tables['elements_index'][element_id_mask][['clr_r', 'clr_g', 'clr_b', 'clr_a']].values[0, :]
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_r']= selection_color[0] / 255. # rbg values 0-1
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_g']= selection_color[1] / 255. # rbg values 0-1
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_b']= selection_color[2] / 255. # rbg values 0-1
self.modaldata.tables['elements_index'].ix[element_id_mask, 'clr_a']= selection_color[3] / 255. # alpha values 0-1
def handle_elem_select(self,deselect=False):
"""
Change color of elements selected in element table
:return:
"""
#element selection color
#TODO: move this to color palette?
#TODO: fix - selecting multiple lines and triangles changes their color - element color changes must be element-type sensitive
rgba_color = QtGui.QColor(255, 0, 0, 255)
rgba_color = pg.colorTuple(rgba_color)
rows=self.elem_table_view.selectionModel().selectedRows()
rows.sort()
new_elem_ids=[] # newly selected elements
for row in rows:
new_elem_ids.append(self.elem_table_model.datatable['element_id'].iloc[[row.row()]].values[0])
if deselect==True:
self.restore_elem_color(self.selected_elem_ids)
self.selected_elem_ids=[]
else:
#restore color of previously selected elements
if len(self.selected_elem_ids)!=0:
self.restore_elem_color(self.selected_elem_ids)
#change color of selected elements
if len(new_elem_ids)!=0:
self.change_elem_color(new_elem_ids,rgba_color)
# store current selection
self.selected_elem_ids=new_elem_ids
self.plot_activated_models(wheel_event=True)
def create_geom_primitive(self):
"""
Create geometry primitives (nodes + triangles) for currently active model
:return:
"""
response,input_data=dialog_geom_primitives.return_data()
if response==1:
if input_data['geom_type']=='line':
self.create_line_geom(input_data)
if input_data['geom_type']=='plane':
self.create_plane_geom(input_data)
if input_data['geom_type']=='box':
self.create_box_geom(input_data)
if input_data['geom_type']=='cylinder':
self.create_cyl_geom(input_data)
def create_line_geom(self,line_data):
"""
Create line geometry based on user input (for currently active model)
:return:
"""
xs=float(line_data['xs']) # s = start point
ys=float(line_data['ys'])
zs=float(line_data['zs'])
xe=float(line_data['xe']) # e = end point
ye=float(line_data['ye'])
ze=float(line_data['ze'])
num_of_points=int(line_data['num_of_points'])
start_num=float(line_data['start_num'])
s_point=np.array((xs,ys,zs))
e_point=np.array((xe,ye,ze))
for model_id in self.activated_models:
node_nums=np.arange(start_num,start_num+num_of_points)
line_vec=(e_point-s_point)
dir_arr=np.tile(line_vec,(num_of_points,1))
div_arr=np.linspace(0,1,num_of_points)
div_arr_rshp=np.tile(div_arr.reshape(num_of_points,1),3)
nodes=np.tile(s_point,(num_of_points,1))+div_arr_rshp*dir_arr
#realign index in order to prevent double node names (geometry data frame starts with 1 by default)
#node_index=node_nums-1
#get node index corresponding with existing geometry table
model_mask=self.modaldata.tables['geometry'].loc[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].loc[:,'node_nums'].isin(node_nums)
final_mask=model_mask & node_mask
node_index=self.modaldata.tables['geometry'].loc[final_mask].index
if len(node_nums)>len(node_index):
# add rows for selected model
rows_to_add=len(node_nums)-len(node_index)
self.add_geom_rows(rows_to_add=rows_to_add)
# get index
model_mask=self.modaldata.tables['geometry'].loc[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].loc[:,'node_nums'].isin(node_nums)
final_mask=model_mask & node_mask
node_index=self.modaldata.tables['geometry'].loc[final_mask].index
#create node data
df=pd.DataFrame(index=node_index, columns=self.modaldata.tables['geometry'].columns)
df['model_id']=model_id
df['node_nums']=node_nums
df['x']=nodes[:,0]
df['y']=nodes[:,1]
df['z']=nodes[:,2]
#TODO: orient the LCS according to the line orientation
df['thx']=0
df['thy']=0
df['thz']=0
rgba_color = pg.colorTuple(QtGui.QColor(0,255,0,255))
df['clr_r']=rgba_color[0]/ 255. # rgb values 0-1
df['clr_g']=rgba_color[1]/ 255. # rgb values 0-1
df['clr_b']=rgba_color[2]/ 255. # rgb values 0-1
df['clr_a']=rgba_color[3]/ 255. # alpha values 0-1
#calculate r,phi from x,y
df['r'] = np.sqrt(df['x']**2 + df['y']**2)
df['phi'] = np.arcsin(df['y']/df['r'])*180./np.pi
df['cyl_thz']= 0
#update geometry table with new data
self.modaldata.tables['geometry'].update(df)#,overwrite=True)
#self.modal_data.tables['geometry']=pd.concat([self.modal_data.tables['geometry'],df])
#create element data
#get next pandas index
if len(self.modaldata.tables['elements_index'].index)==0:
ind=0
elem_node_ind=0
element_id=0
else:
ind= self.modaldata.tables['elements_index'].index.max() + 1
element_id= self.modaldata.tables['elements_index']['element_id'].max() + 1
elem_node_ind= self.modaldata.tables['elements_values'].index.max() + 1
for model_id_aux, model_obj_aux in self.models.items():
if model_obj_aux.model_id==model_id:
color=model_obj_aux.cur_triangle_color
element_descriptor='line'
nr_of_nodes=2
tot_num_of_elem=num_of_points-1 #total number of elements
elem_nums=np.arange(ind,ind+tot_num_of_elem)
elem_ids=np.arange(element_id,element_id+tot_num_of_elem)
df_elem=pd.DataFrame(index=elem_nums, columns=self.modaldata.tables['elements_index'].columns)
df_elem['model_id']=model_id
df_elem['element_id']=elem_ids
df_elem['element_descriptor']=element_descriptor
df_elem['color']=color
df_elem['nr_of_nodes']=nr_of_nodes
df_elem['clr_r']=color.red()/255.
df_elem['clr_g']=color.green()/255.
df_elem['clr_b']=color.blue()/255.
df_elem['clr_a']=color.alpha()/255.
if len(self.modaldata.tables['elements_index'].index)==0:
self.modaldata.tables['elements_index']=df_elem
else:
#self.modal_data.tables['elements_index'].update(df_elem)#,overwrite=True)
self.modaldata.tables['elements_index']=pd.concat([self.modaldata.tables['elements_index'], df_elem])
#store nodes
#tot_elem_nums=circ_div*(height_div-1)*2 #total number of elements
#elem_nums=np.arange(element_id,element_id+tot_elem_nums+1)
#walk through nodes and store elements
pos_1=[]
pos_2=[]
node_number=start_num
for i in range(1,int(num_of_points)):
pos_1.append(node_number)
pos_2.append(node_number+1)
node_number=node_number+1
df_elem_index=np.arange(elem_node_ind,elem_node_ind+len(np.tile(elem_ids,2)))
df_elem_nodes=pd.DataFrame(index=df_elem_index, columns=self.modaldata.tables['elements_values'].columns)
#df_elem_nodes['model_id']=model_id
df_elem_nodes['element_id']=np.tile(elem_ids,2)
df_elem_nodes['node_id']=np.asarray(pos_1+pos_2) #node numbers
df_elem_nodes['node_pos']=np.repeat([1,2],len(pos_1)) #node position in element
if len(self.modaldata.tables['elements_values'].index)==0:
self.modaldata.tables['elements_values']=df_elem_nodes
else:
#self.modal_data.tables['elements_values'].update(df_elem_nodes)#,overwrite=True)
self.modaldata.tables['elements_values']=pd.concat([self.modaldata.tables['elements_values'], df_elem_nodes])
#refresh
self.calc_node_lcs()
self.populate_table_view(self.activated_models)
self.populate_elem_table_view(self.activated_models)
self.plot_activated_models()
def add_geom_rows(self,rows_to_add=100):
"""
Add 100 blank rows to geometry table of selected model id
:return:
"""
for model_id in self.activated_models:
#get node index corresponding with existing geometry table
model_mask=self.modaldata.tables['geometry'].loc[:,'model_id']==model_id
if len(self.modaldata.tables['geometry'][model_mask].index)==0:
ind=0
node_num=0
else:
ind= self.modaldata.tables['geometry'][model_mask].index.max() + 1
node_num = self.modaldata.tables['geometry'].loc[model_mask,'node_nums'].max() + 1
node_nums=np.arange(node_num,node_num+rows_to_add)
node_index=np.arange(ind,ind+rows_to_add)
#create node data
df=pd.DataFrame(index=node_index, columns=self.modaldata.tables['geometry'].columns)
df['model_id']=model_id
df['node_nums']=node_nums
rgba_color = pg.colorTuple(QtGui.QColor(0,255,0,255))
df['clr_r']=rgba_color[0]/ 255. # rgb values 0-1
df['clr_g']=rgba_color[1]/ 255. # rgb values 0-1
df['clr_b']=rgba_color[2]/ 255. # rgb values 0-1
df['clr_a']=rgba_color[3]/ 255. # alpha values 0-1
if len(self.modaldata.tables['geometry'].index)==0:
self.modaldata.tables['geometry']=df
else:
#self.modal_data.tables['elements_values'].update(df_elem_nodes)#,overwrite=True)
self.modaldata.tables['geometry']=pd.concat([self.modaldata.tables['geometry'], df])
#refresh
self.populate_table_view(self.activated_models)
def create_plane_nodes_df(self,plane_orient,len1,len2,div1,div2,start_num,model_id,x_offset,y_offset,z_offset):
maximum_number_of_nodes=div1*div2
node_nums=np.arange(start_num,start_num+maximum_number_of_nodes)
len1_arr = np.linspace(0, len1, div1)
len2_arr = np.linspace(0, len2, div2)
if plane_orient=='XY':
xx, yy = np.meshgrid(len1_arr, len2_arr)
zz=np.zeros((maximum_number_of_nodes))
if plane_orient=='YZ':
yy, zz = np.meshgrid(len1_arr, len2_arr)
xx=np.zeros((maximum_number_of_nodes))
if plane_orient=='ZX':
zz, xx = np.meshgrid(len1_arr, len2_arr)
yy=np.zeros((maximum_number_of_nodes))
#realign index in order to prevent double node names (geometry data frame starts with 1 by default)
#node_index=node_nums-1
#get node index corresponding with existing geometry table
model_mask=self.modaldata.tables['geometry'].loc[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].loc[:,'node_nums'].isin(node_nums)
final_mask=model_mask & node_mask
node_index=self.modaldata.tables['geometry'].loc[final_mask].index
if len(node_nums)>len(node_index):
# add rows for selected model
rows_to_add=len(node_nums)-len(node_index)
self.add_geom_rows(rows_to_add=rows_to_add)
# get index
model_mask=self.modaldata.tables['geometry'].loc[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].loc[:,'node_nums'].isin(node_nums)
final_mask=model_mask & node_mask
node_index=self.modaldata.tables['geometry'].loc[final_mask].index
#create node data
df=pd.DataFrame(index=node_index, columns=self.modaldata.tables['geometry'].columns)
df['model_id']=model_id
df['node_nums']=node_nums
df['x']=xx.flatten()+x_offset
df['y']=yy.flatten()+y_offset
df['z']=zz.flatten()+z_offset
df['thx']=0
df['thy']=0
df['thz']=0
rgba_color = pg.colorTuple(QtGui.QColor(0,255,0,255))
df['clr_r']=rgba_color[0]/ 255. # rgb values 0-1
df['clr_g']=rgba_color[1]/ 255. # rgb values 0-1
df['clr_b']=rgba_color[2]/ 255. # rgb values 0-1
df['clr_a']=rgba_color[3]/ 255. # alpha values 0-1
#calculate r,phi from x,y
df['r'] = np.sqrt(df['x']**2 + df['y']**2)
df['phi'] = np.arcsin(df['y']/df['r'])*180./np.pi
df['cyl_thz']= 0
return df
def create_plane_elem_df(self,div1,div2,start_num,model_id,custom_num=None):
#create element data
#get next pandas index
if custom_num is None:
if len(self.modaldata.tables['elements_index'].index)==0:
ind=0
elem_node_ind=0
element_id=0
else:
ind= self.modaldata.tables['elements_index'].index.max() + 1
element_id= self.modaldata.tables['elements_index']['element_id'].max() + 1
elem_node_ind= self.modaldata.tables['elements_values'].index.max() + 1
else:
ind=custom_num['ind']
element_id=custom_num['element_id']
elem_node_ind=custom_num['elem_node_ind']
for model_id_aux, model_obj_aux in self.models.items():
if model_obj_aux.model_id==model_id:
color=model_obj_aux.cur_triangle_color
element_descriptor='triangle'
nr_of_nodes=3
tot_num_of_elem=(div1-1)*(div2-1)*2 #total number of elements
elem_nums=np.arange(ind,ind+tot_num_of_elem)
elem_ids=np.arange(element_id,element_id+tot_num_of_elem)
df_elem=pd.DataFrame(index=elem_nums, columns=self.modaldata.tables['elements_index'].columns)
df_elem['model_id']=model_id
df_elem['element_id']=elem_ids
df_elem['element_descriptor']=element_descriptor
df_elem['color']=color
df_elem['nr_of_nodes']=nr_of_nodes
df_elem['clr_r']=color.red()/255.
df_elem['clr_g']=color.green()/255.
df_elem['clr_b']=color.blue()/255.
df_elem['clr_a']=color.alpha()/255.
#store element nodes
#walk through nodes and store elements
pos_1=[]
pos_2=[]
pos_3=[]
node_number=start_num
k=0
for i in range(1,int(div2+1)): # len1 division
for j in range(1,int(div1+1)): # len2 divisions
if j==div1:
#last column
pass
else:
if i==(div2): #vertical
#last row/last column
pass
else:
pos_1.append(node_number)
pos_2.append(node_number+1)
pos_3.append(node_number+1+div1)
pos_1.append(node_number)
pos_2.append(node_number+div1)
pos_3.append(node_number+1+div1)
node_number=node_number+1
df_elem_index=np.arange(elem_node_ind,elem_node_ind+len(np.tile(elem_ids,3)))
df_elem_nodes=pd.DataFrame(index=df_elem_index, columns=self.modaldata.tables['elements_values'].columns)
#df_elem_nodes['model_id']=model_id
df_elem_nodes['element_id']=np.tile(elem_ids,3)
df_elem_nodes['node_id']=np.asarray(pos_1+pos_2+pos_3) #node numbers
df_elem_nodes['node_pos']=np.repeat([1,2,3],len(pos_1)) #node position in element
return df_elem,df_elem_nodes
def create_plane_geom(self,plane_data):
"""
Create cylinder geometry based on user input (for currently active model)
:return:
"""
plane_orient=plane_data['plane_orient']
len1=float(plane_data['len1'])
len2=float(plane_data['len2'])
div1=float(plane_data['div1'])
div2=float(plane_data['div2'])
x_offset=float(plane_data['x_offset'])
y_offset=float(plane_data['y_offset'])
z_offset=float(plane_data['z_offset'])
start_num=float(plane_data['start_num'])
for model_id in self.activated_models:
# get nodes
df=self.create_plane_nodes_df(plane_orient,len1,len2,div1,div2,start_num,model_id,x_offset,y_offset,z_offset)
#update geometry table with new data
self.modaldata.tables['geometry'].update(df)#,overwrite=True)
# get elements and element connectivity
df_elem,df_elem_nodes=self.create_plane_elem_df(div1,div2,start_num,model_id)
# update modal_data object with new geometry
if len(self.modaldata.tables['elements_index'].index)==0:
self.modaldata.tables['elements_index']=df_elem
else:
self.modaldata.tables['elements_index']=pd.concat([self.modaldata.tables['elements_index'], df_elem])
if len(self.modaldata.tables['elements_values'].index)==0:
self.modaldata.tables['elements_values']=df_elem_nodes
else:
self.modaldata.tables['elements_values']=pd.concat([self.modaldata.tables['elements_values'], df_elem_nodes])
#refresh
self.calc_node_lcs()
self.populate_table_view(self.activated_models)
self.populate_elem_table_view(self.activated_models)
self.plot_activated_models()
def create_box_geom(self,box_data):
"""
Create box geometry based on user input (for currently active model)
:return:
"""
lenx=float(box_data['lenx'])
leny=float(box_data['leny'])
lenz=float(box_data['lenz'])
divx=float(box_data['divx'])
divy=float(box_data['divy'])
divz=float(box_data['divz'])
x_offset=float(box_data['x_offset'])
y_offset=float(box_data['y_offset'])
z_offset=float(box_data['z_offset'])
start_num=float(box_data['start_num'])
for model_id in self.activated_models:
maximum_number_of_nodes=2*divx*divy+(divz-2)*(divy+(divx-1)+(divy-1)+(divx-2))
node_nums=np.arange(start_num,start_num+maximum_number_of_nodes)
# xz plane
spc_x=np.linspace(0,lenx,divx)
x_arr_1=np.repeat(spc_x,divz)
y_arr_1=np.zeros((divx*divz))
spc_z=np.linspace(0,lenz,divz)
z_arr_1=np.tile(spc_z[::-1],divx)
# far side yz plane
x_arr_2=np.repeat([lenx],divy*divz)
spc_y=np.linspace(0,leny,divy)
y_arr_2=np.repeat(spc_y,divz)
z_arr_2=np.tile(spc_z[::-1],divy)
# far side xz plane
spc_x=np.linspace(0,lenx,divx)
x_arr_3=np.repeat(spc_x[::-1],divz)
y_arr_3=np.repeat([leny],divx*divz)
spc_z=np.linspace(0,lenz,divz)
z_arr_3=np.tile(spc_z[::-1],divx)
# yz plane
x_arr_4=np.repeat([0],divy*divz)
spc_y=np.linspace(0,leny,divy)
y_arr_4=np.repeat(spc_y[::-1],divz)
z_arr_4=np.tile(spc_z[::-1],divy)
# xy plane (top)
x_arr_5=np.tile(spc_x,divy)
spc_y=np.linspace(0,leny,divy)
y_arr_5=np.repeat(spc_y,divx)
z_arr_5=np.repeat(lenz,divy*divx)
#remove corner nodes
x_mask=(x_arr_5!=lenx)*(x_arr_5!=0) # True where x coordinate is not on edge
y_mask=(y_arr_5!=leny)*(y_arr_5!=0) # True where y coordinate is not on edge
fin_mask=x_mask*y_mask
x_arr_5=x_arr_5[fin_mask]
y_arr_5=y_arr_5[fin_mask]
z_arr_5=z_arr_5[fin_mask]
# xy plane (bottom)
x_arr_6=np.tile(spc_x,divy)
spc_y=np.linspace(0,leny,divy)
y_arr_6=np.repeat(spc_y,divx)
z_arr_6=np.repeat(0,divy*divx)
#remove corner nodes
x_mask=(x_arr_6!=lenx)*(x_arr_6!=0) # True where x coordinate is not on edge
y_mask=(y_arr_6!=leny)*(y_arr_6!=0) # True where y coordinate is not on edge
fin_mask=x_mask*y_mask
x_arr_6=x_arr_6[fin_mask]
y_arr_6=y_arr_6[fin_mask]
z_arr_6=z_arr_6[fin_mask]
x_arr=np.concatenate((x_arr_1[:-divz],x_arr_2[:-divz],x_arr_3[:-divz],x_arr_4[:-divz],x_arr_5,x_arr_6))
y_arr=np.concatenate((y_arr_1[:-divz],y_arr_2[:-divz],y_arr_3[:-divz],y_arr_4[:-divz],y_arr_5,y_arr_6))
z_arr=np.concatenate((z_arr_1[:-divz],z_arr_2[:-divz],z_arr_3[:-divz],z_arr_4[:-divz],z_arr_5,z_arr_6))
#realign index in order to prevent double node names (geometry data frame starts with 1 by default)
#node_index=node_nums-1
#get node index corresponding with existing geometry table
model_mask=self.modaldata.tables['geometry'].loc[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].loc[:,'node_nums'].isin(node_nums)
final_mask=model_mask & node_mask
node_index=self.modaldata.tables['geometry'].loc[final_mask].index
if len(node_nums)>len(node_index):
# add rows for selected model
rows_to_add=len(node_nums)-len(node_index)
self.add_geom_rows(rows_to_add=rows_to_add)
# get index
model_mask=self.modaldata.tables['geometry'].loc[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].loc[:,'node_nums'].isin(node_nums)
final_mask=model_mask & node_mask
node_index=self.modaldata.tables['geometry'].loc[final_mask].index
#create node data
df=pd.DataFrame(index=node_index, columns=self.modaldata.tables['geometry'].columns)
df['model_id']=model_id
df['node_nums']=node_nums
df['x']=x_arr+x_offset
df['y']=y_arr+y_offset
df['z']=z_arr+z_offset
df['thx']=0
df['thy']=0
df['thz']=0
rgba_color = pg.colorTuple(QtGui.QColor(0,255,0,255))
df['clr_r']=rgba_color[0]/ 255. # rgb values 0-1
df['clr_g']=rgba_color[1]/ 255. # rgb values 0-1
df['clr_b']=rgba_color[2]/ 255. # rgb values 0-1
df['clr_a']=rgba_color[3]/ 255. # alpha values 0-1
#calculate r,phi from x,y
df['r'] = np.sqrt(df['x']**2 + df['y']**2)
df['phi'] = np.arcsin(df['y']/df['r'])*180./np.pi
df['cyl_thz']= 0
#update geometry table with new data
self.modaldata.tables['geometry'].update(df)#,overwrite=True)
#self.modal_data.tables['geometry']=pd.concat([self.modal_data.tables['geometry'],df])
#
#create element data
#get next pandas index
if len(self.modaldata.tables['elements_index'].index)==0:
ind=0
elem_node_ind=0
element_id=0
else:
ind= self.modaldata.tables['elements_index'].index.max() + 1
element_id= self.modaldata.tables['elements_index']['element_id'].max() + 1
elem_node_ind= self.modaldata.tables['elements_values'].index.max() + 1
for model_id_aux, model_obj_aux in self.models.items():
if model_obj_aux.model_id==model_id:
color=model_obj_aux.cur_triangle_color
element_descriptor='triangle'
nr_of_nodes=3
tot_num_of_elem=4*(divx-1)*(divz-1)+4*(divy-1)*(divz-1)+4*(divy-1)*(divx-1) #total number of elements
elem_nums=np.arange(ind,ind+tot_num_of_elem)
elem_ids=np.arange(element_id,element_id+tot_num_of_elem)
df_elem=pd.DataFrame(index=elem_nums, columns=self.modaldata.tables['elements_index'].columns)
df_elem['model_id']=model_id
df_elem['element_id']=elem_ids
df_elem['element_descriptor']=element_descriptor
df_elem['color']=color
df_elem['nr_of_nodes']=nr_of_nodes
df_elem['clr_r']=color.red()/255.
df_elem['clr_g']=color.green()/255.
df_elem['clr_b']=color.blue()/255.
df_elem['clr_a']=color.alpha()/255.
if len(self.modaldata.tables['elements_index'].index)==0:
self.modaldata.tables['elements_index']=df_elem
else:
#self.modal_data.tables['elements_index'].update(df_elem)#,overwrite=True)
self.modaldata.tables['elements_index']=pd.concat([self.modaldata.tables['elements_index'], df_elem])
#store nodes
#walk through nodes and store elements
pos_1=[]
pos_2=[]
pos_3=[]
node_number=start_num
num_of_divs=int(2*divx+2*(divy-2)) # number of verticals along z
k=0
for i in range(1,num_of_divs+1):
for j in range(1,int(divz+1)):
if i==num_of_divs:
#last vertical line - elements have nodes also from first vertical line
if j==(divz):
#last row
pass
else:
pos_1.append(node_number)
pos_2.append(node_number+1)
pos_3.append(start_num+k+1)
pos_1.append(node_number)
pos_2.append(start_num+k)
pos_3.append(start_num+k+1)
k=k+1
else:
if j==(divz): #vertical
#last row/last column
pass
else:
pos_1.append(node_number)
pos_2.append(node_number+1)
pos_3.append(node_number+1+divz)
pos_1.append(node_number)
pos_2.append(node_number+divz)
pos_3.append(node_number+1+divz)
node_number=node_number+1
def get_nnum(x,y,z):
# get node number based on known location
x_mask = x_arr == x
y_mask = y_arr == y
z_mask = z_arr == z
fin_mask=x_mask*y_mask*z_mask
nnum=node_nums[fin_mask]
return nnum[0] # return a scalar node number so it mixes cleanly with the int entries already in pos_1/pos_2/pos_3
x_cord=np.linspace(0,lenx,divx)
y_cord=np.linspace(0,leny,divy)
# Top plane
z_cord=lenz
for i in range(0,int(divy-1)):
for j in range(0,int(divx-1)):
pos_1.append(get_nnum(x_cord[j],y_cord[i],z_cord))
pos_2.append(get_nnum(x_cord[j+1],y_cord[i],z_cord))
pos_3.append(get_nnum(x_cord[j+1],y_cord[i+1],z_cord))
pos_1.append(get_nnum(x_cord[j],y_cord[i],z_cord))
pos_2.append(get_nnum(x_cord[j],y_cord[i+1],z_cord))
pos_3.append(get_nnum(x_cord[j+1],y_cord[i+1],z_cord))
# Bottom plane
z_cord=0
for i in range(0,int(divy-1)):
for j in range(0,int(divx-1)):
pos_1.append(get_nnum(x_cord[j],y_cord[i],z_cord))
pos_2.append(get_nnum(x_cord[j+1],y_cord[i],z_cord))
pos_3.append(get_nnum(x_cord[j+1],y_cord[i+1],z_cord))
pos_1.append(get_nnum(x_cord[j],y_cord[i],z_cord))
pos_2.append(get_nnum(x_cord[j],y_cord[i+1],z_cord))
pos_3.append(get_nnum(x_cord[j+1],y_cord[i+1],z_cord))
df_elem_index=np.arange(elem_node_ind,elem_node_ind+len(np.tile(elem_ids,3)))
df_elem_nodes=pd.DataFrame(index=df_elem_index, columns=self.modaldata.tables['elements_values'].columns)
df_elem_nodes['element_id']=np.tile(elem_ids,3)
df_elem_nodes['node_id']=np.asarray(pos_1+pos_2+pos_3) #node numbers
df_elem_nodes['node_pos']=np.repeat([1,2,3],len(pos_1)) #node position in element
if len(self.modaldata.tables['elements_values'].index)==0:
self.modaldata.tables['elements_values']=df_elem_nodes
else:
#self.modal_data.tables['elements_values'].update(df_elem_nodes)#,overwrite=True)
self.modaldata.tables['elements_values']=pd.concat([self.modaldata.tables['elements_values'], df_elem_nodes])
#refresh
self.calc_node_lcs()
self.populate_table_view(self.activated_models)
self.populate_elem_table_view(self.activated_models)
self.plot_activated_models()
def create_cyl_geom(self,cylinder_data):
"""
Create cylinder geometry based on user input (for currently active model)
:return:
"""
cyl_r=float(cylinder_data['radius'])
cyl_h=float(cylinder_data['height'])
start_num=cylinder_data['start_num']
num_orient=cylinder_data['num_orient']
z_offset=cylinder_data['z_offset']
height_div=float(cylinder_data['height_div'])
circ_div=float(cylinder_data['circ_div'])
for model_id in self.activated_models:
maximum_number_of_nodes=height_div*circ_div
node_nums=np.arange(start_num,start_num+maximum_number_of_nodes)
cyl_r_array=np.repeat(cyl_r,maximum_number_of_nodes)
phi_div=360./circ_div
cyl_phi_single_row=np.arange(0,360.,phi_div)
if num_orient=='Vertical':
cyl_phi_array=np.repeat(cyl_phi_single_row,height_div) # VERTICAL NUMBERING
else:
cyl_phi_array=np.tile(cyl_phi_single_row,height_div) # HORIZONTAL NUMBERING
##bottom->up numbering
#cyl_z_array_single_row=np.arange(0,cyl_h+z_div,z_div)
#top->down numbering
cyl_z_array_single_row=np.linspace(0,cyl_h,height_div)
cyl_z_array_single_row=cyl_z_array_single_row[::-1]
if num_orient=='Vertical':
cyl_z_array=np.tile(cyl_z_array_single_row,circ_div) # VERTICAL NUMBERING
else:
cyl_z_array=np.repeat(cyl_z_array_single_row,circ_div) # HORIZONTAL NUMBERING
#realign index in order to prevent double node names (geometry data frame starts with 1 by default)
#node_index=node_nums-1
#get node index corresponding with existing geometry table
model_mask=self.modaldata.tables['geometry'].loc[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].loc[:,'node_nums'].isin(node_nums)
final_mask=model_mask & node_mask
node_index=self.modaldata.tables['geometry'].loc[final_mask].index
if len(node_nums)>len(node_index):
# add rows for selected model
rows_to_add=len(node_nums)-len(node_index)
self.add_geom_rows(rows_to_add=rows_to_add)
# get index
model_mask=self.modaldata.tables['geometry'].loc[:,'model_id']==model_id
node_mask=self.modaldata.tables['geometry'].loc[:,'node_nums'].isin(node_nums)
final_mask=model_mask & node_mask
node_index=self.modaldata.tables['geometry'].loc[final_mask].index
#create node data
df=pd.DataFrame(index=node_index, columns=self.modaldata.tables['geometry'].columns)
df['model_id']=model_id
df['node_nums']=node_nums
df['r']=cyl_r_array
df['phi']=cyl_phi_array
df['z']=cyl_z_array+z_offset
df['thx']=0
df['thy']=0
df['cyl_thz']=0
rgba_color = pg.colorTuple(QtGui.QColor(0,255,0,255))
df['clr_r']=rgba_color[0]/ 255. # rgb values 0-1
df['clr_g']=rgba_color[1]/ 255. # rgb values 0-1
df['clr_b']=rgba_color[2]/ 255. # rgb values 0-1
df['clr_a']=rgba_color[3]/ 255. # alpha values 0-1
#calculate x,y from r,phi
df['x'] = df['r'] * np.cos(df['phi'].astype(np.float64)*np.pi/180)
df['y'] = df['r'] * np.sin(df['phi'].astype(np.float64)*np.pi/180)
df['thz']= df['cyl_thz'] + df['phi']
#update geometry table with new data
self.modaldata.tables['geometry'].update(df)#,overwrite=True)
#self.modaldata.tables['geometry']=pd.concat([self.modaldata.tables['geometry'],df])
#create element data
#get next pandas index
if len(self.modaldata.tables['elements_index'].index)==0:
ind=0
elem_node_ind=0
element_id=0
else:
ind= self.modaldata.tables['elements_index'].index.max() + 1
element_id= self.modaldata.tables['elements_index']['element_id'].max() + 1
elem_node_ind= self.modaldata.tables['elements_values'].index.max() + 1
for model_id_aux, model_obj_aux in self.models.items():
if model_obj_aux.model_id==model_id:
color=model_obj_aux.cur_triangle_color
element_descriptor='triangle'
nr_of_nodes=3
tot_num_of_elem=circ_div*(height_div-1)*2 #total number of elements
elem_nums=np.arange(ind,ind+tot_num_of_elem)
elem_ids=np.arange(element_id,element_id+tot_num_of_elem)
df_elem=pd.DataFrame(index=elem_nums, columns=self.modaldata.tables['elements_index'].columns)
df_elem['model_id']=model_id
df_elem['element_id']=elem_ids
df_elem['element_descriptor']=element_descriptor
df_elem['color']=color
df_elem['nr_of_nodes']=nr_of_nodes
df_elem['clr_r']=color.red()/255.
df_elem['clr_g']=color.green()/255.
df_elem['clr_b']=color.blue()/255.
df_elem['clr_a']=color.alpha()/255.
if len(self.modaldata.tables['elements_index'].index)==0:
self.modaldata.tables['elements_index']=df_elem
else:
#self.modal_data.tables['elements_index'].update(df_elem)#,overwrite=True)
self.modaldata.tables['elements_index']=pd.concat([self.modaldata.tables['elements_index'], df_elem])
#store nodes
#tot_elem_nums=circ_div*(height_div-1)*2 #total number of elements
#elem_nums=np.arange(element_id,element_id+tot_elem_nums+1)
#walk through nodes and store elements
pos_1=[]
pos_2=[]
pos_3=[]
node_number=start_num
if num_orient=='Vertical':
k=0
for i in range(1,int(circ_div+1)): # circumference division
for j in range(1,int(height_div+1)): # height divisions
if i==circ_div:
#last circumference division - elements have nodes also from first division
if j==(height_div):
#last row
pass
else:
pos_1.append(node_number)
pos_2.append(node_number+1)
pos_3.append(start_num+k+1)
pos_1.append(node_number)
pos_2.append(start_num+k)
pos_3.append(start_num+k+1)
k=k+1
else:
if j==(height_div): #vertical
#last row/last column
pass
else:
pos_1.append(node_number)
pos_2.append(node_number+1)
pos_3.append(node_number+1+height_div)
pos_1.append(node_number)
pos_2.append(node_number+height_div)
pos_3.append(node_number+1+height_div)
node_number=node_number+1
else:
k=0
for i in range(1,int(height_div+1)): # height division
for j in range(1,int(circ_div+1)): # circumference divisions
if j==circ_div:
#last circumference division - elements have nodes also from first division
if i==(height_div):
#last row
pass
else:
pos_1.append((start_num-1)+i*circ_div) # 4, 8
pos_2.append(start_num+k*circ_div) # 1, 5
pos_3.append(start_num+i*circ_div) # 5, 9
pos_1.append((start_num-1)+i*circ_div) # 4, 8
pos_2.append((start_num-1)+(i+1)*circ_div) # 8, 12
pos_3.append(start_num+i*circ_div) # 5, 9
k=k+1
else:
if i==(height_div):
#last row
pass
else:
pos_1.append(node_number) # 1,2
pos_2.append(node_number+1) # 2,3
pos_3.append(node_number+circ_div) # 5,6
pos_1.append(node_number+1) # 1, 2
pos_2.append(node_number+circ_div) # 5, 6
pos_3.append(node_number+1+circ_div) # 6, 7
node_number=node_number+1
df_elem_index=np.arange(elem_node_ind,elem_node_ind+len(np.tile(elem_ids,3)))
df_elem_nodes= | pd.DataFrame(index=df_elem_index, columns=self.modaldata.tables['elements_values'].columns) | pandas.DataFrame |
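# A minimal, self-contained sketch (not part of the tool above) of the triangulation
# bookkeeping that create_cyl_geom performs for a structured cylindrical grid with
# vertical numbering: every grid cell is split into two triangles and the last
# vertical wraps around to the first. The function name and arguments below are
# illustrative only and do not come from the source.
import numpy as np

def cylinder_triangles(circ_div, height_div, start_num=1):
    tris = []
    for i in range(circ_div):                      # walk the verticals around the circumference
        for j in range(height_div - 1):            # walk the cells along each vertical
            n = start_num + i * height_div + j     # lower node of the current cell
            if i == circ_div - 1:                  # last vertical wraps around to the first
                opposite = start_num + j
            else:
                opposite = n + height_div
            tris.append((n, n + 1, opposite + 1))
            tris.append((n, opposite, opposite + 1))
    return np.array(tris)

tris = cylinder_triangles(circ_div=8, height_div=4)
assert len(tris) == 8 * (4 - 1) * 2   # matches tot_num_of_elem in create_cyl_geom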
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import pandas as pd
import cobra
from cobra_utils.query.met_info import classify_metabolites_by_type
def rxn_info_from_metabolites(model, metabolites, verbose=True):
'''
This function looks for all the reactions in which the metabolites in the list participate. It also retrieves the genes
associated with those reactions.
Parameters
----------
model : cobra.core.Model.Model
A cobra model.
metabolites : array-like
An iterable object containing a list of metabolite ids present in the model.
verbose : boolean, True by default.
A flag to enable or disable the printing of progress messages by this function.
Returns
-------
rxn_gene_association : pandas.DataFrame
A pandas dataframe containing the information retrieved. The columns are :
'MetName', 'MetID', 'RxnID', 'RxnName', 'GeneID', 'Subsystem', 'RxnFormula'
'''
if verbose:
print('Using list of metabolites to get reactions where they participate. Also, getting genes of those reactions.')
rxn_gene_association = []
for metabolite in metabolites:
met = model.metabolites.get_by_id(metabolite)
for rxn in met.reactions:
if len(rxn.genes) != 0:
for gene in rxn.genes:
rxn_gene_association.append(
(rxn.id, rxn.name, str(gene.id), rxn.subsystem, rxn.reaction, met.id, met.name))
else:
rxn_gene_association.append(
(rxn.id, rxn.name, '', rxn.subsystem, rxn.reaction, met.id, met.name))
labels = ['RxnID', 'RxnName', 'GeneID', 'Subsystem', 'RxnFormula', 'MetID', 'MetName']
rxn_gene_association = pd.DataFrame.from_records(rxn_gene_association, columns=labels)
if verbose:
print('Information correctly obtained.')
return rxn_gene_association
def rxn_info_from_reactions(model, reactions, verbose=True):
'''
This function retrieves information on reactions, and the genes associated with them, from a list of reaction ids.
Parameters
----------
model : cobra.core.Model.Model
A cobra model.
reactions : array-like
An iterable object containing a list of reaction ids present in the model.
verbose : boolean, True by default.
A flag to enable or disable the printing of progress messages by this function.
Returns
-------
rxn_gene_association : pandas.DataFrame
A pandas dataframe containing the information retrieved. The columns are :
'GeneID', 'RxnID', 'RxnName', 'SubSystem', 'RxnFormula'
'''
if verbose:
print('Using list of reactions to get their information and genes associated.')
rxn_gene_association = []
for reaction in reactions:
rxn = model.reactions.get_by_id(reaction)
if len(rxn.genes) != 0:
for gene in rxn.genes:
rxn_gene_association.append((str(gene.id), rxn.id, rxn.name, rxn.subsystem, rxn.reaction))
else:
rxn_gene_association.append(('', rxn.id, rxn.name, rxn.subsystem, rxn.reaction))
labels = ['GeneID', 'RxnID', 'RxnName', 'SubSystem', 'RxnFormula']
rxn_gene_association = pd.DataFrame.from_records(rxn_gene_association, columns=labels)
if verbose:
print('Information correctly obtained.')
return rxn_gene_association
def rxn_info_from_genes(model, genes, verbose=True):
'''
This function retrieves the reactions associated with a list of gene ids, together with the reaction information.
Parameters
----------
model : cobra.core.Model.Model
A cobra model.
genes : array-like
An iterable object containing a list of gene ids present in the model.
verbose : boolean, True by default.
A flag to enable or disable the printing of progress messages by this function.
Returns
-------
rxn_gene_association : pandas.DataFrame
A pandas dataframe containing the information retrieved. The columns are :
'GeneID', 'RxnID', 'RxnName', 'SubSystem', 'RxnFormula'
'''
if verbose:
print('Using list of genes to get the reactions associated and their information.')
rxn_gene_association = []
for gene in genes:
g = model.genes.get_by_id(gene)
for rxn in g.reactions:
rxn_gene_association.append((str(g.id), rxn.id, rxn.name, rxn.subsystem, rxn.reaction))
labels = ['GeneID', 'RxnID', 'RxnName', 'SubSystem', 'RxnFormula']
rxn_gene_association = | pd.DataFrame.from_records(rxn_gene_association, columns=labels) | pandas.DataFrame.from_records |
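# Hypothetical usage sketch for the query helpers above: it builds a tiny in-memory
# cobra model (the model, metabolite, reaction and gene ids are invented) and asks
# which reactions a metabolite participates in via rxn_info_from_metabolites.
import cobra

model = cobra.Model('toy')
atp = cobra.Metabolite('atp_c', compartment='c')
adp = cobra.Metabolite('adp_c', compartment='c')
rxn = cobra.Reaction('ATPASE')
rxn.add_metabolites({atp: -1.0, adp: 1.0})
rxn.gene_reaction_rule = 'g1'
model.add_reactions([rxn])

df = rxn_info_from_metabolites(model, ['atp_c'], verbose=False)
print(df[['RxnID', 'GeneID', 'MetID']])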
import os
import gc
import re
import json
import random
import numpy as np
import pandas as pd
import scipy.io as sio
from tqdm import tqdm
import matplotlib.pyplot as plt
from daisy.utils.data import incorporate_in_ml100k
from scipy.sparse import csr_matrix
from collections import defaultdict
from IPython import embed
def convert_unique_idx(df, col):
column_dict = {x: i for i, x in enumerate(df[col].unique())}
df[col] = df[col].apply(column_dict.get)
assert df[col].min() == 0
assert df[col].max() == len(column_dict) - 1
return df
def cut_down_data_half(df):
cut_df = pd.DataFrame([])
for u in np.unique(df.user):
aux = df[df['user'] == u].copy()
# sample half of each user's interactions; DataFrame.append is deprecated, and sampling
# from the full df here was a bug (the sample should come from the current user's rows)
cut_df = pd.concat([cut_df, aux.sample(int(len(aux) / 2))])
return cut_df
def filter_users_and_items(df, num_users=None, freq_items=None, top_items=None, keys=['user', 'item']):
'''
Reduces the dataframe to a number of users = num_users and it filters the items by frequency
'''
if num_users is not None:
# df = df[df['user_id'].isin(np.unique(df.user_id)[:num_users])]
df = df[df[keys[0]].isin(np.unique(df[keys[0]])[:num_users])]
# Get top5k books
if top_items is not None:
top5k_books = df[keys[1]].value_counts()[:top_items].index
df = df[df[keys[1]].isin(top5k_books)]
if freq_items is not None:
frequent_items = df['item'].value_counts()[df['item'].value_counts() > freq_items].index
df = df[df[keys[1]].isin(frequent_items)]
return df
def run_statistics(df, src):
path = f'histograms/{src}'
bins = 30
os.makedirs(path, exist_ok=True)
f = open(os.path.join(path, "information.txt"), "w+")
f.write("Information:\n")
f.write("==========================\n")
f.write(f"Interactions: {len(df)}\n")
f.write(f"#users = {df['user'].nunique()}\n")
f.write(f"#items = {df['item'].nunique()}\n")
f.close()
for key in ['user', 'item']:
# OPTION A: HISTOGRAM
a = pd.DataFrame(df.groupby([key])[key].count())
a.columns = ['value_counts']
a.reset_index(level=[0], inplace=True)
dims = (15, 5)
fig, ax = plt.subplots(figsize=dims)
a["value_counts"].hist(bins=bins)
# fig.savefig('hist.jpg')
fig.savefig(os.path.join(path, f'{src}_histogram_{key}_bins={bins}.png'))
fig.clf()
# OPTION B: BARPLOT
# a = pd.DataFrame(df_year.groupby(['user'])['user'].count())
# a.columns = ['value_counts']
# a.reset_index(level=[0], inplace=True)
# dims = (15, 5)
# fig, ax = plt.subplots(figsize=dims)
# sns.set_style("darkgrid")
# sns.barplot(ax=ax, x="user", y="value_counts", data=a, palette="Blues_d")
# ax.set(xlabel="User", ylabel="Value Counts")
# plt.xticks(rotation=45)
# plt.show()
# fig.savefig('data.jpg')
def load_rate(src='ml-100k', prepro='origin', binary=True, pos_threshold=None, level='ui', context=False,
gce_flag=False, cut_down_data=False, side_info=False, context_type='', context_as_userfeat=False,
flag_run_statistics=False, remove_top_users=0, remove_on='item'):
"""
Method of loading certain raw data
Parameters
----------
src : str, the name of dataset
prepro : str, way to pre-process raw data input, expect 'origin', f'{N}core', f'{N}filter', N is integer value
binary : boolean, whether to transform ratings into binary labels (CTR-style classification) or keep them as-is (regression)
pos_threshold : float, if not None, treat rating larger than this threshold as positive sample
level : str, which level to do with f'{N}core' or f'{N}filter' operation (it only works when prepro contains 'core' or 'filter')
Returns
-------
df : pd.DataFrame, rating information with columns: user, item, rating, (options: timestamp)
user_num : int, the number of users
item_num : int, the number of items
"""
df = pd.DataFrame()
# import mat73
# a = mat73.loadmat('data/gen-disease/genes_phenes.mat')
# which dataset will use
if src == 'ml-100k':
df = pd.read_csv(f'./data/{src}/u.data', sep='\t', header=None,
names=['user', 'item', 'rating', 'timestamp'], engine='python')
if cut_down_data:
df = cut_down_data_half(df) # from 100k to 49.760 interactions
elif src == 'drugs':
union = False
if union:
df = pd.read_csv(f'./data/{src}/train_data_contextUNION_sideeffect.csv', engine='python', index_col=0)
df.drop(columns=['context'], inplace=True)
df.rename(columns={'drug': 'user', 'disease': 'item',
'context_union': 'context',
'proteins': 'item-feat', 'side_effect': 'user-feat'}, inplace=True)
else:
df = pd.read_csv(f'./data/{src}/train_data_allcontext_sideeffect.csv', engine='python', index_col=0)
df.rename(columns={'drug': 'user', 'disease': 'item',
# 'proteins_drug': 'user-feat',
'proteins': 'item-feat', 'side_effect': 'user-feat'}, inplace=True)
if not context:
df = df[['user', 'item']]
else:
if context_as_userfeat:
df = df[['user', 'item', 'user-feat', 'item-feat']]
else:
df = df[['user', 'item', 'context', 'user-feat']]
df['array_context_flag'] = True
df['timestamp'] = 1
df['rating'] = 1
elif src == 'ml-1m':
df = pd.read_csv(f'./data/{src}/ratings.dat', sep='::', header=None,
names=['user', 'item', 'rating', 'timestamp'], engine='python')
# only consider rating >=4 for data density
# df = df.query('rating >= 4').reset_index(drop=True).copy()
elif src == 'ml-10m':
df = pd.read_csv(f'./data/{src}/ratings.dat', sep='::', header=None,
names=['user', 'item', 'rating', 'timestamp'], engine='python')
# df = df.query('rating >= 4').reset_index(drop=True).copy()
elif src == 'ml-20m':
df = pd.read_csv(f'./data/{src}/ratings.csv')
df.rename(columns={'userId': 'user', 'movieId': 'item'}, inplace=True)
# df = df.query('rating >= 4').reset_index(drop=True)
elif src == 'books':
if not os.path.exists(f'./data/{src}/preprocessed_books_complete_timestamp.csv'):
df = pd.read_csv(f'./data/{src}/preprocessed_books_complete.csv', sep=',', engine='python')
df.rename(columns={'user_id': 'user', 'book_id': 'item', 'date_added': 'timestamp'}, inplace=True)
df = convert_unique_idx(df, 'user')
df = convert_unique_idx(df, 'item')
df['rating'] = 1.0
# if type(df['timestamp'][0]) == 'str':
df['date'] = pd.to_datetime(df['timestamp'])
df['timestamp'] = pd.to_datetime(df['date'], utc=True)
df['timestamp'] = df.timestamp.astype('int64') // 10 ** 9
df.to_csv(f'./data/{src}/preprocessed_books_complete_timestamp.csv', sep=',', index=False)
else:
df = pd.read_csv(f'./data/{src}/preprocessed_books_complete_timestamp.csv', sep=',', engine='python')
del df['date']
# reduce users to 3000 and filter items by clicked_frequency > 10
df = filter_users_and_items(df, num_users=4000, freq_items=50, top_items=5000, keys=['user', 'item']) # 35422 books
elif src == 'music':
df = pd.read_csv(f'./data/{src}-context/train.csv')
if side_info:
# ['user_id', 'track_id', 'hashtag', 'created_at', 'score', 'lang', 'tweet_lang', 'time_zone',
# 'instrumentalness', 'liveness', 'speechiness', 'danceability', 'valence', 'loudness', 'tempo',
# 'acousticness', 'energy', 'mode', 'key', 'rating']
df.rename(columns={'user_id': 'user', 'track_id': 'item', 'created_at': 'timestamp', 'speechiness': 'side_info'},
inplace=True)
df = df[['user', 'item', 'timestamp', 'side_info']]
# PREPROCESS SPEECHINESS # VALUE 10 FOR NON EXISTING FEATURE
df['side_info'] = df['side_info'].round(1)
df['side_info'] = df['side_info']*10
df['side_info'] = df['side_info'].fillna(10)
df['side_info'] = df['side_info'].astype(int)
else:
df.rename(columns={'user_id': 'user', 'track_id': 'item', 'created_at': 'timestamp'}, inplace=True)
df = df[['user', 'item', 'timestamp']]
# df = df.query('rating >= 4').reset_index(drop=True)
df = convert_unique_idx(df, 'user')
df = convert_unique_idx(df, 'item')
df = filter_users_and_items(df, num_users=3000, freq_items=20, keys=['user', 'item']) # 18508 songs - 3981 users
# FILTER OUT USERS WITH FEWER THAN 3 INTERACTIONS
df_aux = df.groupby('user').count().reset_index()[['user', 'item']]
indexes = df_aux[df_aux['item'] >= 3]['user'].index
df = df[df['user'].isin(indexes)]
df['rating'] = 1.0
df['timestamp'] = pd.to_datetime(df['timestamp'], utc=True)
df['timestamp'] = df.timestamp.astype('int64') // 10 ** 9
prepro = 'origin'
elif src == 'frappe':
df1 = pd.read_csv(f'./data/{src}/{src}_xin/train.csv', sep=',', header=None)
df2 = pd.read_csv(f'./data/{src}/{src}_xin/test.csv', sep=',', header=None)
df = pd.concat([df1, df2])
df['item'] = df[1].apply(lambda x: x.split('-')[0])
df['i-feature'] = df[1].apply(lambda x: x.split('-')[1])
df['user'] = df[0].apply(lambda x: x.split('-')[0])
df['user-context'] = df[0].apply(lambda x: '-'.join(x.split('-')[1:]))
# http://web.archive.org/web/20180422190150/http://baltrunas.info/research-menu/frappe
# columNames = ['labels', 'user', 'item', 'daytime', 'weekday', 'isweekend', 'homework', 'cost', 'weather',
# 'country', 'city']
context_type = ['daytime', 'weekday', 'isweekend', 'homework', 'cost', 'weather', 'country', 'city']
# df = pd.read_csv(f'./data/{src}/{src}.csv', sep=' ', engine='python', names=columNames)
df = pd.read_csv(f'./data/{src}/{src}.csv', sep='\t')
# TODO: select one context
if context:
# context_type
df = df[['user', 'item']+context_type]
for context_aux in context_type:
df = convert_unique_idx(df, context_aux)
else:
df = df[['user', 'item']]
# treat weight as interaction, as 1
df['rating'] = 1.0
df['timestamp'] = 1
# fake timestamp column
elif src == 'netflix':
cnt = 0
tmp_file = open(f'./data/{src}/training_data.csv', 'w')
tmp_file.write('user,item,rating,timestamp' + '\n')
for f in os.listdir(f'./data/{src}/training_set/'):
cnt += 1
if cnt % 5000 == 0:
print(f'Finish Process {cnt} file......')
txt_file = open(f'./data/{src}/training_set/{f}', 'r')
contents = txt_file.readlines()
item = contents[0].strip().split(':')[0]
for val in contents[1:]:
user, rating, timestamp = val.strip().split(',')
tmp_file.write(','.join([user, item, rating, timestamp]) + '\n')
txt_file.close()
tmp_file.close()
df = pd.read_csv(f'./data/{src}/training_data.csv')
df['rating'] = df.rating.astype(float)
df['timestamp'] = | pd.to_datetime(df['timestamp']) | pandas.to_datetime |
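# Standalone sketch of the id re-indexing and frequency-filtering steps used by
# load_rate above, applied to an invented toy interaction table (all values below
# are made up for illustration).
import pandas as pd

toy = pd.DataFrame({
    'user': ['u9', 'u9', 'u3', 'u3', 'u3', 'u7'],
    'item': ['iA', 'iB', 'iA', 'iC', 'iA', 'iA'],
})

# map raw ids to contiguous integers starting at 0 (same idea as convert_unique_idx)
for col in ['user', 'item']:
    mapping = {v: i for i, v in enumerate(toy[col].unique())}
    toy[col] = toy[col].map(mapping)

# keep only items that occur at least twice (same idea as the freq_items filter)
counts = toy['item'].value_counts()
toy = toy[toy['item'].isin(counts[counts >= 2].index)]
print(toy)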
from sklearn.cluster import MeanShift, estimate_bandwidth
import pandas as pd
import numpy as np
from clusteredmvfts.partitioner import KMeansPartitioner
from pyFTS.benchmarks import Measures
from clusteredmvfts.fts import cmvhofts
#Set target and input variables
target_station = 'DHHL_3'
#All neighbor stations with residual correlation greater than .90
neighbor_stations_90 = ['DHHL_3', 'DHHL_4','DHHL_5','DHHL_10','DHHL_11','DHHL_9','DHHL_2', 'DHHL_6','DHHL_7','DHHL_8']
df = | pd.read_pickle("../../notebooks/df_oahu.pkl") | pandas.read_pickle |
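# The snippet above only loads the Oahu data frame; as a sketch of the mean-shift
# clustering it imports, here is a minimal example on synthetic points. The quantile
# value and the synthetic data are arbitrary choices for illustration, not taken
# from the original experiment.
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 0.3, (50, 2)), rng.normal(3, 0.3, (50, 2))])

bandwidth = estimate_bandwidth(X, quantile=0.2)
labels = MeanShift(bandwidth=bandwidth).fit_predict(X)
print('clusters found:', len(np.unique(labels)))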
# -*- encoding:utf-8 -*-
"""
Intermediate layer: receives x, y and df from the layer above,
and owns the create-estimator logic
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import functools
from enum import Enum
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin, ClassifierMixin, RegressorMixin, clone
from sklearn import metrics
from sklearn.datasets import load_iris
from sklearn.feature_selection import RFE, VarianceThreshold
from sklearn.preprocessing import label_binarize, StandardScaler, binarize
from . import ABuMLExecute
from .ABuMLCreater import AbuMLCreater
from ..CoreBu import ABuEnv
from ..CoreBu.ABuFixes import train_test_split, cross_val_score, mean_squared_error_scorer, six
from ..UtilBu import ABuFileUtil
from ..UtilBu.ABuProgress import AbuProgress
from ..UtilBu.ABuDTUtil import warnings_filter
from ..UtilBu.ABuDTUtil import params_to_numpy
from ..CoreBu.ABuFixes import signature
__author__ = '阿布'
__weixin__ = 'abu_quant'
p_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.pardir))
ML_TEST_FILE = os.path.join(p_dir, 'RomDataBu/ml_test.csv')
class _EMLScoreType(Enum):
"""Enum of the scoring metrics supported for supervised learning"""
"""Supervised learning metric: accuracy"""
E_SCORE_ACCURACY = 'accuracy'
"""Supervised learning metric: mean squared error"""
E_SCORE_MSE = mean_squared_error_scorer
"""Supervised learning metric: roc_auc"""
E_SCORE_ROC_AUC = 'roc_auc'
class EMLFitType(Enum):
"""Enum of the commonly used learner categories"""
"""Supervised learning: auto-select - regression if y has more than 10 unique labels, otherwise classification"""
E_FIT_AUTO = 'auto'
"""Supervised learning: regression"""
E_FIT_REG = 'reg'
"""Supervised learning: classification"""
E_FIT_CLF = 'clf'
"""Unsupervised learning: HMM"""
E_FIT_HMM = 'hmm'
"""Unsupervised learning: PCA"""
E_FIT_PCA = 'pca'
"""Unsupervised learning: KMEAN"""
E_FIT_KMEAN = 'kmean'
def entry_wrapper(support=(EMLFitType.E_FIT_CLF, EMLFitType.E_FIT_REG, EMLFitType.E_FIT_HMM,
EMLFitType.E_FIT_PCA, EMLFitType.E_FIT_KMEAN)):
"""
Method decorator that normalizes the fiter_type keyword argument, e.g. a fiter_type of 'clf' is converted to
EMLFitType(fiter_type) and assigned to self.fiter_type. If the learner currently in use is not listed in the support
parameter, the decorated func is not executed; a log message is written and the call simply returns.
:param support: default support=(EMLFitType.E_FIT_CLF, EMLFitType.E_FIT_REG, EMLFitType.E_FIT_HMM,
EMLFitType.E_FIT_PCA, EMLFitType.E_FIT_KMEAN)
i.e. all learner kinds are supported by default; decorated functions pick the decorator arguments that match their own behaviour
"""
def decorate(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
org_fiter_type = self.fiter_type
if 'fiter_type' in kwargs:
# if a fiter_type argument was passed, pop it out of kwargs
fiter_type = kwargs.pop('fiter_type')
# if the fiter_type argument is a str, e.g. 'clf', convert it to EMLFitType(fiter_type)
if isinstance(fiter_type, six.string_types):
fiter_type = EMLFitType(fiter_type)
self.fiter_type = fiter_type
check_support = self.fiter_type
if self.fiter_type == EMLFitType.E_FIT_AUTO:
# resolve 'auto' to a concrete classification or regression type
check_y = self.y
if 'y' in kwargs:
check_y = kwargs['y']
check_support = EMLFitType.E_FIT_CLF if len(np.unique(check_y)) <= 10 else EMLFitType.E_FIT_REG
if check_support not in support:
# the learner currently in use is not in the support parameter: skip the decorated func, log a message and return
self.log_func('{} not support {}!'.format(func.__name__, check_support.value))
# if the call was not executed, switch the fit type back to its original value
self.fiter_type = org_fiter_type
return
return func(self, *args, **kwargs)
return wrapper
return decorate
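# Illustrative only: a minimal stand-in class (not part of abupy) showing how the
# entry_wrapper decorator above is meant to be used. The class name, attributes and
# method are invented for this sketch; defining it has no side effects.
class _EntryWrapperDemo(object):
    def __init__(self, y):
        self.y = y
        self.fiter_type = EMLFitType.E_FIT_AUTO
        self.log_func = print

    @entry_wrapper(support=(EMLFitType.E_FIT_CLF,))
    def clf_only(self, **kwargs):
        # reached only when the resolved learner kind is classification
        return 'classification branch ran'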
# noinspection PyUnresolvedReferences
class AbuML(object):
"""Class wrapping simple supervised and unsupervised learning methods and the related operations"""
@classmethod
def create_test_fiter(cls):
"""
Class method: build an AbuML object from the iris data; a test interface for validating methods and strategies on the simple iris dataset.
The iris dataset is small - if more data is needed for interface testing, use the create_test_more_fiter interface instead.
eg: iris_abu = AbuML.create_test_fiter()
:return: AbuML(x, y, df),
eg: df
y x0 x1 x2 x3
0 0 5.1 3.5 1.4 0.2
1 0 4.9 3.0 1.4 0.2
2 0 4.7 3.2 1.3 0.2
3 0 4.6 3.1 1.5 0.2
4 0 5.0 3.6 1.4 0.2
.. .. ... ... ... ...
145 2 6.7 3.0 5.2 2.3
146 2 6.3 2.5 5.0 1.9
147 2 6.5 3.0 5.2 2.0
148 2 6.2 3.4 5.4 2.3
149 2 5.9 3.0 5.1 1.8
"""
iris = load_iris()
x = iris.data
"""
eg: iris.data
array([[ 5.1, 3.5, 1.4, 0.2],
[ 4.9, 3. , 1.4, 0.2],
[ 4.7, 3.2, 1.3, 0.2],
[ 4.6, 3.1, 1.5, 0.2],
[ 5. , 3.6, 1.4, 0.2],
....... ....... .......
[ 6.7, 3. , 5.2, 2.3],
[ 6.3, 2.5, 5. , 1.9],
[ 6.5, 3. , 5.2, 2. ],
[ 6.2, 3.4, 5.4, 2.3],
[ 5.9, 3. , 5.1, 1.8]])
"""
y = iris.target
"""
eg: y
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
"""
x_df = pd.DataFrame(x, columns=['x0', 'x1', 'x2', 'x3'])
y_df = pd.DataFrame(y, columns=['y'])
df = y_df.join(x_df)
return AbuML(x, y, df)
@classmethod
def load_ttn_raw_df(cls):
"""
Read the Titanic test data
:return: pd.DataFrame object, loaded via pd.read_csv(train_csv_path)
"""
train_csv_path = ML_TEST_FILE
if not ABuFileUtil.file_exist(train_csv_path):
# raise RuntimeError if the Titanic data file does not exist
raise RuntimeError('{} not exist, please down a ml_test.csv!'.format(train_csv_path))
# read the training file from disk with read_csv
return pd.read_csv(train_csv_path)
@classmethod
@warnings_filter
def create_test_more_fiter(cls):
"""
Class method: build an AbuML object from the Titanic data; a test interface for validating methods and strategies on more data than iris provides.
eg: ttn_abu = AbuML.create_test_more_fiter()
:return: AbuML(x, y, df), where the Titanic data finally used to build AbuML looks like:
eg: df
Survived SibSp Parch Cabin_No Cabin_Yes Embarked_C Embarked_Q \
0 0 1 0 1 0 0 0
1 1 1 0 0 1 1 0
2 1 0 0 1 0 0 0
3 1 1 0 0 1 0 0
4 0 0 0 1 0 0 0
5 0 0 0 1 0 0 1
6 0 0 0 0 1 0 0
7 0 3 1 1 0 0 0
8 1 0 2 1 0 0 0
9 1 1 0 1 0 1 0
.. ... ... ... ... ... ... ...
Embarked_S Sex_female Sex_male Pclass_1 Pclass_2 Pclass_3 \
0 1 0 1 0 0 1
1 0 1 0 1 0 0
2 1 1 0 0 0 1
3 1 1 0 1 0 0
4 1 0 1 0 0 1
5 0 0 1 0 0 1
6 1 0 1 1 0 0
7 1 0 1 0 0 1
8 1 1 0 0 0 1
9 0 1 0 0 1 0
.. ... ... ... ... ... ...
Age_scaled Fare_scaled
0 -0.5614 -0.5024
1 0.6132 0.7868
2 -0.2677 -0.4889
3 0.3930 0.4207
4 0.3930 -0.4863
5 -0.4271 -0.4781
6 1.7877 0.3958
7 -2.0295 -0.2241
8 -0.1943 -0.4243
.. ... ...
"""
raw_df = cls.load_ttn_raw_df()
def set_missing_ages(p_df):
"""
Fill the missing ages in the data using a RandomForestRegressor
"""
from sklearn.ensemble import RandomForestRegressor
age_df = p_df[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]
known_age = age_df[age_df.Age.notnull()].values
unknown_age = age_df[age_df.Age.isnull()].values
y_inner = known_age[:, 0]
x_inner = known_age[:, 1:]
rfr_inner = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
rfr_inner.fit(x_inner, y_inner)
predicted_ages = rfr_inner.predict(unknown_age[:, 1::])
p_df.loc[(p_df.Age.isnull()), 'Age'] = predicted_ages
return p_df, rfr_inner
def set_cabin_type(p_df):
"""
Handle the missing Cabin values in the data
"""
p_df.loc[(p_df.Cabin.notnull()), 'Cabin'] = "Yes"
p_df.loc[(p_df.Cabin.isnull()), 'Cabin'] = "No"
return p_df
raw_df, rfr = set_missing_ages(raw_df)
raw_df = set_cabin_type(raw_df)
# one-hot encode the multi-label categorical columns with get_dummies
dummies_cabin = pd.get_dummies(raw_df['Cabin'], prefix='Cabin')
"""
eg:
data_train['Cabin']:
0 No
1 Yes
2 No
3 Yes
4 No
5 No
6 Yes
7 No
8 No
9 No
...
dummies_cabin:
Cabin_No Cabin_Yes
0 1 0
1 0 1
2 1 0
3 0 1
4 1 0
5 1 0
6 0 1
7 1 0
8 1 0
9 1 0
.. ... ...
"""
dummies__embarked = pd.get_dummies(raw_df['Embarked'], prefix='Embarked')
dummies__sex = | pd.get_dummies(raw_df['Sex'], prefix='Sex') | pandas.get_dummies |
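# For reference, a standalone sketch of the one-hot-encode-and-join pattern used in
# create_test_more_fiter above, applied to an invented three-column frame.
import pandas as pd

raw = pd.DataFrame({'Sex': ['male', 'female', 'female'],
                    'Embarked': ['S', 'C', 'S'],
                    'Survived': [0, 1, 1]})

dummies = pd.concat([pd.get_dummies(raw['Sex'], prefix='Sex'),
                     pd.get_dummies(raw['Embarked'], prefix='Embarked')], axis=1)
df = raw[['Survived']].join(dummies)
print(df.columns.tolist())
# ['Survived', 'Sex_female', 'Sex_male', 'Embarked_C', 'Embarked_S']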
import os
import pickle
from pathlib import Path
from typing import Union
import joblib
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from numpy import interp
from sklearn.metrics import roc_curve, auc
import thoipapy.common
import thoipapy.figs
import thoipapy.utils
import thoipapy.validation
import thoipapy.validation.bocurve
from thoipapy.figs.create_BOcurve_files import save_BO_linegraph_and_barchart, save_extra_BO_figs
from thoipapy.validation.bocurve import parse_BO_data_csv_to_excel
from thoipapy.utils import get_test_and_train_set_lists
def run_testset_trainset_validation(s, logging):
# create list of test and train datasets
# if only one is given, make a list with only one dataset
test_set_list, train_set_list = thoipapy.utils.get_test_and_train_set_lists(s)
validate_LIPS_for_testset(s, logging)
validate_LIPS_for_testset(s, logging, LIPS_name="LIPS_surface_ranked", pred_col="LIPS_surface_ranked")
validate_THOIPA_for_testset_trainset_combination(s, test_set_list, train_set_list, logging)
def validate_THOIPA_for_testset_trainset_combination(s, test_set_list, train_set_list, logging):
""" Creates ROC and BO-curve for a particular testset-trainset combination.
Parameters
----------
s : dict
Settings dictionary for figures.
test_set_list : list
List of test datasets in selection
E.g. ["set03", "set31"]
train_set_list : list
List of training datasets in selection
E.g. ["set02", "set04"]
Saved Files
-----------
THOIPA_pred_csv : csv
THOIPA result for this testset-trainset combination
Columns = "residue_num", "residue_name", "THOIPA"
Index = range index of residues
combined_incl_THOIPA_csv : csv
The combined file with all features. THOIPA prediction is added as a new column
THOIPA_ROC_pkl : pickle
Pickled output dictionary with ROC curves
keys = accessions
values = dictionary with fpr, tpr etc for each protein
Could not be saved easily as a dataframe, because the number of residues is different for each protein
"""
names_excel_path = os.path.join(s["base_dir"], "protein_names.xlsx")
namedict = thoipapy.utils.create_namedict(names_excel_path)
for n, train_set in enumerate(train_set_list):
trainsetname = "set{:02d}".format(int(train_set))
model_pkl = Path(s["data_dir"]) / f"results/{trainsetname}/{trainsetname}_ML_model.lpkl"
for test_set in test_set_list:
testsetname = "set{:02d}".format(int(test_set))
# BO_curve_folder = Path(s["data_dir"]) / f"results{testsetname}/blindvalidation/thoipa.train{trainsetname}"
# THOIPA_BO_curve_data_csv = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "data", "Test{}_Train{}.THOIPA".format(testsetname, trainsetname), "data", "Test{}_Train{}.THOIPA.best_overlap_data.csv".format(testsetname, trainsetname))
THOIPA_BO_curve_data_csv = Path(s["data_dir"]) / f"results/{testsetname}/blindvalidation/thoipa.train{trainsetname}/THOIPA.best_overlap_data.csv"
# THOIPA_ROC_pkl = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "data", "Test{}_Train{}.THOIPA".format(testsetname, trainsetname), "data", "Test{}_Train{}.THOIPA.ROC_data.pkl".format(testsetname, trainsetname))
THOIPA_ROC_pkl = Path(s["data_dir"]) / f"results/{testsetname}/blindvalidation/thoipa.train{trainsetname}/ROC_data.pkl"
bocurve_data_xlsx = Path(s["data_dir"]) / f"results/{testsetname}/blindvalidation/thoipa.train{trainsetname}/bocurve_data.xlsx"
BO_linechart_png = Path(s["data_dir"]) / f"results/{testsetname}/blindvalidation/thoipa.train{trainsetname}/BO_linechart.png"
BO_barchart_png = Path(s["data_dir"]) / f"results/{testsetname}/blindvalidation/thoipa.train{trainsetname}/AUBOC_barchart.png"
thoipapy.utils.make_sure_path_exists(bocurve_data_xlsx, isfile=True)
testset_path = thoipapy.common.get_path_of_protein_set(testsetname, s["sets_dir"])
testdataset_df = pd.read_excel(testset_path)
THOIPA_BO_data_df = pd.DataFrame()
# LIPS_BO_data_df = pd.DataFrame()
# save all outputs to a cross-validation dictionary, to be saved as a pickle file
xv_dict_THOIPA = {}
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
for i in testdataset_df.index:
acc = testdataset_df.loc[i, "acc"]
database = testdataset_df.loc[i, "database"]
acc_db = acc + "-" + database
testdata_combined_file = os.path.join(s["data_dir"], "features", "combined", database,
"{}.surr20.gaps5.combined_features.csv".format(acc))
THOIPA_pred_csv = Path(s["data_dir"]) / f"results/{testsetname}/predictions/thoipa.train{trainsetname}/{database}.{acc}.thoipa.train{trainsetname}.csv"
combined_incl_THOIPA_csv = Path(s["data_dir"]) / f"results/{testsetname}/predictions/thoipa.train{trainsetname}/{database}.{acc}.thoipa.train{trainsetname}_incl_combined.csv"
thoipapy.utils.make_sure_path_exists(combined_incl_THOIPA_csv, isfile=True)
combined_incl_THOIPA_df = save_THOIPA_pred_indiv_prot(s, model_pkl, testdata_combined_file, THOIPA_pred_csv, combined_incl_THOIPA_csv, trainsetname, logging)
#######################################################################################################
# #
# Processing BO curve data for each single protein #
# #
#######################################################################################################
combined_incl_THOIPA_df["LIPS_L*E"] = -1 * combined_incl_THOIPA_df["LIPS_L*E"]
if database == "crystal" or database == "NMR":
# (it is closest distance and low value means high propencity of interfacial)
combined_incl_THOIPA_df["interface_score"] = -1 * combined_incl_THOIPA_df["interface_score"]
THOIPA_BO_single_prot_df = thoipapy.validation.bocurve.calc_best_overlap_from_selected_column_in_df(acc_db, combined_incl_THOIPA_df, pred_col=f"thoipa.train{trainsetname}")
if THOIPA_BO_data_df.empty:
THOIPA_BO_data_df = THOIPA_BO_single_prot_df
else:
THOIPA_BO_data_df = pd.concat([THOIPA_BO_data_df, THOIPA_BO_single_prot_df], axis=1, join="outer")
#######################################################################################################
# #
# Processing ROC data for each single protein, saving to nested dict #
# #
#######################################################################################################
df_for_roc = combined_incl_THOIPA_df.dropna(subset=["interface_score"])
predictor_name = f"thoipa.train{trainsetname}"
fpr, tpr, thresholds = roc_curve(df_for_roc.interface, df_for_roc[predictor_name], drop_intermediate=False)
roc_auc = auc(fpr, tpr)
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
xv_dict_THOIPA[acc_db] = {"fpr": fpr, "tpr": tpr, "roc_auc": roc_auc}
#######################################################################################################
# #
# Processing BO CURVE data, saving to csv and running the BO curve analysis script #
# #
#######################################################################################################
THOIPA_BO_data_df.to_csv(THOIPA_BO_curve_data_csv)
# THOIPA_linechart_mean_obs_and_rand = analyse_bo_curve_underlying_data(THOIPA_BO_curve_data_csv, BO_curve_folder, names_excel_path)
parse_BO_data_csv_to_excel(THOIPA_BO_curve_data_csv, bocurve_data_xlsx, s["n_residues_AUBOC_validation"], logging)
AUC_ser = pd.Series(xv_dict_THOIPA[acc_db]["roc_auc"])
AUBOC = save_BO_linegraph_and_barchart(s, bocurve_data_xlsx, BO_linechart_png, BO_barchart_png, namedict, logging, AUC_ser)
if "you_want_more_details" == "TRUE":
other_figs_path: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/other_figs"
save_extra_BO_figs(bocurve_data_xlsx, other_figs_path)
#######################################################################################################
# #
# Processing dictionary with ROC data, saving to pickle #
# #
#######################################################################################################
mean_tpr /= testdataset_df.shape[0]
mean_tpr[-1] = 1.0
mean_roc_auc = auc(mean_fpr, mean_tpr)
ROC_out_dict = {"xv_dict_THOIPA": xv_dict_THOIPA}
ROC_out_dict["true_positive_rate_mean"] = mean_tpr
ROC_out_dict["false_positive_rate_mean"] = mean_fpr
ROC_out_dict["mean_roc_auc"] = mean_roc_auc
# save dict as pickle
with open(THOIPA_ROC_pkl, "wb") as f:
pickle.dump(ROC_out_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
create_ROC_fig_for_testset_trainset_combination(THOIPA_ROC_pkl)
logging.info("test{}_train{} AUC({:.03f}), AUBOC({:.2f}). ({})".format(testsetname, trainsetname, mean_roc_auc, AUBOC, BO_barchart_png))
def validate_LIPS_for_testset(s, logging, LIPS_name="LIPS_LE", pred_col="LIPS_L*E"):
names_excel_path = os.path.join(s["base_dir"], "protein_names.xlsx")
namedict = thoipapy.utils.create_namedict(names_excel_path)
# create list of test and train datasets
# if only one is given, make a list with only one dataset
test_set_list, train_set_list = thoipapy.utils.get_test_and_train_set_lists(s)
for test_set in test_set_list:
testsetname = "set{:02d}".format(int(test_set))
LIPS_BO_curve_data_csv = Path(s["data_dir"]) / "results" / testsetname / f"blindvalidation/{LIPS_name}/{LIPS_name}.best_overlap_data.csv.csv"
# LIPS_BO_curve_data_csv = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "data", "Test{}.{}".format(testsetname, LIPS_name), "Test{}.{}.best_overlap_data.csv".format(testsetname, LIPS_name))
# BO_curve_folder = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "data", "Test{}.{}".format(testsetname, LIPS_name))
BO_curve_folder = Path(s["data_dir"]) / "results" / testsetname / f"blindvalidation/{LIPS_name}"
LIPS_ROC_pkl = Path(s["data_dir"]) / "results" / testsetname / f"blindvalidation/{LIPS_name}/ROC_data.pkl"
# LIPS_ROC_pkl = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "data", "Test{}.{}".format(testsetname, LIPS_name), "data", "Test{}.{}.ROC_data.pkl".format(testsetname, LIPS_name))
thoipapy.utils.make_sure_path_exists(LIPS_BO_curve_data_csv, isfile=True)
bocurve_data_xlsx = os.path.join(BO_curve_folder, "data", "bocurve_data.xlsx")
BO_linechart_png = os.path.join(BO_curve_folder, "BO_linechart.png")
BO_barchart_png = os.path.join(BO_curve_folder, "AUBOC_barchart.png")
thoipapy.utils.make_sure_path_exists(bocurve_data_xlsx, isfile=True)
testset_path = thoipapy.common.get_path_of_protein_set(testsetname, s["sets_dir"])
testdataset_df = pd.read_excel(testset_path)
LIPS_BO_data_df = pd.DataFrame()
# save all outputs to a cross-validation dictionary, to be saved as a pickle file
xv_dict_LIPS = {}
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
for i in testdataset_df.index:
acc = testdataset_df.loc[i, "acc"]
database = testdataset_df.loc[i, "database"]
acc_db = acc + "-" + database
testdata_combined_file = os.path.join(s["data_dir"], "features", "combined", database,
"{}.surr20.gaps5.combined_features.csv".format(acc))
combined_df = pd.read_csv(testdata_combined_file, index_col=0)
#######################################################################################################
# #
# Processing BO curve data for each single protein #
# #
#######################################################################################################
# SAVE LIPS PREDICTION DATA
# this is somewhat inefficient, as it is conducted for every test dataset
# LIPS_pred_csv = os.path.join(os.path.dirname(s["data_dir"]), "features", "Predictions", "testset_trainset", database, "{}.LIPS_pred.csv".format(acc, testsetname))
LIPS_pred_csv = Path(s["data_dir"]) / f"results/{testsetname}/predictions/{LIPS_name}/{database}.{acc}.{LIPS_name}.csv"
LIPS_pred_df = combined_df[["residue_name", "residue_num", "LIPS_polarity", "LIPS_entropy", "LIPS_L*E", "LIPS_surface", "LIPS_surface_ranked"]]
thoipapy.utils.make_sure_path_exists(LIPS_pred_csv, isfile=True)
LIPS_pred_df.to_csv(LIPS_pred_csv)
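# the LIPS L*E score is flipped below so that, as for the other predictors, a
# larger value indicates a more likely interfacial residue (assumed rationale)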
if pred_col == "LIPS_L*E":
combined_df[pred_col] = -1 * combined_df[pred_col]
if database == "crystal" or database == "NMR":
# (it is the closest distance, so a low value means a high propensity to be interfacial)
combined_df["interface_score"] = -1 * combined_df["interface_score"]
LIPS_BO_single_prot_df = thoipapy.validation.bocurve.calc_best_overlap_from_selected_column_in_df(acc_db, combined_df, experiment_col="interface_score", pred_col=pred_col)
if LIPS_BO_data_df.empty:
LIPS_BO_data_df = LIPS_BO_single_prot_df
else:
LIPS_BO_data_df = pd.concat([LIPS_BO_data_df, LIPS_BO_single_prot_df], axis=1, join="outer")
#######################################################################################################
# #
# Processing ROC data for each single protein, saving to nested dict #
# #
#######################################################################################################
df_for_roc = combined_df.dropna(subset=["interface_score"])
fpr, tpr, thresholds = roc_curve(df_for_roc.interface, df_for_roc[pred_col], drop_intermediate=False)
roc_auc = auc(fpr, tpr)
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
xv_dict_LIPS[acc_db] = {"fpr": fpr, "tpr": tpr, "roc_auc": roc_auc}
#######################################################################################################
# #
# Processing BO CURVE data, saving to csv and running the BO curve analysis script #
# #
#######################################################################################################
LIPS_BO_data_df.to_csv(LIPS_BO_curve_data_csv)
names_excel_path = os.path.join(s["base_dir"], "protein_names.xlsx")
# LIPS_linechart_mean_obs_and_rand = analyse_bo_curve_underlying_data(LIPS_BO_curve_data_csv, BO_curve_folder, names_excel_path)
# parse_BO_data_csv_to_excel(LIPS_BO_curve_data_csv, BO_curve_folder, names_excel_path)
parse_BO_data_csv_to_excel(LIPS_BO_curve_data_csv, bocurve_data_xlsx, s["n_residues_AUBOC_validation"], logging)
AUC_ser = pd.Series(xv_dict_LIPS[acc_db]["roc_auc"])
AUBOC = save_BO_linegraph_and_barchart(s, bocurve_data_xlsx, BO_linechart_png, BO_barchart_png, namedict, logging, AUC_ser)
if "you_want_more_details" == "TRUE":
other_figs_path: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/other_figs"
save_extra_BO_figs(bocurve_data_xlsx, other_figs_path)
#######################################################################################################
# #
# Processing dictionary with ROC data, saving to pickle #
# #
#######################################################################################################
mean_tpr /= testdataset_df.shape[0]
mean_tpr[-1] = 1.0
mean_roc_auc = auc(mean_fpr, mean_tpr)
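# the key is deliberately kept as "xv_dict_THOIPA": create_ROC_fig_for_testset_trainset_combination
# below reads that key regardless of which predictor produced the data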
ROC_out_dict = {"xv_dict_THOIPA": xv_dict_LIPS}
ROC_out_dict["true_positive_rate_mean"] = mean_tpr
ROC_out_dict["false_positive_rate_mean"] = mean_fpr
ROC_out_dict["mean_roc_auc"] = mean_roc_auc
# save dict as pickle
with open(LIPS_ROC_pkl, "wb") as f:
pickle.dump(ROC_out_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
create_ROC_fig_for_testset_trainset_combination(LIPS_ROC_pkl)
logging.info("test{}.{} AUC({:.03f}), AUBOC({:.2f}). ({})".format(testsetname, LIPS_name, mean_roc_auc, AUBOC, BO_barchart_png))
def create_ROC_fig_for_testset_trainset_combination(THOIPA_ROC_pkl):
# plt.rcParams.update({'font.size': 6})
ROC_pkl_basename = os.path.basename(THOIPA_ROC_pkl)[:-4]
ROC_pkl_dir = os.path.dirname(THOIPA_ROC_pkl)
ROC_png = os.path.join(ROC_pkl_dir, "{}.ROC.png".format(ROC_pkl_basename))
thoipapy.utils.make_sure_path_exists(ROC_png, isfile=True)
# open pickle file
with open(THOIPA_ROC_pkl, "rb") as f:
ROC_out_dict = pickle.load(f)
xv_dict_THOIPA = ROC_out_dict["xv_dict_THOIPA"]
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
for acc_db in xv_dict_THOIPA:
roc_auc = xv_dict_THOIPA[acc_db]["roc_auc"]
ax.plot(xv_dict_THOIPA[acc_db]["fpr"], xv_dict_THOIPA[acc_db]["tpr"], lw=1, label='{} ({:0.2f})'.format(acc_db, roc_auc), alpha=0.8)
# mean_roc_auc = auc(df_xv["false_positive_rate"], df_xv["true_positive_rate"])
mean_roc_auc = ROC_out_dict["mean_roc_auc"]
ax.plot(ROC_out_dict["false_positive_rate_mean"], ROC_out_dict["true_positive_rate_mean"], color="k", label='mean (area = %0.2f)' % mean_roc_auc, lw=1.5)
ax.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='random')
ax.set_xlim([-0.05, 1.05])
ax.set_ylim([-0.05, 1.05])
ax.set_xlabel("False positive rate")
ax.set_ylabel("True positive rate")
ax.legend(loc="lower right")
fig.tight_layout()
fig.savefig(ROC_png, dpi=240)
# fig.savefig(thoipapy.utils.pdf_subpath(ROC_png))
def save_THOIPA_pred_indiv_prot(s, model_pkl, testdata_combined_file, THOIPA_pred_csv, test_combined_incl_pred, trainsetname, logging):
combined_incl_THOIPA_df = | pd.read_csv(testdata_combined_file, sep=',', engine='python', index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
@file
@brief Defines a streaming dataframe.
"""
import pickle
import os
from io import StringIO, BytesIO
from inspect import isfunction
import numpy
import numpy.random as nrandom
import pandas
from pandas.testing import assert_frame_equal
from pandas.io.json import json_normalize
from .dataframe_split import sklearn_train_test_split, sklearn_train_test_split_streaming
from .dataframe_io_helpers import enumerate_json_items, JsonIterator2Stream
class StreamingDataFrameSchemaError(Exception):
"""
Reveals an issue with inconsistent schemas.
"""
pass
class StreamingDataFrame:
"""
Defines a streaming dataframe.
The goal is to reduce the memory footprint.
The class takes a function which creates an iterator
on :epkg:`dataframe`. We assume this function can
be called multiple time. As a matter of fact, the
function is called every time the class needs to walk
through the stream with the following loop:
::
for df in self: # self is a StreamingDataFrame
# ...
The constructor cannot receive an iterator otherwise
this class would be able to walk through the data
only once. The main reason is it is impossible to
:epkg:`*py:pickle` (or :epkg:`dill`)
an iterator: it cannot be replicated.
Instead, the class takes a function which generates
an iterator on :epkg:`DataFrame`.
Most of the methods return either a :epkg:`DataFrame`
or a @see cl StreamingDataFrame. In the second case,
methods can be chained.
By default, the object checks that the schema remains
the same between two chunks. This can be disabled
by setting *check_schema=False* in the constructor.
The user should expect the data to remain stable.
Every loop should produce the same data. However,
in some situations, it is more efficient not to keep
that constraint. Drawing a random @see me sample
is one of these cases.
:param iter_creation: function which creates an iterator or an
instance of @see cl StreamingDataFrame
:param check_schema: checks that the schema is the same
for every :epkg:`dataframe`
:param stable: indicates if the :epkg:`dataframe` remains the same
whenever it is walked through
"""
def __init__(self, iter_creation, check_schema=True, stable=True):
self._delete_ = []
if isinstance(iter_creation, (pandas.DataFrame, dict,
numpy.ndarray, str)):
raise TypeError(
"Unexpected type %r for iter_creation. It must "
"be an iterator." % type(iter_creation))
if isinstance(iter_creation, StreamingDataFrame):
self.iter_creation = iter_creation.iter_creation
self.stable = iter_creation.stable
else:
self.iter_creation = iter_creation
self.stable = stable
self.check_schema = check_schema
def is_stable(self, do_check=False, n=10):
"""
Tells if the :epkg:`dataframe` is supposed to be stable.
@param do_check do not trust the value sent to the constructor
@param n number of rows used to check the stability,
None for all rows
@return boolean
*do_check=True* means the method checks that the first
*n* rows remain the same for two iterations.
"""
if do_check:
for i, (a, b) in enumerate(zip(self, self)):
if n is not None and i >= n:
break
try:
| assert_frame_equal(a, b) | pandas.testing.assert_frame_equal |
"""Transform signaling data to smoothed trajectories."""
import sys
import numpy
import pandas as pd
import geopandas as gpd
import shapely.geometry
import matplotlib.patches
import matplotlib.pyplot as plt
import mobilib.voronoi
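# resampling step and Gaussian smoothing bandwidth for the trajectory smoothing;
# their exact use is an assumption inferred from the names and the module docstring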
SAMPLING = pd.Timedelta('00:01:00')
STD = | pd.Timedelta('00:05:00') | pandas.Timedelta |
import pandas as pd
import os
import re
import pprint
import shutil
# Clean all the obvious typos
corrections ={'BAUGHWJV':'BAUGHMAN',
'BOHNE':'BOEHNE',
'EISEMENGER':'EISENMENGER',
'GEITHER':'GEITHNER',
'KIMBREL':'KIMEREL',
'MATTINGLY': 'MATTLINGLY',
'FORESTALL':'FORRESTAL',
'GRENSPAN':'GREENSPAN',
'GREESPAN':'GREENSPAN',
'GREENPSAN':'GREENSPAN',
'GREENSPAN,':'GREENSPAN',
'GREENPAN':'GREENSPAN',
'McANDREWS':'MCANDREWS',
'MCDONUGH':'MCDONOUGH',
'MOSCOW':'MOSKOW',
'MORRIS':'MORRRIS',
'MONHOLLAN':'MONHOLLON',
'MILIER':'MILLER',
'MILER':'MILLER',
'SCWLTZ':'SCHULTZ',
'SCHELD':'SCHIELD',
'WILLZAMS':'WILLIAMS',
'WALLJCH':'WALLICH',
'VOLCKFR':'VOLCKER',
'VOLCRER':'VOLKER',
'ALLISON for':'ALLISON',
'ALTMA"':'ALTMANN',
'B A U G W':'BAUGW',
'BIES (as read by Ms':'BIES',
'BLACK &':'BLACK',
'MAYO/MR':'MAYO',
'Greene':"GREENE",
'CROSS,':'CROSS',
'GREENSPAN,':'GREENSPAN',
'HOSKINS,':'HOSKINS',
'MACCLAURY':'MACLAURY',
'MORRRIS':'MORRIS',
"O'CONNELL":'O’CONNELL',
'SOLOMON]':'SOLOMON',
'TRUMAN-':'TRUMAN',
'VOLCKER,':'VOLCKER',
'VOLKER,':'VOLCKER',
'WALLlCH':'WALLICH',
'[BALLES]':'BALLES',
'[GARDNER]':'GARDNER',
'[KICHLINE]?':'KICHLINE',
'[PARDEE]':'PARDEE',
'[ROOS]':'ROOS',
'[STERN':'STERN',
'[WILLES]':'WILLES',
'ŞAHIN':'SAHIN',
'[STERN(?)':'STERN',
'[STERN]':'STERN',
'GRALEY':'GRAMLEY',
'ALTMA”':'ALTMANN'}
def name_corr(val):
sentence=""
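# returns (cleaned_speaker_name, leftover_text): besides mapping known typos via
# `corrections`, this strips "(?)" markers and splits off transcript text that was
# glued onto the speaker token so it can be prepended back to the interjection content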
dictkeys=[key for key, value in corrections.items()]
if val in dictkeys:
val = corrections[val]
else:
if re.match(".*\(\?\)",val):
val = re.search("(.*)(\(\?\))",val)[1]
if val in dictkeys:
val = corrections[val]
if len(val.split(" "))>1:
#print(val.split(" ")[0])
#print(val.split(" ")[1:])
sentencehelp = " ".join(val.split(" ")[1:])
if not len(re.findall("Yes",sentencehelp))>7:
if len(sentencehelp)>10:
sentence = sentencehelp
#print(sentence)
val = val.split(" ")[0]
if val in dictkeys:
val = corrections[val]
#print(val)
return val,sentence
def get_interjections():
base_directory = base_directory = "../../../collection/python/data/transcript_raw_text"
raw_doc = os.listdir(base_directory)
filelist = sorted(raw_doc)
documents = []
if os.path.exists("../output/speaker_data"):
shutil.rmtree("../output/speaker_data")
os.mkdir("../output/speaker_data")
for doc_path in filelist:
with open("{}/{}".format(base_directory,doc_path),'r') as f:
documents.append(f.read().replace("\n"," ").replace(":",".").replace(r"\s\s+"," "))
date = pd.Series(data=filelist).apply(lambda x: x[0:10])
#print(date)
parsed_text = pd.DataFrame()
for doc_index in range(len(documents)):
if doc_index%10 == 0:
print("Working on producing interjections for doc #{} of ~{}".format(doc_index,len(documents)))
# THIS METRIC FAILS FOR 59 out of 4857 occurrences
interjections = re.split(' MR\. | MS\. | CHAIRMAN | VICE CHAIRMAN ', documents[doc_index])[1:]
temp_df = pd.DataFrame(columns=['Date','Speaker','content'],index=range(len(interjections)))
#Temporary data frame
for j in range(len(interjections)):
interjection = interjections[j]
temp_df['Date'].loc[j] = date[doc_index]
#speaker = "".join([char for char in if char.isalnum()])
speakercontent = interjection.split('.')[0].strip()
name,sentence = name_corr(speakercontent)
content = ''.join(interjection.split('.')[1:])
if not sentence=="":
content = sentence +" "+content
#print(content)
temp_df['Speaker'].loc[j] = name
temp_df['content'].loc[j] = content
parsed_text = pd.concat([parsed_text,temp_df],ignore_index=True)
parsed_text.to_pickle("parsed_text.pkl")
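# reload the pickle that was just written so the cleaning steps below can also be
# rerun from this checkpoint without re-parsing every transcript (assumed intent)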
parsed_text = pd.read_pickle("parsed_text.pkl")
#speakerlist = sorted(parsed_text["Speaker"].unique().tolist())
# Get names of indexes for which we have an unidentified speaker and drop those
indexNames = parsed_text[ (parsed_text['Speaker'] == 'mY0') | (parsed_text['Speaker'] == 'WL”') | (parsed_text['Speaker'] == 'W') | (parsed_text['Speaker'] == 'AL"N') ].index
parsed_text.drop(indexNames , inplace=True)
parsed_text["content"] = parsed_text["content"].apply(lambda x: " ".join(str(x).split()[1:]) if len(str(x).split())>1 and str(x).split()[0]=="LINDSEY" else x)
parsed_text["Speaker"] = parsed_text["Speaker"].apply(lambda x: "LINDSEY" if x=="D" else x)
# Delete content with a check for presence of members.
#parsed_text['check']=parsed_text['content'].apply(lambda x: len(re.findall("Yes",x)))
#parsed_text['d_presence']=parsed_text['check']>7
parsed_text.to_csv("../output/interjections.csv",index=False)
return parsed_text
'''
The FOMC Transcript is split into 2 sections:
1)Economic Discussion, 2) Policy Discussion
This function tags each interjection by an FOMC member with their associated FOMC discussion
'''
def tag_interjections_with_section(interjection_df):
separation_df = pd.read_excel("../data/Separation.xlsx")
meeting_df = pd.read_csv("../../../derivation/python/output/meeting_derived_file.csv")
separation_df = separation_df.rename(columns={separation_df.columns[0]:"date_string"})
separation_df.date_string = separation_df.date_string.apply(str)
separation_df['Date'] = pd.to_datetime(separation_df.date_string,format="%Y%m")
interjection_df['Date'] = pd.to_datetime(interjection_df['Date'])
interjection_df = interjection_df[(interjection_df.Date>pd.to_datetime("1987-07-31"))&
(interjection_df.Date<pd.to_datetime("2006-02-01"))]
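# restrict to the Aug 1987 - Jan 2006 window, presumably the meetings covered by
# the section split in Separation.xlsx (assumption)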
cc_df = meeting_df[meeting_df.event_type=="Meeting"]
print(cc_df)
cc_df['Date'] = pd.to_datetime(cc_df['start_date'])
cc_df['end_date'] = pd.to_datetime(cc_df['end_date'])
interjection_df = interjection_df[interjection_df['Date'].isin(cc_df['Date'])]
interjection_df = pd.merge(interjection_df,cc_df[['Date','end_date']],on="Date",how="left")
interjection_df['date_string'] = interjection_df.end_date.\
apply(lambda x: x.strftime("%Y%m")).apply(str)
separation_df['date_ind'] = separation_df.date_string.astype(int)
separation_df = separation_df.set_index('date_ind')
meeting_groups = interjection_df.groupby("Date")
tagged_interjections = | pd.DataFrame(columns=interjection_df.columns) | pandas.DataFrame |
from EL.models import resnet
import os
from EL import CONSTS
import torch.nn as nn
from torchvision import transforms
import torch
from sacred import Experiment
import argparse
import numpy as np
from EL.data.data import ChexpertDataset
from EL.models.models import SenderChexpert, ReceiverChexpert
from EL.utils.utils import dump_sender_receiver
from EL.experiments import Trainer
import pandas as pd
from captum.attr import NeuronConductance
LOG_DIR_PATH = os.path.join(CONSTS.RESULTS_DIR, 'logs')
# PLOT_DIR = CONSTS.OUTPUT_DIR
ex = Experiment('EL')
# ex.observers.append(FileStorageObserver(LOG_DIR_PATH))
@ex.config
def config():
batch_size = 100
img_size_x = 224
img_size_y = 224
exp_name = 'chexpert_pleural_game_gs_scratch'
gpu = 0
max_len = 3
embed_dim = 75
lr = 1e-3
hidden_size = 100
vocab_size = 100
temperature = 1
@ex.named_config
def gs():
vocab_size = 100
hidden_size = 50
temperature = 1
exp_name = 'chexpert_pleural_game_gs'
@ex.named_config
def rnn():
embed_dim = 75
vocab_size = 100
hidden_size = 100
temperature = 0.7
max_len = 4
exp_name = 'chexpert_pleural_game_rnn'
def loss(sender_input, _message, _receiver_input, receiver_output, _labels):
loss = nn.CrossEntropyLoss()(receiver_output, _labels)
acc = (torch.argmax(receiver_output, 1) == _labels).sum().item()
return loss, {'acc': acc/len(_labels)}
@ex.automain
def main(_run):
# ===============
# INTRO
# ===============
args = argparse.Namespace(**_run.config)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if args.gpu > -1:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if args.gpu < 0:
device = torch.device("cpu")
else:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
data_transforms = {
'test': transforms.Compose([
transforms.Resize((args.img_size_x, 224)),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.repeat(3, 1, 1))
]),
}
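# the single-channel X-ray tensors are replicated to three channels (the Lambda
# above) so they match the RGB input expected by the ResNet backbone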
test_dataset = ChexpertDataset(os.path.join(CONSTS.DATA_DIR, 'CheXpert', 'test_pleural.csv'),
root_dir=CONSTS.DATA_DIR, transform=data_transforms['test'])
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)
# exp_name = 'chexpert_plueral'
# model_path = os.path.join(CONSTS.RESULTS_DIR, 'models', exp_name, 'best_model.pth')
model = resnet.resnet50(pretrained=False)
sender = SenderChexpert(model=model, output_size=args.vocab_size)
receiver = ReceiverChexpert(input_size=args.hidden_size)
model_path = os.path.join(CONSTS.RESULTS_DIR, 'models', args.exp_name)
if not os.path.exists(model_path):
os.makedirs(model_path)
output_dir = os.path.join(CONSTS.RESULTS_DIR, 'outputs', args.exp_name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
tensorboard_path = os.path.join(CONSTS.RESULTS_DIR, 'logs', 'tensorboard', args.exp_name)
if not os.path.exists(tensorboard_path):
os.makedirs(tensorboard_path)
run_type = None
if len(_run.meta_info['options']['UPDATE']) != 0:
run_type = _run.meta_info['options']['UPDATE'][0]
if run_type == 'rnn':
from EL.models.multi_symbol_gs import RnnSenderGS, RnnReceiverGS, SenderReceiverRnnGS
sender = RnnSenderGS(agent=sender, cell='lstm', max_len=args.max_len, embed_dim=args.embed_dim,
force_eos=True, vocab_size=args.vocab_size, hidden_size=args.hidden_size,
temperature=args.temperature, straight_through=False, trainable_temperature=False)
receiver = RnnReceiverGS(agent=receiver, cell='lstm', embed_dim=args.embed_dim, vocab_size=args.vocab_size,
hidden_size=args.hidden_size)
game = SenderReceiverRnnGS(sender=sender, receiver=receiver, loss=loss, length_cost=0.0)
else:
from EL.models.single_symbol_gs import GumbelSoftmaxWrapper, SymbolReceiverWrapper, SymbolGameGS
sender = GumbelSoftmaxWrapper(sender, temperature=args.temperature) # wrapping into a GS interface, requires GS temperature
receiver = SymbolReceiverWrapper(receiver, args.vocab_size, agent_input_size=args.hidden_size)
game = SymbolGameGS(sender, receiver, loss)
checkpoint = torch.load(os.path.join(model_path, 'best_model.pth'))
game.load_state_dict(checkpoint)
game.to(device)
optimizer = torch.optim.Adam(game.parameters(), lr=1e-3)
trainer = Trainer(
game=game, optimizer=optimizer, train_data=test_loader, train_batches_per_epoch=None,
validation_data=test_loader, val_batches_per_epoch=None)
trainer.eval()
sender_inputs, messages, receiver_inputs, receiver_outputs, labels = \
dump_sender_receiver(game=game, dataset=test_loader, gs=True, variable_length=False, device=device)
msgs = []
for m in messages:
msgs.append(int(m.cpu()))
predictions = []
for pred in receiver_outputs:
predictions.append(int(torch.argmax(pred).cpu()))
lbls = []
for l in labels:
lbls.append(int(l.cpu()))
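# assemble one row per test image: file path, ground-truth label, receiver
# prediction and the discrete message emitted by the sender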
df = | pd.DataFrame({'ID': test_dataset.img_paths, 'Ground Truth': lbls, 'Predictions': predictions, 'Message': msgs}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# /home/smokybobo/opt/repos/git/personal/loadlimit/test/unit/stat/test_tmp.py
# Copyright (C) 2016 authors and contributors (see AUTHORS file)
#
# This module is released under the MIT License.
"""Tempy"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
import asyncio
from collections import defaultdict, namedtuple
from concurrent.futures import ThreadPoolExecutor
from functools import partial
# Third-party imports
import pandas as pd
from pandas import DataFrame, Series, Timestamp
import pytest
import uvloop
# Local imports
from loadlimit.core import BaseLoop
from loadlimit.event import NoEventTasksError, timedata, shutdown
from loadlimit.stat import timecoro
from loadlimit.util import aiter
# ============================================================================
# Globals
# ============================================================================
pytestmark = pytest.mark.usefixtures('testlogging')
# ============================================================================
# Helpers
# ============================================================================
class Period(defaultdict):
"""Store time series data by key"""
def __init__(self, *args, **kwargs):
super().__init__(list, *args, **kwargs)
self.numdata = 0
self.start_date = None
self.end_date = None
def total(self):
"""Calculate the total number of data points are stored"""
ret = sum(len(s) for slist in self.values()
for s in slist)
self.numdata = ret
return ret
async def atotal(self):
"""Async total calculator"""
ret = 0
async for slist in aiter(self.values()):
async for s in aiter(slist):
ret = ret + len(s)
self.numdata = ret
return ret
def dataframe(self, key, startind=0):
"""Create a dataframe from a stored list of series"""
slist = self[key]
index = list(range(startind, startind + len(slist)))
return DataFrame(slist, index=index)
def clearvals(self, key=None):
"""Clear list of given key
If key is None, clears list of all keys.
"""
if key is not None:
self[key] = []
else:
for key in self:
self[key] = []
self.numdata = 0
async def aclearvals(self, key=None):
"""Async version of clearvals()"""
if key is not None:
self[key] = []
else:
async for key in aiter(self):
self[key] = []
self.numdata = 0
def hdf5_results(store, statsdict):
"""Create results from hdf5 store"""
# Dates
start = statsdict.start_date
end = statsdict.end_date
# Duration (in seconds)
duration = (end - start).total_seconds()
results = {}
index = ['Total', 'Median', 'Average', 'Min', 'Max', 'Rate']
ResultType = namedtuple('ResultType', [n.lower() for n in index])
for name in statsdict:
key = 'timeseries/{}'.format(name)
# Number of iterations
storeobj = store.get_storer(key)
numiter = storeobj.nrows
df = store[key]
delta = df['delta']
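# r collects: total count, then median/mean/min/max latency in milliseconds, then
# throughput in iterations per second, matching the `index` labels defined above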
r = [numiter]
for val in [delta.median(), delta.mean(), delta.min(),
delta.max()]:
r.append(val.total_seconds() * 1000)
r.append(numiter / duration)
r = ResultType(*r)
results[name] = Series(r, index=index)
dfindex = list(sorted(results, key=lambda k: k))
vals = [results[v] for v in dfindex]
df = DataFrame(vals, index=dfindex)
print(df)
# print(df.info())
def memory_results(statsdict):
"""Create results from hdf5 store"""
# key = 'timeseries/{}'.format(name)
# Dates
start = statsdict.start_date
end = statsdict.end_date
# Duration (in seconds)
duration = (end - start).total_seconds()
results = {}
index = ['Total', 'Median', 'Average', 'Min', 'Max', 'Rate']
ResultType = namedtuple('ResultType', [n.lower() for n in index])
# Number of iterations
# storeobj = store.get_storer(key)
# numiter = storeobj.nrows
for name, slist in statsdict.items():
numiter = len(slist)
df = DataFrame(slist, index=list(range(numiter)))
delta = df['delta']
r = [numiter]
for val in [delta.median(), delta.mean(), delta.min(),
delta.max()]:
r.append(val.total_seconds() * 1000)
r.append(numiter / duration)
r = ResultType(*r)
results[name] = Series(r, index=index)
dfindex = list(sorted(results, key=lambda k: k))
vals = [results[v] for v in dfindex]
df = | DataFrame(vals, index=dfindex) | pandas.DataFrame |
import functools
import numpy as np
import scipy
import scipy.linalg
import scipy
import scipy.sparse as sps
import scipy.sparse.linalg as spsl
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import logging
import tables as tb
import os
import sandy
import pytest
pd.options.display.float_format = '{:.5e}'.format
__author__ = "<NAME>"
__all__ = [
"CategoryCov",
"EnergyCov",
"triu_matrix",
"corr2cov",
"random_corr",
"random_cov",
"sample_distribution",
]
S = np.array([[1, 1, 1],
[1, 2, 1],
[1, 3, 1]])
var = np.array([[0, 0, 0],
[0, 2, 0],
[0, 0, 3]])
minimal_covtest = pd.DataFrame(
[[9437, 2, 1e-2, 9437, 2, 1e-2, 0.02],
[9437, 2, 2e5, 9437, 2, 2e5, 0.09],
[9437, 2, 1e-2, 9437, 102, 1e-2, 0.04],
[9437, 2, 2e5, 9437, 102, 2e5, 0.05],
[9437, 102, 1e-2, 9437, 102, 1e-2, 0.01],
[9437, 102, 2e5, 9437, 102, 2e5, 0.01]],
columns=["MAT", "MT", "E", "MAT1", "MT1", 'E1', "VAL"]
)
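# minimal_covtest is referenced by the CategoryCov.from_stack doctests below;
# S and var above appear to be similar small doctest fixtures (assumption)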
def cov33csv(func):
def inner(*args, **kwargs):
key = "<KEY>"
kw = kwargs.copy()
if key in kw:
if kw[key]:
print(f"found argument '{key}', ignore oher arguments")
out = func(
*args,
index_col=[0, 1, 2],
header=[0, 1, 2],
)
out.index.names = ["MAT", "MT", "E"]
out.columns.names = ["MAT", "MT", "E"]
return out
else:
del kw[key]
out = func(*args, **kw)
return out
return inner
class _Cov(np.ndarray):
"""Covariance matrix treated as a `numpy.ndarray`.
Methods
-------
corr
extract correlation matrix
corr2cov
produce covariance matrix given correlation matrix and standard
deviation array
eig
get covariance matrix eigenvalues and eigenvectors
get_L
decompose and extract lower triangular matrix
sampling
draw random samples
"""
def __new__(cls, arr):
obj = np.ndarray.__new__(cls, arr.shape, float)
obj[:] = arr[:]
if not obj.ndim == 2:
raise sandy.Error("covariance matrix must have two dimensions")
if not np.allclose(obj, obj.T):
raise sandy.Error("covariance matrix must be symmetric")
if (np.diag(arr) < 0).any():
raise sandy.Error("covariance matrix must have positive variances")
return obj
@staticmethod
def _up2down(self):
U = np.triu(self)
L = np.triu(self, 1).T
C = U + L
return C
def eig(self):
"""
Extract eigenvalues and eigenvectors.
Returns
-------
`Pandas.Series`
real part of eigenvalues sorted in descending order
`np.array`
matrix of eigenvectors
"""
E, V = scipy.linalg.eig(self)
E, V = E.real, V.real
return E, V
def corr(self):
"""Extract correlation matrix.
.. note:: zeros on the covariance matrix diagonal are translated
into zeros also on the correlation matrix diagonal.
Returns
-------
`sandy.formats.utils.Cov`
correlation matrix
"""
std = np.sqrt(np.diag(self))
with np.errstate(divide='ignore', invalid='ignore'):
coeff = np.true_divide(1, std)
coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN
corr = np.multiply(np.multiply(self.T, coeff).T, coeff)
return self.__class__(corr)
def _reduce_size(self):
"""
Reduces the size of the matrix, erasing the null values.
Returns
-------
nonzero_idxs : numpy.ndarray
The indices of the diagonal that are not null.
cov_reduced : sandy.core.cov._Cov
The reduced matrix.
"""
nonzero_idxs = np.flatnonzero(np.diag(self))
cov_reduced = self[nonzero_idxs][:, nonzero_idxs]
return nonzero_idxs, cov_reduced
@classmethod
def _restore_size(cls, nonzero_idxs, cov_reduced, dim):
"""
Restore the size of the matrix
Parameters
----------
nonzero_idxs : numpy.ndarray
The indices of the diagonal that are not null.
cov_reduced : sandy.core.cov._Cov
The reduced matrix.
dim : int
Dimension of the original matrix.
Returns
-------
cov : sandy.core.cov._Cov
Matrix of specified dimensions.
"""
cov = _Cov(np.zeros((dim, dim)))
for i, ni in enumerate(nonzero_idxs):
cov[ni, nonzero_idxs] = cov_reduced[i]
return cov
def sampling(self, nsmp, seed=None):
"""
Extract random samples from the covariance matrix, either using
the cholesky or the eigenvalue decomposition.
Parameters
----------
nsmp : `int`
number of samples
seed : `int`
seed for the random number generator (default is `None`)
Returns
-------
`np.array`
2D array of random samples with dimension `(self.shape[0], nsmp)`
"""
dim = self.shape[0]
np.random.seed(seed=seed)
y = np.random.randn(dim, nsmp)
nonzero_idxs, cov_reduced = self._reduce_size()
L_reduced = cov_reduced.get_L()
L = self.__class__._restore_size(nonzero_idxs, L_reduced, dim)
samples = np.array(L.dot(y))
return samples
def get_L(self):
"""
Extract lower triangular matrix `L` for which `L*L^T == self`.
Returns
-------
`np.array`
lower triangular matrix
"""
try:
L = scipy.linalg.cholesky(
self,
lower=True,
overwrite_a=False,
check_finite=False
)
except np.linalg.linalg.LinAlgError:
E, V = self.eig()
E[E <= 0] = 0
Esqrt = np.diag(np.sqrt(E))
M = V.dot(Esqrt)
Q, R = scipy.linalg.qr(M.T)
L = R.T
return L
class CategoryCov():
"""
Properties
----------
data
covariance matrix as a dataframe
size
first dimension of the covariance matrix
Methods
-------
corr2cov
create a covariance matrix given a correlation matrix and a standard
deviation vector
from_stack
create a covariance matrix from a stacked `pd.DataFrame`
from_stdev
construct a covariance matrix from a stdev vector
from_var
construct a covariance matrix from a variance vector
get_corr
extract correlation matrix from covariance matrix
get_eig
extract eigenvalues and eigenvectors from covariance matrix
get_L
extract lower triangular matrix such that $C=L L^T$
get_std
extract standard deviations from covariance matrix
invert
calculate the inverse of the matrix
sampling
extract perturbation coefficients according to chosen distribution
and covariance matrix
"""
def __repr__(self):
return self.data.__repr__()
def __init__(self, *args, **kwargs):
self.data = pd.DataFrame(*args, **kwargs)
@property
def data(self):
"""
Covariance matrix as a dataframe.
Attributes
----------
index : `pandas.Index` or `pandas.MultiIndex`
indices
columns : `pandas.Index` or `pandas.MultiIndex`
columns
values : `numpy.array`
covariance values as `float`
Returns
-------
`pandas.DataFrame`
covariance matrix
Notes
-----
..note :: In the future, other tests will be implemented to check
that the covariance matrix is symmetric and has positive variances.
Examples
--------
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array[1])
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array([[1, 2], [2, -4]]))
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array([[1, 2], [3, 4]]))
"""
return self._data
@data.setter
def data(self, data):
self._data = pd.DataFrame(data, dtype=float)
if not len(data.shape) == 2 and data.shape[0] == data.shape[1]:
raise TypeError("Covariance matrix must have two dimensions")
if not (np.diag(data) >= 0).all():
raise TypeError("Covariance matrix must have positive variance")
sym_limit = 10
# Round to avoid numerical fluctuations
if not (data.values.round(sym_limit) == data.values.T.round(sym_limit)).all():
raise TypeError("Covariance matrix must be symmetric")
@property
def size(self):
return self.data.values.shape[0]
def get_std(self):
"""
Extract standard deviations.
Returns
-------
`pandas.Series`
1d array of standard deviations
Examples
--------
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).get_std()
0 1.00000e+00
1 1.00000e+00
Name: STD, dtype: float64
"""
cov = self.to_sparse().diagonal()
std = np.sqrt(cov)
return pd.Series(std, index=self.data.index, name="STD")
def get_eig(self, tolerance=None):
"""
Extract eigenvalues and eigenvectors.
Parameters
----------
tolerance : `float`, optional, default is `None`
replace all eigenvalues smaller than a given tolerance with zeros.
The replacement condition is implemented as:
.. math::
$$
\frac{e_i}{e_{MAX}} < tolerance
$$
Then, a `tolerance=1e-3` will replace all eigenvalues
1000 times smaller than the largest eigenvalue.
A `tolerance=0` will replace all negative eigenvalues.
Returns
-------
`Pandas.Series`
array of eigenvalues
`pandas.DataFrame`
matrix of eigenvectors
Notes
-----
.. note:: only the real part of the eigenvalues is preserved
.. note:: the discussion associated with the implementation
of this algorithm is available [here](https://github.com/luca-fiorito-11/sandy/discussions/135)
Examples
--------
Extract eigenvalues of correlation matrix.
>>> sandy.CategoryCov([[1, 0.4], [0.4, 1]]).get_eig()[0]
0 1.40000e+00
1 6.00000e-01
Name: EIG, dtype: float64
Extract eigenvectors of correlation matrix.
>>> sandy.CategoryCov([[1, 0.4], [0.4, 1]]).get_eig()[1]
0 1
0 7.07107e-01 -7.07107e-01
1 7.07107e-01 7.07107e-01
Extract eigenvalues of covariance matrix.
>>> sandy.CategoryCov([[0.1, 0.1], [0.1, 1]]).get_eig()[0]
0 8.90228e-02
1 1.01098e+00
Name: EIG, dtype: float64
Set up a tolerance.
>>> sandy.CategoryCov([[0.1, 0.1], [0.1, 1]]).get_eig(tolerance=0.1)[0]
0 0.00000e+00
1 1.01098e+00
Name: EIG, dtype: float64
Test with negative eigenvalues.
>>> sandy.CategoryCov([[1, 2], [2, 1]]).get_eig()[0]
0 3.00000e+00
1 -1.00000e+00
Name: EIG, dtype: float64
Replace negative eigenvalues.
>>> sandy.CategoryCov([[1, 2], [2, 1]]).get_eig(tolerance=0)[0]
0 3.00000e+00
1 0.00000e+00
Name: EIG, dtype: float64
Check output size.
>>> cov = sandy.CategoryCov.random_cov(50, seed=11)
>>> assert cov.get_eig()[0].size == cov.data.shape[0] == 50
>>> sandy.CategoryCov([[1, 0.2, 0.1], [0.2, 2, 0], [0.1, 0, 3]]).get_eig()[0]
0 9.56764e-01
1 2.03815e+00
2 3.00509e+00
Name: EIG, dtype: float64
Real test on H1 file
>>> endf6 = sandy.get_endf6_file("jeff_33", "xs", 10010)
>>> ek = sandy.energy_grids.CASMO12
>>> err = endf6.get_errorr(ek_errorr=ek, err=1)
>>> cov = err.get_cov()
>>> cov.get_eig()[0].sort_values(ascending=False).head(7)
0 3.66411e-01
1 7.05311e-03
2 1.55346e-03
3 1.60175e-04
4 1.81374e-05
5 1.81078e-06
6 1.26691e-07
Name: EIG, dtype: float64
>>> assert not (cov.get_eig()[0] >= 0).all()
>>> assert (cov.get_eig(tolerance=0)[0] >= 0).all()
"""
E, V = scipy.linalg.eig(self.data)
E = pd.Series(E.real, name="EIG")
V = pd.DataFrame(V.real)
if tolerance is not None:
E[E/E.max() < tolerance] = 0
return E, V
def get_corr(self):
"""
Extract correlation matrix.
Returns
-------
df : :obj: `CategoryCov`
correlation matrix
Examples
--------
>>> sandy.CategoryCov([[4, 2.4],[2.4, 9]]).get_corr()
0 1
0 1.00000e+00 4.00000e-01
1 4.00000e-01 1.00000e+00
"""
cov = self.data.values
with np.errstate(divide='ignore', invalid='ignore'):
coeff = np.true_divide(1, self.get_std().values)
coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN
corr = np.multiply(np.multiply(cov, coeff).T, coeff)
df = pd.DataFrame(
corr,
index=self.data.index,
columns=self.data.columns,
)
return self.__class__(df)
def invert(self, rows=None):
"""
Method for calculating the inverse matrix.
Parameters
----------
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`CategoryCov`
The inverse matrix.
Examples
--------
>>> S = sandy.CategoryCov(np.diag(np.array([1, 2, 3])))
>>> S.invert()
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
>>> S = sandy.CategoryCov(np.diag(np.array([0, 2, 3])))
>>> S.invert()
0 1 2
0 0.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
>>> S = sandy.CategoryCov(np.diag(np.array([0, 2, 3])))
>>> S.invert(rows=1)
0 1 2
0 0.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
"""
index = self.data.index
columns = self.data.columns
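# drop the rows/columns with a zero diagonal (reduce_size), invert the reduced
# block, then restore the original shape, filling the removed entries with zeros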
M_nonzero_idxs, M_reduce = reduce_size(self.data)
cov = sps.csc_matrix(M_reduce.values)
rows_ = cov.shape[0] if rows is None else rows
data = sparse_tables_inv(cov, rows=rows_)
M_inv = restore_size(M_nonzero_idxs, data, len(self.data))
M_inv = M_inv.reindex(index=index, columns=columns).fillna(0)
return self.__class__(M_inv)
def log2norm_cov(self, mu):
"""
Transform covariance matrix to the one of the underlying normal
distribution.
Parameters
----------
mu : iterable
The desired mean values of the target lognormal distribution.
Returns
-------
`CategoryCov` of the underlying normal covariance matrix
Examples
--------
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> cov.log2norm_cov(pd.Series(np.ones(cov.data.shape[0]), index=cov.data.index))
A B C
A 2.19722e+00 1.09861e+00 1.38629e+00
B 1.09861e+00 2.39790e+00 1.60944e+00
C 1.38629e+00 1.60944e+00 2.07944e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = pd.Series([1, 2, .5], index=["A", "B", "C"])
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = [1, 2, .5]
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = np.array([1, 2, .5])
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
Notes
-----
..notes:: Reference for the equation is 10.1016/j.nima.2012.06.036
.. math::
$$
cov(lnx_i, lnx_j) = \ln\left(\frac{cov(x_i,x_j)}{<x_i>\cdot<x_j>}+1\right)
$$
"""
mu_ = np.diag(1 / pd.Series(mu))
mu_ = pd.DataFrame(mu_, index=self.data.index, columns=self.data.index)
return self.__class__(np.log(self.sandwich(mu_).data + 1))
def log2norm_mean(self, mu):
"""
Transform mean values to the mean values of the underlying normal
distribution.
Parameters
----------
mu : iterable
The target mean values.
Returns
-------
`pd.Series` of the underlying normal distribution mean values
Examples
--------
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = pd.Series(np.ones(cov.data.shape[0]), index=cov.data.index)
>>> cov.log2norm_mean(mu)
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> cov.log2norm_mean([1, 1, 1])
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = np.ones(cov.data.shape[0])
>>> cov.log2norm_mean(mu)
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
Reindexing example
"""
mu_ = pd.Series(mu)
mu_.index = self.data.index
return np.log(mu_**2 / np.sqrt(np.diag(self.data) + mu_**2))
def sampling(self, nsmp, seed=None, rows=None, pdf='normal',
tolerance=None, relative=True):
"""
Extract perturbation coefficients according to chosen distribution with
covariance from given covariance matrix. See note for non-normal
distribution sampling.
The samples' mean will be 1 or 0 depending on `relative` kwarg.
Parameters
----------
nsmp : `int`
number of samples.
seed : `int`, optional, default is `None`
seed for the random number generator (by default use `numpy`
default pseudo-random number generator).
rows : `int`, optional, default is `None`
option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
pdf : `str`, optional, default is 'normal'
random numbers distribution.
Available distributions are:
* `'normal'`
* `'uniform'`
* `'lognormal'`
tolerance : `float`, optional, default is `None`
replace all eigenvalues smaller than a given tolerance with zeros.
relative : `bool`, optional, default is `True`
flag to switch between relative and absolute covariance matrix
handling
* `True`: samples' mean will be 1
* `False`: samples' mean will be 0
Returns
-------
`sandy.Samples`
object containing samples
Notes
-----
.. note:: sampling with uniform distribution is performed on
diagonal covariance matrix, neglecting all correlations.
.. note:: sampling with lognormal distribution gives a set of samples
with mean=1 as lognormal distribution can not have mean=0.
Therefore, `relative` parameter does not apply to it.
Examples
--------
Draw 3 sets of samples using custom seed:
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(3, seed=11)
0 1
0 -7.49455e-01 -2.13159e+00
1 1.28607e+00 1.10684e+00
2 1.48457e+00 9.00879e-01
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(3, seed=11, rows=1)
0 1
0 -7.49455e-01 -2.13159e+00
1 1.28607e+00 1.10684e+00
2 1.48457e+00 9.00879e-01
>>> sample = sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(1000000, seed=11)
>>> sample.data.cov()
0 1
0 9.98662e-01 3.99417e-01
1 3.99417e-01 9.98156e-01
Small negative eigenvalue:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(3, seed=11, tolerance=0)
0 1
0 2.74945e+00 5.21505e+00
1 7.13927e-01 1.07147e+00
2 5.15435e-01 1.64683e+00
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, tolerance=0).data.cov()
0 1
0 9.98662e-01 -1.99822e-01
1 -1.99822e-01 2.99437e+00
Sampling with different `pdf`:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(3, seed=11, pdf='uniform', tolerance=0)
0 1
0 -1.07578e-01 2.34960e+00
1 -6.64587e-01 5.21222e-01
2 8.72585e-01 9.12563e-01
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(3, seed=11, pdf='lognormal', tolerance=0)
0 1
0 3.03419e+00 1.57919e+01
1 5.57248e-01 4.74160e-01
2 4.72366e-01 6.50840e-01
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0).data.cov()
0 1
0 1.00042e+00 -1.58806e-03
1 -1.58806e-03 3.00327e+00
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0).data.cov()
0 1
0 1.00219e+00 1.99199e-01
1 1.99199e-01 3.02605e+00
`relative` kwarg usage:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='normal', tolerance=0, relative=True).data.mean(axis=0)
0 1.00014e+00
1 9.99350e-01
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='normal', tolerance=0, relative=False).data.mean(axis=0)
0 1.41735e-04
1 -6.49679e-04
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0, relative=True).data.mean(axis=0)
0 9.98106e-01
1 9.99284e-01
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0, relative=False).data.mean(axis=0)
0 -1.89367e-03
1 -7.15929e-04
dtype: float64
Independence of lognormal distribution sampling from the `relative` kwarg
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0, relative=True).data.mean(axis=0)
0 9.99902e-01
1 9.99284e-01
dtype: float64
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0, relative=False).data.mean(axis=0)
0 9.99902e-01
1 9.99284e-01
dtype: float64
"""
dim = self.data.shape[0]
pdf_ = pdf if pdf != 'lognormal' else 'normal'
y = sample_distribution(dim, nsmp, seed=seed, pdf=pdf_) - 1
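# center the draws at zero before applying the decomposed covariance below
# (assumes sample_distribution returns unit-mean samples)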
y = sps.csc_matrix(y)
# the covariance matrix to decompose is created depending on the chosen
# pdf
if pdf == 'uniform':
to_decompose = self.__class__(np.diag(np.diag(self.data)))
elif pdf == 'lognormal':
ones = np.ones(self.data.shape[0])
to_decompose = self.log2norm_cov(ones)
else:
to_decompose = self
L = sps.csr_matrix(to_decompose.get_L(rows=rows,
tolerance=tolerance))
samples = pd.DataFrame(L.dot(y).toarray(), index=self.data.index,
columns=list(range(nsmp)))
if pdf == 'lognormal':
# mean value of lognormally sampled distributions will be one by
# default
samples = np.exp(samples.add(self.log2norm_mean(ones), axis=0))
elif relative:
samples += 1
return sandy.Samples(samples.T)
@classmethod
def from_var(cls, var):
"""
Construct the covariance matrix from the variance vector.
Parameters
----------
var : 1D iterable
Variance vector.
Returns
-------
`CategoryCov`
Object containing the covariance matrix.
Example
-------
>>> S = pd.Series(np.array([0, 2, 3]), index=pd.Index([1, 2, 3]))
>>> cov = sandy.CategoryCov.from_var(S)
>>> cov
1 2 3
1 0.00000e+00 0.00000e+00 0.00000e+00
2 0.00000e+00 2.00000e+00 0.00000e+00
3 0.00000e+00 0.00000e+00 3.00000e+00
>>> assert type(cov) is sandy.CategoryCov
>>> S = sandy.CategoryCov.from_var((1, 2, 3))
>>> S
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 2.00000e+00 0.00000e+00
2 0.00000e+00 0.00000e+00 3.00000e+00
>>> assert type(S) is sandy.CategoryCov
>>> assert type(sandy.CategoryCov.from_var([1, 2, 3])) is sandy.CategoryCov
"""
var_ = pd.Series(var)
cov_values = sps.diags(var_.values).toarray()
cov = pd.DataFrame(cov_values,
index=var_.index, columns=var_.index)
return cls(cov)
@classmethod
def from_stdev(cls, std):
"""
Construct the covariance matrix from the standard deviation vector.
Parameters
----------
std : `pandas.Series`
Standard deviations vector.
Returns
-------
`CategoryCov`
Object containing the covariance matrix.
Example
-------
>>> S = pd.Series(np.array([0, 2, 3]), index=pd.Index([1, 2, 3]))
>>> cov = sandy.CategoryCov.from_stdev(S)
>>> cov
1 2 3
1 0.00000e+00 0.00000e+00 0.00000e+00
2 0.00000e+00 4.00000e+00 0.00000e+00
3 0.00000e+00 0.00000e+00 9.00000e+00
>>> assert type(cov) is sandy.CategoryCov
>>> S = sandy.CategoryCov.from_stdev((1, 2, 3))
>>> S
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 4.00000e+00 0.00000e+00
2 0.00000e+00 0.00000e+00 9.00000e+00
>>> assert type(S) is sandy.CategoryCov
>>> assert type(sandy.CategoryCov.from_stdev([1, 2, 3])) is sandy.CategoryCov
"""
std_ = pd.Series(std)
var = std_ * std_
return cls.from_var(var)
@classmethod
def from_stack(cls, data_stack, index, columns, values, rows=10000000,
kind='upper'):
"""
Create a covariance matrix from a stacked dataframe.
Parameters
----------
data_stack : `pd.Dataframe`
Stacked dataframe.
index : 1D iterable, optional
Index of the final covariance matrix.
columns : 1D iterable, optional
Columns of the final covariance matrix.
values : `str`, optional
Name of the column where the values are located.
rows : `int`, optional
Number of rows to take into account into each loop. The default
is 10000000.
kind : `str`, optional
Select if the stack data represents upper or lower triangular
matrix. The default is 'upper'.
Returns
-------
`sandy.CategoryCov`
Covariance matrix.
Examples
--------
If the stack data represents the covariance matrix:
>>> S = pd.DataFrame(np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]]))
>>> S = S.stack().reset_index().rename(columns = {'level_0': 'dim1', 'level_1': 'dim2', 0: 'cov'})
>>> S = S[S['cov'] != 0]
>>> sandy.CategoryCov.from_stack(S, index=['dim1'], columns=['dim2'], values='cov', kind='all')
dim2 0 1 2
dim1
0 1.00000e+00 1.00000e+00 1.00000e+00
1 1.00000e+00 2.00000e+00 1.00000e+00
2 1.00000e+00 1.00000e+00 1.00000e+00
If the stack data represents only the upper triangular part of the
covariance matrix:
>>> test_1 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT", "MT", "E"], columns=["MAT1", "MT1", "E1"], values='VAL').data
>>> test_1
MAT1 9437
MT1 2 102
E1 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT MT E
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> test_2 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT", "MT", "E"], columns=["MAT1", "MT1", "E1"], values='VAL', rows=1).data
>>> test_2
MAT1 9437
MT1 2 102
E1 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT MT E
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> assert (test_1 == test_2).all().all()
If the stack data represents only the lower triangular part of the
covariance matrix:
>>> test_1 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT1", "MT1", "E1"], columns=["MAT", "MT", "E"], values='VAL', kind="lower").data
>>> test_1
MAT 9437
MT 2 102
E 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT1 MT1 E1
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> test_2 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT1", "MT1", "E1"], columns=["MAT", "MT", "E"], values='VAL', kind="lower", rows=1).data
>>> test_2
MAT 9437
MT 2 102
E 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT1 MT1 E1
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> assert (test_1 == test_2).all().all()
"""
cov = segmented_pivot_table(data_stack, rows=rows, index=index,
columns=columns, values=values)
if kind == 'all':
return cls(cov)
else:
return triu_matrix(cov, kind=kind)
def _gls_Vy_calc(self, S, rows=None):
"""
2D calculated output using
.. math::
$$
S\cdot V_{x_{prior}}\cdot S.T
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`pd.DataFrame`
Covariance matrix `Vy_calc` calculated using
S.dot(Vx_prior).dot(S.T)
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> cov._gls_Vy_calc(S)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
>>> cov._gls_Vy_calc(S, rows=1)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
"""
index = pd.DataFrame(S).index
S_ = pd.DataFrame(S).values
rows_ = S_.shape[0] if rows is None else rows
Vy_calc = sparse_tables_dot_multiple([S_, self.data.values,
S_.T], rows=rows_)
return pd.DataFrame(Vy_calc, index=index, columns=index)
def _gls_G(self, S, Vy_extra=None, rows=None):
"""
2D calculated output using
.. math::
$$
S\cdot V_{x_{prior}}\cdot S.T + V_{y_{extra}}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
Vy_extra : 2D iterable, optional.
2D covariance matrix for y_extra (MXM).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`pd.DataFrame`
Covariance matrix `G` calculated using
S.dot(Vx_prior).dot(S.T) + Vy_extra
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov._gls_G(S, Vy)
0 1
0 6.00000e+00 1.10000e+01
1 1.10000e+01 2.60000e+01
>>> cov._gls_G(S, Vy, rows=1)
0 1
0 6.00000e+00 1.10000e+01
1 1.10000e+01 2.60000e+01
>>> cov._gls_G(S)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
>>> cov._gls_G(S, rows=1)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
"""
# GLS_sensitivity:
Vy_calc = self._gls_Vy_calc(S, rows=rows)
if Vy_extra is not None:
# Data in an appropriate format
Vy_extra_ = sandy.CategoryCov(Vy_extra).data
index = pd.DataFrame(Vy_extra).index
Vy_extra_ = Vy_extra_.values
Vy_calc = Vy_calc.reindex(index=index, columns=index).fillna(0).values
# Calculations:
Vy_calc = sps.csr_matrix(Vy_calc)
Vy_extra_ = sps.csr_matrix(Vy_extra_)
# G calculation
G = Vy_calc + Vy_extra_
G = pd.DataFrame(G.toarray(), index=index, columns=index)
else:
G = Vy_calc
return G
def _gls_G_inv(self, S, Vy_extra=None, rows=None):
"""
2D calculated output using
.. math::
$$
\left(S\cdot V_{x_{prior}}\cdot S.T + V_{y_{extra}}\right)^{-1}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
Vy_extra : 2D iterable, optional
2D covariance matrix for y_extra (MXM).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`pd.DataFrame`
Covariance matrix `G_inv` calculated using
(S.dot(Vx_prior).dot(S.T) + Vy_extra)^-1
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov._gls_G_inv(S, Vy)
0 1
0 7.42857e-01 -3.14286e-01
1 -3.14286e-01 1.71429e-01
>>> cov._gls_G_inv(S, Vy, rows=1)
0 1
0 7.42857e-01 -3.14286e-01
1 -3.14286e-01 1.71429e-01
>>> cov._gls_G_inv(S)
0 1
0 6.25000e+00 -2.75000e+00
1 -2.75000e+00 1.25000e+00
>>> cov._gls_G_inv(S, rows=1)
0 1
0 6.25000e+00 -2.75000e+00
1 -2.75000e+00 1.25000e+00
"""
if Vy_extra is not None:
index = pd.DataFrame(Vy_extra).index
G = self._gls_G(S, Vy_extra=Vy_extra, rows=rows).values
else:
index = pd.DataFrame(S).index
G = self._gls_Vy_calc(S, rows=rows).values
G_inv = sandy.CategoryCov(G).invert(rows=rows).data.values
return pd.DataFrame(G_inv, index=index, columns=index)
def _gls_general_sensitivity(self, S, Vy_extra=None,
rows=None, threshold=None):
"""
Method to obtain general sensitivity according to GLS
.. math::
$$
            V_{x_{prior}}\cdot S^T \cdot \left(S\cdot V_{x_{prior}}\cdot S^T + V_{y_{extra}}\right)^{-1}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
Vy_extra : 2D iterable
2D covariance matrix for y_extra (MXM).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
threshold : `int`, optional
threshold to avoid numerical fluctuations. The default is None.
Returns
-------
`GLS`
GLS sensitivity for a given Vy_extra and S.
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov._gls_general_sensitivity(S, Vy)
0 1
0 -2.00000e-01 2.00000e-01
1 2.28571e-01 5.71429e-02
>>> S = pd.DataFrame([[1, 2], [3, 4]], index=[1, 2],columns=[3, 4])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = pd.DataFrame([[1, 0], [0, 1]], index=[1, 2], columns=[1, 2])
>>> cov._gls_general_sensitivity(S, Vy_extra=Vy)
1 2
3 -2.00000e-01 2.00000e-01
4 2.28571e-01 5.71429e-02
>>> cov._gls_general_sensitivity(S, Vy_extra=Vy, rows=1)
1 2
3 -2.00000e-01 2.00000e-01
4 2.28571e-01 5.71429e-02
>>> cov._gls_general_sensitivity(S)
1 2
3 -2.00000e+00 1.00000e+00
4 1.50000e+00 -5.00000e-01
>>> cov._gls_general_sensitivity(S, rows=1)
1 2
3 -2.00000e+00 1.00000e+00
4 1.50000e+00 -5.00000e-01
"""
index = pd.DataFrame(S).columns
columns = pd.DataFrame(S).index
S_ = pd.DataFrame(S).values
# GLS_sensitivity:
G_inv = self._gls_G_inv(S, Vy_extra=Vy_extra, rows=rows).values
rows_ = S_.shape[0] if rows is None else rows
sensitivity = sparse_tables_dot_multiple([self.data.values, S_.T,
G_inv], rows=rows_)
if threshold is not None:
sensitivity[abs(sensitivity) < threshold] = 0
return pd.DataFrame(sensitivity, index=index, columns=columns)
def _gls_constrained_sensitivity(self, S, rows=None,
threshold=None):
"""
Method to obtain sensitivity according to constrained Least-Squares:
.. math::
$$
            \left(S\cdot V_{x_{prior}}\cdot S^T + V_{y_{extra}}\right)^{-1} \cdot S \cdot V_{x_{prior}}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
threshold : `int`, optional
threshold to avoid numerical fluctuations. The default is None.
Returns
-------
`pd.DataFrame`
constrained Least-Squares sensitivity.
Notes
-----
        .. note:: This method is equivalent to `_gls_general_sensitivity`
                  but for a constrained system.
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = CategoryCov.from_var([1, 1])
>>> cov._gls_constrained_sensitivity(S)
0 1
0 -2.00000e+00 1.50000e+00
1 1.00000e+00 -5.00000e-01
>>> cov._gls_constrained_sensitivity(S, rows=1)
0 1
0 -2.00000e+00 1.50000e+00
1 1.00000e+00 -5.00000e-01
"""
        # Data in an appropriate format
S_ = pd.DataFrame(S)
index = S_.index
columns = S_.columns
G_inv = self._gls_G_inv(S, rows=rows).values
rows_ = S_.shape[0] if rows is None else rows
sensitivity = sparse_tables_dot_multiple([G_inv, S_,
self.data.values],
rows=rows_)
if threshold is not None:
sensitivity[abs(sensitivity) < threshold] = 0
return pd.DataFrame(sensitivity, index=index, columns=columns)
def _gls_cov_sensitivity(self, S, Vy_extra=None,
rows=None, threshold=None):
"""
Method to obtain covariance sensitivity according to GLS:
.. math::
$$
            V_{x_{prior}}\cdot S^T \cdot \left(S\cdot V_{x_{prior}}\cdot S^T + V_{y_{extra}}\right)^{-1} \cdot S
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
Vy_extra : 2D iterable
2D covariance matrix for y_extra (MXM).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
threshold : `int`, optional
threshold to avoid numerical fluctuations. The default is None.
Returns
-------
`pd.DataFrame`
            GLS sensitivity for a given Vy and S.
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov._gls_cov_sensitivity(S, Vy)
0 1
0 4.00000e-01 4.00000e-01
1 4.00000e-01 6.85714e-01
>>> S = pd.DataFrame([[1, 2], [3, 4]], index=[1, 2],columns=[3, 4])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = pd.DataFrame([[1, 0], [0, 1]], index=[1, 2], columns=[1, 2])
>>> cov._gls_cov_sensitivity(S, Vy)
3 4
3 4.00000e-01 4.00000e-01
4 4.00000e-01 6.85714e-01
>>> cov._gls_cov_sensitivity(S, Vy, rows=1)
3 4
3 4.00000e-01 4.00000e-01
4 4.00000e-01 6.85714e-01
"""
index = columns = pd.DataFrame(S).columns
S_ = pd.DataFrame(S).values
general_sens = self._gls_general_sensitivity(S, Vy_extra=Vy_extra,
rows=rows,
threshold=threshold).values
rows_ = S_.shape[0] if rows is None else rows
cov_sens = sparse_tables_dot(general_sens, S_, rows=rows_).toarray()
if threshold is not None:
cov_sens[abs(cov_sens) < threshold] = 0
return pd.DataFrame(cov_sens, index=index, columns=columns)
def gls_update(self, S, Vy_extra=None, rows=None,
threshold=None):
"""
Perform GlS update for a given variance and sensitivity:
.. math::
$$
            V_{x_{post}} = V_{x_{prior}} - V_{x_{prior}}\cdot S^T \cdot \left(S\cdot V_{x_{prior}}\cdot S^T + V_{y_{extra}}\right)^{-1} \cdot S \cdot V_{x_{prior}}
$$
Parameters
----------
Vy_extra : 2D iterable, optional
2D covariance matrix for y_extra (MXM).
S : 2D iterable
Sensitivity matrix (MXN).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
threshold : `int`, optional
            Threshold to avoid numerical fluctuations. The default is None.
Returns
-------
`CategoryCov`
            GLS method applied to a CategoryCov object for a given Vy and S.
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov.gls_update(S, Vy)
0 1
0 6.00000e-01 -4.00000e-01
1 -4.00000e-01 3.14286e-01
>>> cov.gls_update(S, Vy, rows=1)
0 1
0 6.00000e-01 -4.00000e-01
1 -4.00000e-01 3.14286e-01
"""
index, columns = self.data.index, self.data.columns
A = self._gls_cov_sensitivity(S, Vy_extra=Vy_extra,
rows=rows, threshold=threshold).values
rows_ = self.data.shape[0] if rows is None else rows
Vx_prior = self.to_sparse(method='csc_matrix')
diff = sparse_tables_dot(A, Vx_prior, rows=rows_)
# gls update
Vx_post = Vx_prior - diff
Vx_post = Vx_post.toarray()
if threshold is not None:
Vx_post[abs(Vx_post) < threshold] = 0
return self.__class__(pd.DataFrame(Vx_post, index=index, columns=columns))
def constrained_gls_update(self, S, rows=None,
threshold=None):
"""
Perform constrained Least-Squares update for a given sensitivity:
.. math::
$$
            V_{x_{post}} = V_{x_{prior}} - V_{x_{prior}}\cdot S^T \cdot \left(S\cdot V_{x_{prior}}\cdot S^T\right)^{-1} \cdot S \cdot V_{x_{prior}}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
threshold : `int`, optional
            Threshold to avoid numerical fluctuations. The default is None.
Returns
-------
`CategoryCov`
            Constrained Least-Squares method applied to a CategoryCov object
            for a given S.
Notes
-----
        .. note:: This method is equivalent to `gls_update` but for a
                  constrained system.
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> cov_update = cov.constrained_gls_update(S).data.round(decimals=6)
>>> assert np.amax(cov_update.values) == 0.0
>>> cov_update = cov.constrained_gls_update(S, rows=1).data.round(decimals=6)
>>> assert np.amax(cov_update.values) == 0.0
"""
return self.gls_update(S, Vy_extra=None, rows=rows, threshold=threshold)
def sandwich(self, s, rows=None, threshold=None):
"""
Apply the sandwich formula to the CategoryCov object for a given
pandas.Series.
Parameters
----------
s : 1D or 2D iterable
General sensitivities.
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
threshold : `int`, optional
            Threshold to avoid numerical fluctuations. The default is None.
Returns
-------
`float` (if s is 1D iterable)
The resulting scalar number after having applied the sandwich
formula for a given 1D iterable.
`CategoryCov` (if s is 2D iterable)
`CategoryCov` object to which we have applied sandwich
formula for a given 2D iterable.
Warnings
--------
The `CategoryCov` object and the sensitivity (S) must have the same
indices.
Examples
--------
>>> var = np.array([1, 2, 3])
>>> s = pd.Series([1, 2, 3])
>>> cov = sandy.CategoryCov.from_var(var)
>>> cov.sandwich(s)
36.0
>>> s = np.array([1, 2, 3])
>>> var = pd.Series([1, 2, 3])
>>> cov = sandy.CategoryCov.from_var(var)
>>> var = sandy.CategoryCov.from_var(s).data
>>> cov.sandwich(var)
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 8.00000e+00 0.00000e+00
2 0.00000e+00 0.00000e+00 2.70000e+01
"""
if pd.DataFrame(s).shape[1] == 1:
s_ = pd.Series(s)
sandwich = s_.dot(self.data.dot(s_.T))
# sandwich variable is a scalar
return sandwich
else:
s_ = pd.DataFrame(s).T
sandwich = self._gls_Vy_calc(s_, rows=rows)
if threshold is not None:
sandwich[sandwich < threshold] = 0
return self.__class__(sandwich)
def corr2cov(self, std):
"""
Produce covariance matrix given correlation matrix and standard
deviation array.
Same as :obj: `corr2cov` but it works with :obj: `CategoryCov`
instances.
Parameters
----------
corr : :obj: `CategoryCov`
square 2D correlation matrix
std : 1d iterable
array of standard deviations
Returns
-------
:obj: `CategoryCov`
covariance matrix
Examples
--------
Initialize index and columns
>>> idx = ["A", "B", "C"]
>>> std = np.array([1, 2, 3])
>>> corr = sandy.CategoryCov([[1, 0, 2], [0, 3, 0], [2, 0, 1]], index=idx, columns=idx)
>>> corr.corr2cov(std)
A B C
A 1.00000e+00 0.00000e+00 6.00000e+00
B 0.00000e+00 1.20000e+01 0.00000e+00
C 6.00000e+00 0.00000e+00 9.00000e+00
"""
cov = corr2cov(self.data, std)
index = self.data.index
columns = self.data.columns
return self.__class__(cov, index=index, columns=columns)
@classmethod
@cov33csv
def from_csv(cls, file, **kwargs):
"""
Read covariance matrix from csv file using `pandas.read_csv`.
Parameters
----------
file: `str`
csv file containing covariance matrix (with or w/o indices and
columns)
kwargs: `dict`
keyword arguments to pass to `pd.read_csv`
Returns
-------
`CategoryCov`
object containing covariance matrix
Examples
--------
Read a 2x2 matrix from a string in csv format.
>>> from io import StringIO
>>> cov = pd.DataFrame([[1, 0.4],[0.4, 1]])
>>> string = StringIO(cov.to_csv())
>>> sandy.CategoryCov.from_csv(string, index_col=0)
0 1
0 1.00000e+00 4.00000e-01
1 4.00000e-01 1.00000e+00
Now use `pandas.MultiIndex` as `index` and `columns`.
This example represents the case of a cross section covariance matrix
for `MAT=9437`, `MT=18` and two energy points `[1e-5, 1e6]`.
>>> tuples = [(9437, 18, 1e-5), (9437, 18, 1e6)]
>>> index = pd.MultiIndex.from_tuples(tuples, names=("MAT", "MT", "E"))
>>> cov.index = cov.columns = index
>>> string = StringIO(cov.to_csv())
>>> pos = [0, 1, 2]
>>> sandy.CategoryCov.from_csv(string, index_col=pos, header=pos)
MAT 9437
MT 18
E 1e-05 1000000.0
MAT MT E
9437 18 1.00000e-05 1.00000e+00 4.00000e-01
1.00000e+06 4.00000e-01 1.00000e+00
"""
df = pd.read_csv(file, **kwargs)
return cls(df)
@classmethod
def random_corr(cls, size, correlations=True, seed=None, **kwargs):
"""
>>> sandy.CategoryCov.random_corr(2, seed=1)
0 1
0 1.00000e+00 4.40649e-01
1 4.40649e-01 1.00000e+00
>>> sandy.CategoryCov.random_corr(2, correlations=False, seed=1)
0 1
0 1.00000e+00 0.00000e+00
1 0.00000e+00 1.00000e+00
"""
np.random.seed(seed=seed)
corr = np.eye(size)
if correlations:
offdiag = np.random.uniform(-1, 1, size**2).reshape(size, size)
up = np.triu(offdiag, 1)
else:
up = np.zeros([size, size])
corr += up + up.T
return cls(corr, **kwargs)
@classmethod
def random_cov(cls, size, stdmin=0.0, stdmax=1.0, correlations=True,
seed=None, **kwargs):
"""
Construct a covariance matrix with random values
Parameters
----------
size : `int`
Dimension of the original matrix
stdmin : `float`, default is 0
minimum value of the uniform standard deviation vector
stdmax : `float`, default is 1
maximum value of the uniform standard deviation vector
        correlations : `bool`, default is True
flag to insert the random correlations in the covariance matrix
seed : `int`, optional, default is `None`
seed for the random number generator (by default use `numpy`
            default pseudo-random number generator)
Returns
-------
`CategoryCov`
object containing covariance matrix
Examples
--------
>>> sandy.CategoryCov.random_cov(2, seed=1)
0 1
0 2.15373e-02 5.97134e-03
1 5.97134e-03 8.52642e-03
"""
corr = random_corr(size, correlations=correlations, seed=seed)
std = np.random.uniform(stdmin, stdmax, size)
return CategoryCov(corr).corr2cov(std)
def to_sparse(self, method='csr_matrix'):
"""
Method to extract `CategoryCov` values into a sparse matrix
Parameters
----------
method : `str`, optional
SciPy 2-D sparse matrix. The default is 'csr_matrix'.
Methods
-------
`csr_matrix`:
Compressed Sparse Row matrix.
`bsr_matrix`:
Block Sparse Row matrix.
`coo_matrix`:
A sparse matrix in COOrdinate format.
`csc_matrix`:
Compressed Sparse Column matrix.
`dia_matrix`:
Sparse matrix with DIAgonal storage.
`dok_matrix`:
Dictionary Of Keys based sparse matrix.
`lil_matrix`:
Row-based list of lists sparse matrix.
Returns
-------
data_sp : `scipy.sparse.matrix`
`CategoryCov` instance values stored as a sparse matrix
"""
data = self.data.values
if method == 'csr_matrix':
data_sp = sps.csr_matrix(data)
elif method == 'bsr_matrix':
data_sp = sps.bsr_matrix(data)
elif method == 'coo_matrix':
data_sp = sps.coo_matrix(data)
elif method == 'csc_matrix':
data_sp = sps.csc_matrix(data)
elif method == 'dia_matrix':
data_sp = sps.dia_matrix(data)
elif method == 'dok_matrix':
data_sp = sps.dok_matrix(data)
elif method == 'lil_matrix':
data_sp = sps.lil_matrix(data)
else:
raise ValueError('The method does not exist in scipy.sparse')
return data_sp
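    # Usage sketch (illustrative comments, not executed here): `to_sparse` is
    # typically called right before heavy linear algebra; names below are
    # assumptions, not part of the public API.
    #
    #     cov = sandy.CategoryCov.from_var([1, 2, 3])
    #     sp = cov.to_sparse(method='csc_matrix')   # scipy.sparse.csc_matrix
    #     dense = sp.toarray()                      # back to a numpy array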
def get_L(self, rows=None, tolerance=None):
"""
Extract lower triangular matrix `L` for which `L*L^T == self`.
Parameters
----------
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
tolerance : `float`, optional, default is `None`
replace all eigenvalues smaller than a given tolerance with zeros.
Returns
-------
`pandas.DataFrame`
            Cholesky decomposition lower triangular matrix.
Examples
--------
        Positive definite matrix:
>>> a = np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98]])
>>> sandy.CategoryCov(a).get_L()
0 1 2
0 -2.00000e+00 0.00000e+00 0.00000e+00
1 -6.00000e+00 1.00000e+00 0.00000e+00
2 8.00000e+00 5.00000e+00 3.00000e+00
>>> sandy.CategoryCov(a).get_L(tolerance=0)
0 1 2
0 -2.00000e+00 0.00000e+00 0.00000e+00
1 -6.00000e+00 1.00000e+00 0.00000e+00
2 8.00000e+00 5.00000e+00 3.00000e+00
>>> sandy.CategoryCov(a).get_L(rows=1)
0 1 2
0 -2.00000e+00 0.00000e+00 0.00000e+00
1 -6.00000e+00 1.00000e+00 0.00000e+00
2 8.00000e+00 5.00000e+00 3.00000e+00
Matrix with negative eigenvalues
>>> sandy.CategoryCov([[1, -2],[-2, 3]]).get_L(rows=1, tolerance=0)
0 1
0 -1.08204e+00 0.00000e+00
1 1.75078e+00 0.00000e+00
>>> sandy.CategoryCov([[1, -2],[-2, 3]]).get_L(tolerance=0)
0 1
0 -1.08204e+00 0.00000e+00
1 1.75078e+00 0.00000e+00
Decomposition test:
>>> L = sandy.CategoryCov(a).get_L()
>>> L.dot(L.T)
0 1 2
0 4.00000e+00 1.20000e+01 -1.60000e+01
1 1.20000e+01 3.70000e+01 -4.30000e+01
2 -1.60000e+01 -4.30000e+01 9.80000e+01
Matrix with negative eigenvalues, tolerance of 0:
>>> L = sandy.CategoryCov([[1, -2],[-2, 3]]).get_L(rows=1, tolerance=0)
>>> L.dot(L.T)
0 1
0 1.17082e+00 -1.89443e+00
1 -1.89443e+00 3.06525e+00
"""
index = self.data.index
columns = self.data.columns
# Reduces the size of the matrix, erasing the zero values
nonzero_idxs, cov_reduced = reduce_size(self.data)
# Obtain the eigenvalues and eigenvectors:
E, V = sandy.CategoryCov(cov_reduced).get_eig(tolerance=tolerance)
E = sps.diags(np.sqrt(E)).toarray()
# Construct the matrix:
rows_ = cov_reduced.shape[0] if rows is None else rows
A = sandy.cov.sparse_tables_dot(V, E, rows=rows_).T.toarray()
# QR decomposition:
Q, R = scipy.linalg.qr(A)
L_redu = R.T
# Original size
L = restore_size(nonzero_idxs, L_redu, len(self.data)).values
return pd.DataFrame(L, index=index, columns=columns)
class EnergyCov(CategoryCov):
"""
Dataframe for a multigroup covariance matrix.
.. note:: It is assumed that the covariance matrix is defined over
multi-group energy grids.
Only 'zero' interpolation is supported.
Attributes
----------
data : `pandas.DataFrame`
covariance matrix as a dataframe
Methods
-------
add
change_grid
from_lb1
from_lb2
from_lb5_sym
from_lb5_asym
from_lb6
sum_covs
Raises
------
`sandy.Error`
if index values are not monotonically increasing
`sandy.Error`
if columns values are not monotonically increasing
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def data(self):
"""
Covariance matrix as a dataframe.
Attributes
----------
index : `pandas.Index`
indices
columns : `pandas.MultiIndex`
indices
values : `numpy.array`
covariance values as `float`
Returns
-------
`pandas.DataFrame`
covariance matrix
Raises
------
`sandy.Error`
if `index` or `columns` are not monotonically increasing
"""
return self._data
@data.setter
def data(self, data):
self._data = pd.DataFrame(data)
self._data.index = pd.Index(
self._data.index.values,
name="E",
)
self._data.columns = pd.Index(
self._data.columns.values,
name="E",
)
if not self._data.index.is_monotonic_increasing:
raise sandy.Error("index values are not monotonically increasing")
if not self._data.columns.is_monotonic_increasing:
raise sandy.Error("columns values are not monotonically "
"increasing")
def change_grid(self, ex, ey, inplace=False):
"""
Given one energy grid for the x-axis and one energy grid for the
y-axis, interpolate/extrapolate the covariance matrix over the new
points using the *forward-filling* method.
.. important::
* backward extrapolated values (e.g. below threshold) are replaced
by 0
* forward extrapolated values (e.g. above 20 MeV) are replaced by
the covariance coefficient that refers to the last point in the
original grid
Parameters
----------
ex : `iterable`
energy grid for the x-axis
ey : `iterable`
energy grid for the y-axis
Returns
-------
`sandy.EnergyCov`
Covariance matrix interpolated over the new axes.
Examples
--------
>>> eg = [1e-2, 1e6]
>>> C = sandy.EnergyCov.random_corr(2, seed=1, index=eg, columns=eg)
>>> C.change_grid([0, 1, 1e6, 1e7], [0, 1, 1e6, 1e7])
E 0.00000e+00 1.00000e+00 1.00000e+06 1.00000e+07
E
0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00
1.00000e+00 0.00000e+00 1.00000e+00 4.40649e-01 4.40649e-01
1.00000e+06 0.00000e+00 4.40649e-01 1.00000e+00 1.00000e+00
1.00000e+07 0.00000e+00 4.40649e-01 1.00000e+00 1.00000e+00
"""
df = self.data.reindex(index=ex, method="ffill") \
.reindex(columns=ey, method="ffill") \
.fillna(0)
if not inplace:
return self.__class__(df)
self.data = df
def _plot_matrix(self, ax, xscale='log', yscale='log', cmap='bwr',
vmin=-1, vmax=1, emin=1e-5, emax=2e7, **kwargs):
new_xgrid = np.unique([*self.data.index, *[emin, emax]])
new_ygrid = np.unique([*self.data.columns, *[emin, emax]])
data = self.change_grid(ex=new_xgrid, ey=new_ygrid).data
X, Y = np.meshgrid(data.index.values, data.columns.values)
qmesh = ax.pcolormesh(
X.T,
Y.T,
data.values,
cmap=cmap,
vmin=vmin,
vmax=vmax,
**kwargs,
)
ax.set_xlim([emin, emax])
ax.set_ylim([emin, emax])
plt.colorbar(qmesh)
ax.set_xscale(xscale)
ax.set_yscale(yscale)
return ax
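    # Plotting sketch (hedged): assumes `matplotlib.pyplot` is imported as
    # `plt` at module level, as the call to `plt.colorbar` above suggests;
    # figure and file names are illustrative.
    #
    #     eg = [1e-2, 1e6]
    #     C = sandy.EnergyCov.random_corr(2, seed=1, index=eg, columns=eg)
    #     fig, ax = plt.subplots()
    #     C._plot_matrix(ax, vmin=-1, vmax=1)
    #     fig.savefig("energy_corr.png")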
def add(self, cov, inplace=False):
"""
Add the content of another `EnergyCov` (sum).
If the energy grids do not match, interpolate.
Parameters
----------
cov : `sandy.EnergyCov`
multigroup covariance matrices (axes can be different)
inplace : `bool`, optional, default is `False`
flag to operate **inplace**
Returns
-------
`sandy.EnergyCov`
Multi-group covariance matrix.
Examples
--------
>>> eg = [1e-2, 1e6]
>>> C = sandy.EnergyCov.random_corr(2, seed=1, index=eg, columns=eg)
>>> C.add(C)
E 1.00000e-02 1.00000e+06
E
1.00000e-02 2.00000e+00 8.81298e-01
1.00000e+06 8.81298e-01 2.00000e+00
>>> eg = [1e-1, 1]
>>> D = sandy.EnergyCov.random_corr(2, seed=5, index=eg, columns=eg)
>>> C.add(D)
E 1.00000e-02 1.00000e-01 1.00000e+00 1.00000e+06
E
1.00000e-02 1.00000e+00 1.00000e+00 1.00000e+00 4.40649e-01
1.00000e-01 1.00000e+00 2.00000e+00 1.74146e+00 1.18211e+00
1.00000e+00 1.00000e+00 1.74146e+00 2.00000e+00 1.44065e+00
1.00000e+06 4.40649e-01 1.18211e+00 1.44065e+00 2.00000e+00
>>> assert C.add(D).data.equals(D.add(C).data)
"""
ex = np.unique([*self.data.index, *cov.data.index])
ey = np.unique([*self.data.columns, *cov.data.columns])
x = self.change_grid(ex, ey)
y = cov.change_grid(ex, ey)
data = x.data.add(y.data)
if inplace:
self.data = data
else:
return self.__class__(data)
@classmethod
def sum_covs(cls, *covs):
"""
Sum multigroup covariance matrices into a single one.
Parameters
----------
covs : iterable of `sandy.EnergyCov`
list of multigroup covariance matrices (axes can be different)
Returns
-------
`sandy.EnergyCov`
Multi-group covariance matrix.
Examples
--------
Sum two 2x2 correlation matrices with different indices and columns
>>> eg = [1e-2, 1e6]
>>> C = sandy.EnergyCov.random_corr(2, seed=1, index=eg, columns=eg)
>>> eg = [1e-1, 1]
>>> D = sandy.EnergyCov.random_corr(2, seed=5, index=eg, columns=eg)
>>> sandy.EnergyCov.sum_covs(C, D)
E 1.00000e-02 1.00000e-01 1.00000e+00 1.00000e+06
E
1.00000e-02 1.00000e+00 1.00000e+00 1.00000e+00 4.40649e-01
1.00000e-01 1.00000e+00 2.00000e+00 1.74146e+00 1.18211e+00
1.00000e+00 1.00000e+00 1.74146e+00 2.00000e+00 1.44065e+00
1.00000e+06 4.40649e-01 1.18211e+00 1.44065e+00 2.00000e+00
"""
return functools.reduce(lambda x, y: x.add(y), covs)
@classmethod
def from_lb1(cls, evalues, fvalues):
"""Extract square covariance matrix from NI-type sub-subsection data
with flag `lb=1`.
Parameters
----------
evalues : iterable
covariance energy grid (same for both axes)
fvalues : iterable
            array of F-values (covariance matrix diagonal)
Returns
-------
`sandy.EnergyCov`
Multi-group covariance matrix.
"""
cov = np.diag(fvalues)
return cls(cov, index=evalues, columns=evalues)
@classmethod
def from_lb2(cls, evalues, fvalues):
"""Extract square covariance matrix from NI-type sub-subsection data
with flag `lb=2`.
Parameters
----------
evalues : `iterable`
            covariance energy grid for both axes
fvalues : `iterable`
array of F-values
Returns
-------
`sandy.formats.utils.EnergyCov`
Multi-group covariance matrix.
"""
f = np.array(fvalues)
cov = f*f.reshape(-1,1)
return cls(cov, index=evalues, columns=evalues)
@classmethod
def from_lb5_sym(cls, evalues, fvalues):
"""Extract square symmetric covariance matrix from NI-type sub-subsection data
with flag `lb=5`.
Parameters
----------
evalues : `iterable`
            covariance energy grid for both axes
fvalues : `iterable`
array of F-values (flattened upper triangular matrix coefficients)
Returns
-------
`sandy.formats.utils.EnergyCov`
Multi-group covariance matrix.
"""
ne = len(evalues)
cov = np.zeros([ne - 1, ne - 1])
indices = np.triu_indices(ne - 1)
cov[indices] = np.array(fvalues)
cov += np.triu(cov, 1).T
# add zero row and column at the end of the matrix
cov = np.insert(cov, cov.shape[0], [0]*cov.shape[1], axis=0)
cov = np.insert(cov, cov.shape[1], [0]*cov.shape[0], axis=1)
return cls(cov, index=evalues, columns=evalues)
@classmethod
def from_lb5_asym(cls, evalues, fvalues):
"""
Extract square asymmetric covariance matrix from NI-type sub-subsection data
with flag `lb=5`.
Parameters
----------
evalues : `iterable`
            covariance energy grid for both axes
fvalues : `iterable`
array of F-values (flattened full matrix)
Returns
-------
`sandy.formats.utils.EnergyCov`
Multi-group covariance matrix.
"""
ne = len(evalues)
cov = np.array(fvalues).reshape(ne - 1, ne - 1)
# add zero row and column at the end of the matrix
cov = np.insert(cov, cov.shape[0], [0]*cov.shape[1], axis=0)
cov = np.insert(cov, cov.shape[1], [0]*cov.shape[0], axis=1)
return cls(cov, index=evalues, columns=evalues)
@classmethod
def from_lb6(cls, evalues_r, evalues_c, fvalues):
"""Extract covariance matrix from NI-type sub-subsection data
        with flag `lb=6`.
Parameters
----------
evalues_r : `iterable`
covariance energy grid for row axis
evalues_c : `iterable`
covariance energy grid for column axis
fvalues : `iterable`
array of F-values (flattened full matrix)
Returns
-------
`sandy.formats.utils.EnergyCov`
Multi-group covariance matrix.
"""
ner = len(evalues_r)
nec = len(evalues_c)
cov = np.array(fvalues).reshape(ner-1, nec-1)
# add zero row and column at the end of the matrix
cov = np.insert(cov, cov.shape[0], [0]*cov.shape[1], axis=0)
cov = np.insert(cov, cov.shape[1], [0]*cov.shape[0], axis=1)
return cls(cov, index=evalues_r, columns=evalues_c)
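    # Construction sketch for the `from_lb*` readers (numbers below are
    # illustrative, not taken from any evaluation).  The `from_lb5_*` and
    # `from_lb6` readers pad a zero row/column so the matrix is defined on the
    # full energy grid:
    #
    #     egrid = [1e-5, 1e3, 2e7]
    #     c1 = sandy.EnergyCov.from_lb1(egrid, [0.01, 0.02, 0.00])
    #     c5 = sandy.EnergyCov.from_lb5_sym(egrid, [0.01, 0.005, 0.02])
    #     c6 = sandy.EnergyCov.from_lb6(egrid, egrid, [0.01, 0.005, 0.003, 0.02])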
class GlobalCov(CategoryCov):
@classmethod
def from_list(cls, iterable):
"""
Extract global cross section/nubar covariance matrix from iterables
of `EnergyCovs`.
Parameters
----------
iterable : iterable
            list of tuples/lists/iterables with content `[(mat, mt), (mat1, mt1), EnergyCov]`
Returns
-------
`XsCov` or `pandas.DataFrame`
global cross section/nubar covariance matrix (empty dataframe if no covariance matrix was found)
"""
columns = ["KEYS_ROWS", "KEYS_COLS", "COV"]
# Reindex the cross-reaction matrices
covs = pd.DataFrame.from_records(iterable).set_axis(columns, axis=1).set_index(columns[:-1]).COV
for (keys_rows,keys_cols), cov in covs.iteritems():
if keys_rows == keys_cols: # diagonal terms
if cov.data.shape[0] != cov.data.shape[1]:
raise SandyError("non-symmetric covariance matrix for ({}, {})".format(keys_rows, keys_cols))
if not np.allclose(cov.data, cov.data.T):
raise SandyError("non-symmetric covariance matrix for ({}, {})".format(keys_rows, keys_cols))
else: # off-diagonal terms
condition1 = (keys_rows,keys_rows) in covs.index
condition2 = (keys_cols,keys_cols) in covs.index
if not (condition1 and condition2):
covs[keys_rows,keys_cols] = np.nan
logging.warn("skip covariance matrix for ({}, {})".format(keys_rows, keys_cols))
continue
ex = covs[keys_rows,keys_rows].data.index.values
ey = covs[keys_cols,keys_cols].data.columns.values
covs[keys_rows,keys_cols] = cov.change_grid(ex, ey)
covs.dropna(inplace=True)
if covs.empty:
logging.warn("covariance matrix is empty")
return pd.DataFrame()
# Create index for global matrix
rows_levels = covs.index.levels[0]
indexlist = [(*keys,e) for keys in rows_levels for e in covs[(keys,keys)].data.index.values]
index = pd.MultiIndex.from_tuples(indexlist, names=cls.labels)
# Create global matrix
matrix = np.zeros((len(index),len(index)))
for (keys_rows,keys_cols), cov in covs.iteritems():
ix = index.get_loc(keys_rows)
ix1 = index.get_loc(keys_cols)
matrix[ix.start:ix.stop,ix1.start:ix1.stop] = cov.data
if keys_rows != keys_cols:
matrix[ix1.start:ix1.stop,ix.start:ix.stop] = cov.data.T
return cls(matrix, index=index, columns=index)
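    # Assembly sketch (hedged): records follow what the code above expects,
    # i.e. `(keys_rows, keys_cols, EnergyCov)` with (MAT, MT)-like key tuples.
    # The MAT/MT numbers and the energy grid are purely illustrative, and
    # `GlobalCov.labels` must provide the MultiIndex level names.
    #
    #     eg = [1e-5, 2e7]
    #     c = sandy.EnergyCov.random_corr(2, seed=1, index=eg, columns=eg)
    #     blocks = [((9437, 18), (9437, 18), c),
    #               ((9437, 102), (9437, 102), c)]
    #     gcov = sandy.GlobalCov.from_list(blocks)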
def corr2cov(corr, s):
"""
Produce covariance matrix given correlation matrix and standard
deviation array.
Parameters
----------
corr : 2D iterable
square 2D correlation matrix
s : 1D iterable
1D iterable with standard deviations
Returns
-------
`numpy.ndarray`
square 2D covariance matrix
Examples
--------
Test with integers
>>> s = np.array([1, 2, 3])
>>> corr = np.array([[1, 0, 2], [0, 3, 0], [2, 0, 1]])
>>> corr2cov(corr, s)
array([[ 1, 0, 6],
[ 0, 12, 0],
[ 6, 0, 9]])
Test with float
>>> corr2cov(corr, s.astype(float))
array([[ 1., 0., 6.],
[ 0., 12., 0.],
[ 6., 0., 9.]])
"""
s_ = np.diag(s)
return s_.dot(corr.dot(s_))
def sparse_tables_dot(a, b, rows=1000):
"""
Function to perform multiplications between matrices stored on local
disk instead of memory.
Parameters
----------
a : 2D iterable
Matrix.
b : 2D iterable
Matrix.
rows : `int`, optional.
Number of rows to be calculated in each loop. The default is 1000.
Returns
-------
dot_product : "scipy.sparse.csc_matrix"
The multiplication of 2 matrix.
"""
a_ = sps.csr_matrix(a)
b_ = sps.csc_matrix(b)
l, n = a_.shape[0], b_.shape[1]
f = tb.open_file('dot.h5', 'w')
filters = tb.Filters(complevel=5, complib='blosc')
out_data = f.create_earray(f.root, 'data', tb.Float64Atom(), shape=(0,),
filters=filters)
out_indices = f.create_earray(f.root, 'indices', tb.Int64Atom(), shape=(0,),
filters=filters)
out_indptr = f.create_earray(f.root, 'indptr', tb.Int64Atom(), shape=(0,),
filters=filters)
out_indptr.append(np.array([0]))
max_indptr = 0
for i in range(0, l, rows):
res = a_[i:min(i+rows, l), :].dot(b_)
out_data.append(res.data)
indices = res.indices
indptr = res.indptr
out_indices.append(indices)
out_indptr.append(max_indptr+indptr[1:])
max_indptr += indices.shape[0]
f.flush()
dot_product = sps.csr_matrix((f.root.data[:], f.root.indices[:],
f.root.indptr[:]), shape=(l, n))
f.close()
os.remove('dot.h5')
return dot_product
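# Usage sketch (illustrative): the product is accumulated through a temporary
# 'dot.h5' file in the working directory, so only the two operands and an
# optional row-block size are needed.
#
#     a = sps.random(5000, 2000, density=0.01, format='csr')
#     b = sps.random(2000, 300, density=0.01, format='csc')
#     ab = sparse_tables_dot(a, b, rows=1000)   # scipy CSR matrix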
def sparse_tables_dot_multiple(matrix_list, rows=1000):
"""
Function to perform multiplications between matrices stored on local
disk instead of memory.
Parameters
----------
    matrix_list : 1D iterable
        Iterable containing the matrices to multiply.
rows : `int`, optional.
Number of rows to be calculated in each loop. The default is 1000.
Returns
-------
matrix : "scipy.sparse.csc_matrix"
Result of the multiplication of 2 matrix.
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1]).data.values
>>> sparse_tables_dot_multiple([S, cov, S.T], 1)
array([[ 5., 11.],
[11., 25.]])
"""
matrix = matrix_list[0]
for b in matrix_list[1::]:
intermediate_matrix = sparse_tables_dot(matrix, b, rows=rows)
matrix = intermediate_matrix
return matrix.toarray()
def sparse_tables_inv(a, rows=1000):
"""
    Function to perform matrix inversion using storage on local
    disk instead of memory.
Parameters
----------
a : 2D iterable
Matrix to be inverted.
rows : `int`, optional.
Number of rows to be calculated in each loop. The default is 1000.
Returns
-------
invert_matrix : "numpy.ndarray"
The inverted matrix.
Example
-------
>>> S = sandy.CategoryCov(np.diag(np.array([1, 2, 3]))).data.values
>>> sandy.cov.sparse_tables_inv(S, 1).round(2)
array([[1. , 0. , 0. ],
[0. , 0.5 , 0. ],
[0. , 0. , 0.33]])
"""
a_ = sps.csc_matrix(a)
l, n = a_.shape[0], a_.shape[1]
LU = spsl.splu(a_)
f = tb.open_file('inv.h5', 'w')
filters = tb.Filters(complevel=5, complib='blosc')
out_data = f.create_carray(f.root, 'data', tb.Float64Atom(),
shape=(l, n), filters=filters)
    # solve against dense column blocks of the identity to build the inverse;
    # this also works when `l` is not an exact multiple of `rows`
    identity = np.identity(l)
    for i in range(0, l, rows):
        I_split = identity[:, i:min(i + rows, l)]
        tmpResult = LU.solve(I_split)
        out_data[:, i:min(i+rows, l)] = tmpResult
        f.flush()
invert_matrix = sps.csr_matrix(out_data).toarray()
f.close()
os.remove('inv.h5')
return invert_matrix
def segmented_pivot_table(data_stack, index, columns, values, rows=10000000):
"""
Create a pivot table from a stacked dataframe.
Parameters
----------
    data_stack : `pandas.DataFrame`
        Stacked dataframe.
    index : 1D iterable
        Index of the final covariance matrix.
    columns : 1D iterable
        Columns of the final covariance matrix.
    values : `str`
        Name of the column where the values are located.
rows : `int`, optional
Number of rows to take into account into each loop. The default
is 10000000.
Returns
-------
pivot_matrix : `pd.DataFrame`
        Covariance matrix created from the stacked data.
Examples
--------
>>> S = pd.DataFrame(np.array([[1, 1, 1], [0, 2, 1], [0, 0, 1]]))
>>> S = S.stack().reset_index().rename(columns = {'level_0': 'dim1', 'level_1': 'dim2', 0: 'cov'})
>>> sandy.cov.segmented_pivot_table(S, index=['dim1'], columns=['dim2'], values='cov')
dim2 0 1 2
dim1
0 1 1 1
1 0 2 1
2 0 0 1
>>> sandy.cov.segmented_pivot_table(S, index=['dim1'], columns=['dim2'], values='cov', rows=1)
dim2 0 1 2
dim1
0 1.00000e+00 1.00000e+00 1.00000e+00
1 0.00000e+00 2.00000e+00 1.00000e+00
2 0.00000e+00 0.00000e+00 1.00000e+00
"""
size = data_stack.shape[0]
pivot_matrix = []
for i in range(0, size, rows):
partial_pivot = data_stack[i: min(i+rows, size)].pivot_table(
index=index,
columns=columns,
values=values,
fill_value=0,
aggfunc=np.sum,
)
pivot_matrix.append(partial_pivot)
    pivot_matrix = pd.concat(pivot_matrix)
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
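# e.g. for a 2-D frame, _axify(df, [0, 1], axis=1) -> (slice(None), [0, 1]),
# i.e. "all rows, columns 0 and 1" when passed to an indexer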
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
            index=['ABCDE'[x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
        tm.assert_frame_equal(result, expected)
'''
calculate intrinsic economic value of a property based on buy or rent decision indifference (arbitrage)
Rent = Price*mortgage_rate
+ DEPRECIATION_RATE*min(Building, Price)
- growth * Land
+ Price*tax
    - (Price - 24k) * tax_bracket
+ Price * mortgage_insurance
'''
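# The indifference equation above, restated as code for clarity (a sketch only;
# the lowercase names are illustrative placeholders, while the uppercase
# constants are defined further below):
#
#     rent = (price * mortgage_rate
#             + DEPRECIATION_RATE * min(building, price)
#             - growth * land
#             + price * property_tax_rate
#             - (price - STANDARD_TAX_DEDUCTION) * tax_bracket_rate
#             + price * PMI_RATE)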
### PATHS to change
HOST = "/home/invisement/PROJECTS/inVisement2/apps-workshop/"
INPUT_PATH = HOST + "data/"
OUTPUT_PATH = HOST + "data/"
def main():
''' read all input files, prepare housing table, calculate intrinsic value, add return columns, and save output '''
housing_table = read_and_join_input_files()
housing_table = prepare_housing_valuation_table(housing_table)
housing_table['intrinsic house value'] = calculate_intrinsic_value (housing_table).round()
housing_table["total return"] = (housing_table['intrinsic house value'] - housing_table['house price'])/housing_table['house price']
housing_table['net annual return'] = housing_table['total return'] * (housing_table['mortgage rate']+PMI_RATE)
housing_table['annual return'] = housing_table['net annual return'] + housing_table['mortgage rate'] + PMI_RATE
    return prune_and_save(housing_table)
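### USAGE (sketch): run the full pipeline; assumes the helper functions used in
### main() (prepare_housing_valuation_table, calculate_intrinsic_value,
### prune_and_save) are defined further below in the original script.
###     housing_table = main()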
### LIBRARIES
import datetime, pandas as pd, numpy as np
### CONSTANTS
PMI_RATE = 0.01 # private mortgage insurance rate
FEDERAL_INCOME_TAX_RATE = 0.30 # federal tax bracket rate
DEPRECIATION_RATE = 0.03 # home annual depreciation rate for a base home (building)
STANDARD_TAX_DEDUCTION = 24000
MAX_MORTGAGE_CREDIT = 1000000 # maximum allowance for tax deduction on mortgage
BASE_QUANTILE = 0.3
### INPUTS
PRICE_FILE = INPUT_PATH + "house price by county.csv"
RENT_FILE = INPUT_PATH + "rent by county.csv"
MORTGAGE_FILE = INPUT_PATH + "mortgage rate 30 year fixed.csv"
GROWTH_FILE = INPUT_PATH + "nominal gdp growth.csv"
TAX_FILE = INPUT_PATH + "property tax by fips.csv"
### OUTPUTS
HOUSING_FILE = OUTPUT_PATH + "housing valuation.csv"
LATEST_HOUSING_FILE = OUTPUT_PATH + "latest housing valuation.csv"
### FUNCTIONS
def read_and_join_input_files (base_quantile=BASE_QUANTILE):
price = pd.read_csv(PRICE_FILE, dtype=str)
price['house price'] = pd.to_numeric(price['house price'])
price['fips'] = price['state fips'].str.zfill(2) + price['county fips'].str.zfill(3)
price = price.filter(['fips', 'state', 'county', 'date', 'house price'])
rent = pd.read_csv(RENT_FILE, dtype=str)
rent['rent'] = pd.to_numeric(rent['rent']) * 12 # convert to annual
rent['fips'] = rent['state fips'].str.zfill(2) + rent['county fips'].str.zfill(3)
rent = rent.filter(["fips", "date", "rent"])
rate = (pd.read_csv(MORTGAGE_FILE)
.set_index('date')
.rename(columns = {"mortgage rate 30 year fixed": "mortgage rate"})
.filter(["mortgage rate"])
)
rate = rate/100
rate.index = pd.to_datetime(rate.index)
average_growth = (
pd.read_csv (GROWTH_FILE, index_col="date")#["nominal gdp growth"]
.rename(columns = {"nominal gdp growth": "growth"})
.sort_index()
.filter(["growth"])
.rolling(80)
.mean() # moving average of nominal growth with 80 quarters
)
average_growth = average_growth/100
average_growth.index = pd.to_datetime(average_growth.index)
#fips_to_zipcode = pd.read_csv(config['map path']+"zipcode fips mapping.csv")
property_tax = pd.read_csv(TAX_FILE).filter(["fips", "property tax rate"])
property_tax['fips'] = property_tax['fips'].astype(str).str.zfill(5)
#property_tax_by_zipcode = pd.merge(fips_to_zipcode, property_tax, on=["fips"]).drop_duplicates(subset=['zipcode']).set_index(['zipcode'])['property tax rate']
table = price.merge(rent, how="inner", on=["fips", "date"], suffixes=["", "2"]).sort_values("date")
    table['date'] = pd.to_datetime(table['date'])
import math
import numpy as np
import datetime as dt
import pandas_datareader.data as web
import pandas as pd
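# compatibility shim: some pandas_datareader releases still look up
# pd.core.common.is_list_like, which newer pandas moved to pd.api.types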
pd.core.common.is_list_like = pd.api.types.is_list_like
# Z-score normalization
def scale(data):
col = data.columns[0]
return (data[col] - data[col].mean()) / data[col].std()
# Calculate volatility using the previous day's Close price and today's Close price
def CloseVol(ohlc, n):
rtn = pd.DataFrame(ohlc['Close']).apply(lambda x: np.log(x) - np.log(x.shift(1)))
vol = pd.DataFrame(rtn).rolling(window=n).std()
return pd.DataFrame(vol, index=ohlc.index)
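# Usage sketch (illustrative): `ohlc` is any DataFrame with a 'Close' column,
# e.g. the output of getStockDataYahoo() below; the ticker and the 20-day
# window are arbitrary choices, not requirements.
#
#     ohlc = getStockDataYahoo('005930.KS', '2020-01-01', '2020-12-31')
#     vol20 = CloseVol(ohlc, 20)   # rolling std of daily log returns
#     z = scale(vol20)             # z-score normalised volatility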
# Collect stock price data from the Yahoo site
def getStockDataYahoo(stockCode, start='', end=''):
    # Collection period
if start == '':
start = dt.datetime(2007, 1, 1)
else:
start = dt.datetime.strptime(start, '%Y-%m-%d')
if end == '':
end = dt.date.today()
else:
end = dt.datetime.strptime(end, '%Y-%m-%d')
    stock = pd.DataFrame()
from __future__ import annotations
import re
import warnings
from enum import Enum, auto
from typing import Dict, List, Union, Tuple, Optional
import numpy as np
import pandas as pd
import torch
from ..exceptions import TsFileParseException
from ..utils import stack_pad
class TsTagValuePattern(Enum):
"""
Enumeration holding the known `.ts` file headers tag value types in the form of regex expressions.
"""
BOOLEAN = re.compile('(?:tru|fals)e')
ANY_CONNECTED_STRING = re.compile('\\w+')
INTEGER_NUMBER = re.compile('\\d+')
CLASS_LABEL = re.compile('(?:tru|fals)e(?:(?<=true)((?: [^\s]+)+)|(?<=false))')
# ((?:tru|fals)e)(?(?<=true)((?: \w+)+))(?=\s)
class TsTag(str, Enum):
"""
Enumeration holding the names of the known `.ts` file tag names.
"""
PROBLEM_NAME = 'problemName'
TIMESTAMPS = 'timeStamps'
MISSING = 'missing'
EQUAL_LENGTH = 'equalLength'
SERIES_LENGTH = 'seriesLength'
CLASS_LABEL = 'classLabel'
UNIVARIATE = 'univariate'
DIMENSIONS = 'dimensions'
class TSFileLoader:
"""
File loader that can load time series files in sktimes `.ts` file format.
Args:
filepath (str): The path to the `.ts` file.
"""
class State(Enum):
"""
TSFileLoader's internal parsing state.
"""
PREFACE = 0
HEADER = 1
BODY = 2
BODY_TIME_STAMPS = 21
# Dict mapping known `.ts` file header tags to their respective parsing expression
header_info: Dict[TsTag, TsTagValuePattern] = {
TsTag.PROBLEM_NAME: TsTagValuePattern.ANY_CONNECTED_STRING,
TsTag.TIMESTAMPS: TsTagValuePattern.BOOLEAN,
TsTag.MISSING: TsTagValuePattern.BOOLEAN,
TsTag.EQUAL_LENGTH: TsTagValuePattern.BOOLEAN,
TsTag.SERIES_LENGTH: TsTagValuePattern.INTEGER_NUMBER,
TsTag.CLASS_LABEL: TsTagValuePattern.CLASS_LABEL,
TsTag.UNIVARIATE: TsTagValuePattern.BOOLEAN,
TsTag.DIMENSIONS: TsTagValuePattern.INTEGER_NUMBER
}
required_meta_info: List[TsTag] = [TsTag.PROBLEM_NAME, TsTag.CLASS_LABEL, TsTag.EQUAL_LENGTH, TsTag.MISSING,
TsTag.TIMESTAMPS]
def as_tensor(self, return_targets: bool = False) -> Union[torch.Tensor, Tuple[torch.Tensor, List[str]]]:
"""Return the loaded data as a 3 dimensional tensor of the form (N, C, S).
Keyword Args:
            return_targets (bool): If True, also return the list of class labels.
Returns:
torch.Tensor: A 3 dimensional tensor.
"""
data_ = []
if len(self.data) == 0:
self.parse()
for dim in self.data:
data_.append(stack_pad(dim))
data_ = torch.permute(torch.stack(data_, dim=-1), (0, 2, 1))
if self.header[TsTag.CLASS_LABEL] and return_targets:
return data_, self.targets
return data_
def as_dataframe(self, return_targets: bool = False) -> pd.DataFrame:
"""Return the loaded data as a pandas dataframe.
Keyword Args:
return_targets (bool): Identifies whether the targets should be included in the returned dataframe. If
True, the targets will be added as an additional column 'targets' to the dataframe. This only has an effect
if there are class labels available in the datafile that was parsed in the first place.
Returns:
pd.DataFrame: A nested pandas dataframe holding the dimensions as columns and the number examples as rows,
where every cell contains a pandas Series containing a univariate time series.
If `return_targets` is set, it will also contain a column 'targets' that contains the class labels of every
example.
"""
data = pd.DataFrame(dtype=np.float32)
if len(self.data) == 0:
self.parse()
for dim in range(0, len(self.data)):
data["dim_" + str(dim)] = self.data[dim]
if self.header[TsTag.CLASS_LABEL] and return_targets:
data["targets"] = self.targets
return data
def get_classes(self):
"""Return the classes found in the '.ts' file
Returns:
List[str]: List of class names as string.
"""
if self.header[TsTag.CLASS_LABEL]:
return self.header[TsTag.CLASS_LABEL]
else:
raise AttributeError(f"The '.ts' file {self.filename} does not have any class labels")
def __init__(self, filepath: str, nan_replace_value: Union[int, float, str] = "NaN"):
self.filename = filepath
self.line_number = 1
self.file = open(filepath, "r", encoding="utf-8")
self.state = self.State.PREFACE
self.header = {k: None for k in self.header_info.keys()}
self.data = []
self.targets = []
self.dim = None
self.series_length = 0
self.timestamp_type = None
self.nan_replace_value = nan_replace_value
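    # Typical usage sketch (the file name is illustrative; `parse()` is defined
    # later in the original module and populates `self.data`/`self.targets`):
    #
    #     loader = TSFileLoader("BasicMotions_TRAIN.ts")
    #     X, y = loader.as_tensor(return_targets=True)    # (N, C, S) tensor + labels
    #     df = loader.as_dataframe(return_targets=True)   # nested pandas frame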
def parse_header(self, line: str) -> None:
"""Parses a line of a `.ts` file header and updates the internal state of the loader with the extracted
information.
Args:
line (str): The header line to parse.
Returns:
None
"""
if not line.startswith("@"):
raise TsFileParseException(
f"Line number {self.line_number} was interpreted as HEADER but does not start with '@'!"
)
line = line[1:]
if len(line) == 0:
raise TsFileParseException(
f"Line number {self.line_number} contains an empty tag!"
)
tokens = line.split(" ", maxsplit=1)
token_len = len(tokens)
if token_len == 1:
raise TsFileParseException(
f"tag '{tokens[0]}' at line number {self.line_number} requires an associated value!"
)
tag = TsTag(tokens[0])
value_pattern = self.header_info[tag]
value = value_pattern.value.match(tokens[1])
if value:
if len(value.groups()) > 1:
raise TsFileParseException(
"Value extractor should return exactly ONE match!"
)
if len(value.groups()) > 0:
value = value.group(1)
else:
value = value.group(0)
self.header[tag] = self.parse_header_value(value, value_pattern)
def parse_header_value(self, value: str, value_type: TsTagValuePattern) -> Union[bool, str, int, List[str]]:
"""Parse a single header value that was extracted by the header line parser and return its value as the
appropriate python object.
Args:
value (str): Extracted header value that should be parsed.
value_type (TsTagValuePattern): The expected type of the value, which should be applied.
Returns:
bool: If the value is of type BOOLEAN. `value` converted to bool
str: If the value is of type ANY_CONNECTED_STRING. Returns the stripped value string.
List[str]: If the value is of type CLASS_LABEL. Returns a list of space separated string class labels.
"""
if value_type == TsTagValuePattern.BOOLEAN:
return value == 'true'
if value_type == TsTagValuePattern.ANY_CONNECTED_STRING:
return value.strip()
if value_type == TsTagValuePattern.CLASS_LABEL:
if value is None:
return False
return value.strip().split(" ")
if value_type == TsTagValuePattern.INTEGER_NUMBER:
try:
value = int(value)
except ValueError:
raise TsFileParseException(
f"Value '{value}' in line {self.line_number} could not be interpreted as int"
)
return value
def parse_body(self, line: str) -> None:
"""Parse a line of the `@data` content of a `.ts` file if `@timeStamps` is `False`.
Args:
line (str): The `@data` line to parse.
Returns:
None
"""
dimensions = line.split(":")
if not self.data:
if not self.header[TsTag.DIMENSIONS]:
warnings.warn("Meta information for '@dimensions' is missing. Inferring from data.",
UserWarning,
stacklevel=2)
self.dim = len(dimensions)
# last dimension is the target
if self.header[TsTag.CLASS_LABEL]:
self.dim -= 1
self.data = [[] for _ in range(self.dim)]
# Check dimensionality of the data of the current line
# All dimensions should be included for all series, even if they are empty
line_dim = len(dimensions)
if self.header[TsTag.CLASS_LABEL]:
line_dim -= 1
if line_dim != self.dim:
raise TsFileParseException(
f"Inconsistent number of dimensions. Expecting {self.dim} but got {line_dim} "
f"in line number {self.line_number}."
)
# Process the data for each dimension
for dim in range(0, self.dim):
dimension = dimensions[dim].strip()
if dimension:
dimension = dimension.replace("?", self.nan_replace_value)
data_series = dimension.split(",")
data_series = [float(i) for i in data_series]
dim_len = len(data_series)
if self.series_length < dim_len:
if not self.header[TsTag.EQUAL_LENGTH]:
self.series_length = dim_len
else:
raise TsFileParseException(
f"Series length was given as {self.series_length} but dimension {dim} in line "
f"{self.line_number} is of length {dim_len}"
)
self.data[dim].append(pd.Series(data_series))
else:
                self.data[dim].append(pd.Series(dtype="object"))
import datetime
import apimoex
import pandas as pd
import requests
from tqdm import tqdm
def get_board_tickers(board={"board": "TQBR", "shares": "shares"}):
"""This function returns list with tickers available on a specific board.
:Input:
:board : dict like {'board': 'TQBR', 'shares': 'shares'},
:Output:
: tickers - list
"""
arguments = {"securities.columns": ("SECID," "REGNUMBER," "LOTSIZE," "SHORTNAME")}
brd = board.get("board")
shares = board.get("shares")
request_url = (
"https://iss.moex.com/iss/engines/stock/"
f"markets/{shares}/boards/{brd}/securities.json"
)
with requests.Session() as session:
iss = apimoex.ISSClient(session, request_url, arguments)
iis_data = iss.get()
board_df = pd.DataFrame(iis_data["securities"])
board_df.set_index("SECID", inplace=True)
return board_df.index.tolist()
def _download_moex(
tickers: list, start_date: datetime, end_date: datetime, boards: list
) -> pd.DataFrame:
data = pd.DataFrame()
arguments = {"securities.columns": ("SECID," "REGNUMBER," "LOTSIZE," "SHORTNAME")}
for board in boards:
board_tickers = []
brd = board.get("board")
shares = board.get("shares")
request_url = (
"https://iss.moex.com/iss/engines/stock/"
f"markets/{shares}/boards/{brd}/securities.json"
)
with requests.Session() as session:
iss = apimoex.ISSClient(session, request_url, arguments)
iis_data = iss.get()
board_df = pd.DataFrame(iis_data["securities"])
board_df.set_index("SECID", inplace=True)
columns = ["TRADEDATE", "WAPRICE", "CLOSE"]
stocks_prices = []
if len(tickers) == 0:
board_tickers = board_df.index.tolist()
else:
board_tickers = tickers
pbar = tqdm(total=len(board_df.index))
for stock in board_df.index:
if stock in board_tickers:
stock_data = apimoex.get_board_history(
session=session,
security=stock,
start=start_date,
end=end_date,
columns=columns,
market=shares,
board=brd,
)
stock_df = pd.DataFrame(stock_data)
stock_df["TRADEDATE"] = pd.to_datetime(stock_df["TRADEDATE"])
stock_df.set_index("TRADEDATE", inplace=True)
stock_df = pd.concat([stock_df], axis=1, keys=[stock]).swaplevel(
0, 1, 1
)
stocks_prices.append(stock_df)
pbar.update(1)
pbar.clear()
if len(stocks_prices) > 0:
data1 = pd.concat(stocks_prices, join="outer", axis=1)
if len(data.columns) == 0:
data = data1.copy(deep=True)
else:
data = | pd.concat([data, data1], join="outer", axis=1) | pandas.concat |
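# --- Usage sketch for the helpers above (requires network access to iss.moex.com) ---
# get_board_tickers() is defined in full above; _download_moex() is shown truncated
# here, so its call is left commented out. The tickers, dates and board dict are
# arbitrary examples, not recommendations.
if __name__ == "__main__":
    tqbr_tickers = get_board_tickers({"board": "TQBR", "shares": "shares"})
    print(len(tqbr_tickers), "tickers available on TQBR")
    # prices = _download_moex(tickers=["SBER", "GAZP"], start_date="2021-01-01",
    #                         end_date="2021-12-31",
    #                         boards=[{"board": "TQBR", "shares": "shares"}])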
import pandas as pd
from xml.etree import ElementTree as etree
from pprint import pprint
from yattag import *
import pdb
#------------------------------------------------------------------------------------------------------------------------
class Line:
tierInfo = []
spokenTextID = ""
rootElement = None
tierElements = []
doc = None
lineNumber = None
# phoneme tokens and their gloss tokens are equal in number, often different in length,
# and displayed in horizontal alignment, each vertical pair in a horizontal space
    # large enough to hold the longer of the two. for instance, from <NAME>
# how daylight was stolen, line 3, fourth word, needs 18 character spaces
# gʷә-s-čal
# uncertain-means-get
# this next member variable holds these values once calculated
wordSpacing = []
def __init__(self, doc, lineNumber):
self.doc = doc
self.lineNumber = lineNumber
self.rootElement = self.doc.findall("TIER/ANNOTATION/ALIGNABLE_ANNOTATION")[lineNumber]
self.allElements = findChildren(self.doc, self.rootElement)
# need tier info to guide traverse
self.tierInfo = [doc.attrib for doc in doc.findall("TIER")]
# [{'LINGUISTIC_TYPE_REF': 'default-lt', 'TIER_ID': 'AYA'},
# {'LINGUISTIC_TYPE_REF': 'phonemic', 'PARENT_REF': 'AYA', 'TIER_ID': 'AYA2'},
# {'LINGUISTIC_TYPE_REF': 'translation', 'PARENT_REF': 'AYA', 'TIER_ID': 'ENG'},
# {'LINGUISTIC_TYPE_REF': 'translation', 'PARENT_REF': 'AYA2', 'TIER_ID': 'GL'}]
self.tbl = buildTable(doc, self.allElements)
self.rootSpokenTextID = self.deduceSpokenTextID()
self.wordRepresentation = self.deduceWordRepresentation()
self.traverseAndClassify()
self.words = []
self.glosses = []
self.extractWords()
self.wordSpacing = []
if(len(self.words) > 0):
self.wordSpacing = [10 for x in range(len(self.words))]
#self.deduceStructure()
#self.wordSpacing = [];
#self.calculateSpacingOfWordsAndTheirGlosses()
def getImmediateChildrenOfRoot(self):
rootID = self.deduceSpokenTextID()
def getTierCount(self):
return(len(self.tierElements))
def getTable(self):
return(self.tbl)
def classifyTier(self, tierNumber):
assert(tierNumber < self.getTable().shape[0])
tierInfo = self.getTable().ix[tierNumber].to_dict()
tierType = tierInfo['LINGUISTIC_TYPE_REF']
hasTimes = tierInfo['START'] >= 0 and tierInfo['END'] >= 0
hasText = tierInfo['TEXT'] != ""
# pdb.set_trace()
# the root is the full spoken text, a single string, in practical orthography
# is this tier a direct child of root?
# 1) the full (untokenized) translation
# 2) words, in one of (so far) two representations:
# a) phonetic transcription: tab-delimited text, with a child tier of glosses
# b) a set of direct children, each with 1 or 2 child elements of their own
directRootChildElement = tierInfo["ANNOTATION_REF"] == self.rootSpokenTextID
hasChildren = any((self.tbl["ANNOTATION_REF"] == tierInfo["ANNOTATION_ID"]).tolist())
#pdb.set_trace()
if(hasTimes):
return("spokenText")
if(not hasText):
return("empty")
if(directRootChildElement and hasChildren):
return("nativeMorpheme")
if(not directRootChildElement and not hasChildren):
return("nativeGlossOrFreeTranslation")
return("freeTranslation")
# hasTokenizedText = False
# if(hasText):
# hasTokenizedText = tierInfo['TEXT'].find("\t") > 0
# if((hasTimes)):
# return("spokenText")
# if(tierType == "phonemic" and hasTokenizedText):
# return("tokenizedWords")
# if(tierType == "translation" and hasTokenizedText):
# return("tokenizedGlosses")
# if(tierType == "translation" and hasText and not hasTokenizedText):
# return("freeTranslation")
# return ("unrecognized")
#----------------------------------------------------------------------------------------------------
def deduceSpokenTextID(self):
return(self.tbl.loc[pd.isnull(self.tbl['ANNOTATION_REF'])]["ANNOTATION_ID"][0])
#----------------------------------------------------------------------------------------------------
def deduceWordRepresentation(self):
rootSpokenTextID = self.deduceSpokenTextID()
tbl_emptyLinesRemoved = self.tbl.query("TEXT != ''")
# do not wish to count children with empty text fields
numberOfDirectChildrenOfRoot = tbl_emptyLinesRemoved.ix[self.tbl["ANNOTATION_REF"] == rootSpokenTextID].shape[0]
# add test for present but empty word tier, as in monkey line 1
if(numberOfDirectChildrenOfRoot == 1):
return("noWords")
elif(numberOfDirectChildrenOfRoot == 2):
return("tokenizedWords")
elif(numberOfDirectChildrenOfRoot > 2):
return("wordsDistributedInElements")
else:
print("unrecognized word representation")
#----------------------------------------------------------------------------------------------------
def getWordRepresentation(self):
return(self.wordRepresentation)
#----------------------------------------------------------------------------------------------------
def traverseAndClassify(self):
"""
assumes rootSpokenTextID and wordRepresentation have been figured out
at present, this method only identifies and assigns the freeTranslationRow
"""
rootID = self.rootSpokenTextID
self.spokenTextRow = self.tbl.ix[self.tbl["ANNOTATION_ID"] == rootID].index[0]
tbl = self.tbl # allows more compact expressions
self.wordRows = None
#pdb.set_trace()
# "noWords" "tokenizedWords" "wordsDistributedInElements"
if(self.wordRepresentation == "noWords"):
self.freeTranslationRow = self.tbl.ix[self.tbl["ANNOTATION_REF"] == rootID].index[0]
elif(self.wordRepresentation == "tokenizedWords"):
self.freeTranslationRow = tbl[(tbl.HAS_TABS == False) & (tbl.ANNOTATION_REF == self.rootSpokenTextID)].index.tolist()[0]
wordRow = tbl[(tbl.ANNOTATION_REF == rootID) & tbl.HAS_TABS].index.tolist()[0]
wordRowID = tbl.ix[wordRow, 'ANNOTATION_ID']
glossRow = tbl[tbl.ANNOTATION_REF == wordRowID].index.tolist()[0]
self.glossRows = [glossRow]
self.wordRows = [wordRow]
elif(self.wordRepresentation == "wordsDistributedInElements"):
self.freeTranslationRow = tbl[(tbl.HAS_SPACES == True) & (tbl.ANNOTATION_REF == self.rootSpokenTextID)].index.tolist()[0]
#----------------------------------------------------------------------------------------------------
def calculateSpacingOfPhonemeAndGlossTokens(self):
#phonemesTier = line0.getTable().loc[tbl['LINGUISTIC_TYPE_REF'] == "phonemic"]['TEXT']
#phonemeGlossesTier = line0.getTable().loc[tbl['LINGUISTIC_TYPE_REF'] == "translation"]['TEXT']
# import pdb; pdb.set_trace();
phonemesTierText = self.getTable().ix[1]['TEXT']
phonemeGlossesTierText = self.getTable().ix[3]['TEXT']
phonemes = phonemesTierText.split("\t")
phonemeGlosses = phonemeGlossesTierText.split("\t")
print(phonemes)
print(phonemeGlosses)
assert(len(phonemes) == len(phonemeGlosses))
for i in range(len(phonemes)):
phonemeSize = len(phonemes[i])
glossSize = len(phonemeGlosses[i])
self.phonemeSpacing.append(max(phonemeSize, glossSize) + 1)
#----------------------------------------------------------------------------------------------------
def show(self):
pprint(vars(self))
#----------------------------------------------------------------------------------------------------
def getSpokenText(self):
return(self.tbl.ix[0, "TEXT"])
#----------------------------------------------------------------------------------------------------
def extractWords(self):
tokens = []
if(self.wordRepresentation == "tokenizedWords"):
self.words = self.tbl.ix[self.wordRows[0], "TEXT"].split("\t")
self.glosses = self.tbl.ix[self.glossRows[0], "TEXT"].split("\t")
if(self.wordRepresentation == "wordsDistributedInElements"):
tokens = ["not", "figured", "out", "yet"]
return(tokens)
#----------------------------------------------------------------------------------------------------
def spokenTextToHtml(self, htmlDoc):
spokenTextTier = self.spokenTextRow
tierObj = self.getTable().ix[spokenTextTier].to_dict()
speechText = tierObj['TEXT']
with htmlDoc.tag("div", klass="speech-tier"):
htmlDoc.text(speechText)
#----------------------------------------------------------------------------------------------------
def wordsToHtml(self, htmlDoc):
#tierNumber = self.wordRows[0]
#tierObj = self.getTable().ix[tierNumber].to_dict()
#phonemes = tierObj['TEXT'].split("\t")
#styleString = "grid-template-columns: %s;" % ''.join(["%dch " % len(p) for p in phonemes])
styleString = "grid-template-columns: %s;" % ''.join(["%dch " % p for p in self.wordSpacing])
with htmlDoc.tag("div", klass="phoneme-tier", style=styleString):
for word in self.words:
with htmlDoc.tag("div", klass="phoneme-cell"):
htmlDoc.text(word)
#----------------------------------------------------------------------------------------------------
def glossesToHtml(self, htmlDoc):
#tierObj = self.getTable().ix[tierNumber].to_dict()
#phonemeGlosses = tierObj['TEXT'].split("\t")
#styleString = "grid-template-columns: %s;" % ''.join(["%dch " % len(p) for p in phonemeGlosses])
styleString = "grid-template-columns: %s;" % ''.join(["%dch " % p for p in self.wordSpacing])
with htmlDoc.tag("div", klass="phoneme-tier", style=styleString):
for gloss in self.glosses:
with htmlDoc.tag("div", klass="phoneme-cell"):
htmlDoc.text(gloss)
#----------------------------------------------------------------------------------------------------
def freeTranslationToHtml(self, htmlDoc):
freeTranslationTier = self.freeTranslationRow
tierObj = self.getTable().ix[freeTranslationTier].to_dict()
speechText = tierObj['TEXT']
with htmlDoc.tag("div", klass="freeTranslation-tier"):
htmlDoc.text(speechText)
#----------------------------------------------------------------------------------------------------
def getHtmlHead(self):
playerLibrary = open("player2.js").read();
#s = "<head><script src='http://localhost:9999/player2.js'></script></head>"
s = "<head></head>"
return(s)
#----------------------------------------------------------------------------------------------------
def toHtml(self):
print("toHtml, nothing here yet")
#----------------------------------------------------------------------------------------------------
def tmp(self):
# should be exactly one alignable tier, so it is safe to get the first one found
alignableTierType_id = self.doc.find("LINGUISTIC_TYPE[@TIME_ALIGNABLE='true']").attrib["LINGUISTIC_TYPE_ID"]
pattern = "TIER[@LINGUISTIC_TYPE_REF='%s']" % alignableTierType_id
alignableTierId = self.doc.find(pattern).attrib["TIER_ID"]
# we could use alignableTierId to select the tier, then the ALIGNABLE_ANNOTATION[lineNumber]
# but the ALIGNABLE_ANNOTATION[lineNumber] tag accomplishes the same thing
alignableElement = self.doc.findall("TIER/ANNOTATION/ALIGNABLE_ANNOTATION")[lineNumber]
alignableElementId = alignableElement.attrib["ANNOTATION_ID"]
timeSlot1 = alignableElement.attrib["TIME_SLOT_REF1"]
timeSlot2 = alignableElement.attrib["TIME_SLOT_REF2"]
#------------------------------------------------------------------------------------------------------------------------
def findChildren(doc, rootElement):
elementsToDo = [rootElement]
elementsCompleted = []
while(len(elementsToDo) > 0):
currentElement = elementsToDo[0]
parentRef = currentElement.attrib["ANNOTATION_ID"]
pattern = "TIER/ANNOTATION/REF_ANNOTATION[@ANNOTATION_REF='%s']" % parentRef
childElements = doc.findall(pattern)
elementsToDo.remove(currentElement)
elementsCompleted.append(currentElement)
if(len(childElements) > 0):
elementsToDo.extend(childElements)
return(elementsCompleted)
#------------------------------------------------------------------------------------------------------------------------
def buildTable(doc, lineElements):
tbl_elements = | pd.DataFrame(e.attrib for e in lineElements) | pandas.DataFrame |
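# --- Illustrative sketch (separate from the ELAN parser above) ---
# buildTable() turns the XML attributes of EAF annotation elements into a pandas
# DataFrame, one row per element. This toy example reproduces the idea with a
# hand-written fragment; the attribute names mirror the EAF format but the values
# are invented, and the imports are repeated so the sketch is self-contained.
from xml.etree import ElementTree as etree
import pandas as pd

toy_xml = """
<TIER>
  <ANNOTATION><REF_ANNOTATION ANNOTATION_ID="a2" ANNOTATION_REF="a1"/></ANNOTATION>
  <ANNOTATION><REF_ANNOTATION ANNOTATION_ID="a3" ANNOTATION_REF="a1"/></ANNOTATION>
</TIER>
"""
toy_elements = etree.fromstring(toy_xml).findall("ANNOTATION/REF_ANNOTATION")
toy_tbl = pd.DataFrame(e.attrib for e in toy_elements)
# toy_tbl has columns ANNOTATION_ID and ANNOTATION_REF, one row per element.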
import pandas as pd
import os
import numpy as np
SUMMARY_RESULTS='summaryResults/'
NUM_BINS = 100
BITS_IN_BYTE = 8.0
MILLISEC_IN_SEC = 1000.0
M_IN_B = 1000000.0
VIDEO_LEN = 44
K_IN_M = 1000.0
K_IN_B=1000.0
REBUF_P = 4.3
SMOOTH_P = 1
POWER_RANGE= 648 #Difference between max and min avg power
BASE_POWER_XCOVER=1800.0
BASE_POWER_GALAXY=1016.0 #Power consumption without streaming
########################################
###### This part is added for network power estimation
p_alpha = 210
p_betha = 28
SEGMENT_SIZE = 4.0
power_threshold = 1500
byte_to_KB = 1000
KB_to_MB=1000.0
def Estimate_Network_Power_Consumption(thr, chunk_file_size):
return (chunk_file_size * (p_alpha*1/thr+p_betha))
########################################################
# QOE calculation for the entire session based on the standard model
def calculateQOE(total_vmaf, total_rebuf, total_rebuf_count, total_vmaf_change, total_smooth_count):
return 0.07713539*total_vmaf-1.24971639*total_rebuf -2.87757412*total_rebuf_count -0.04938335*total_vmaf_change -1.436473*total_smooth_count
# aggregates the result for each streaming session
def CreateSummaryResults(video, model):
log_dir='./test_results/'+video+'/'
energy_g_dir='../powerMeasurementFiles/'+'galaxy_'+video.upper()+'_streaming.csv'
energy_x_dir='../powerMeasurementFiles/'+'xcover_'+video.upper()+'_streaming.csv'
en_g= | pd.read_csv(energy_g_dir) | pandas.read_csv |
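# --- Worked example for the models above (all numbers invented) ---
# Assuming Estimate_Network_Power_Consumption() and calculateQOE() from this
# snippet are in scope, this shows the inputs they expect: throughput and chunk
# size for the network-energy model, and per-session VMAF/rebuffering totals for
# the QoE model. None of the values below come from real measurements.
example_network_energy = Estimate_Network_Power_Consumption(thr=5.0, chunk_file_size=2.5)
example_qoe = calculateQOE(total_vmaf=80.0, total_rebuf=1.2, total_rebuf_count=1,
                           total_vmaf_change=15.0, total_smooth_count=3)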
import numpy as np
import pandas as pd
import random
from rpy2.robjects.packages import importr
utils = importr('utils')
prodlim = importr('prodlim')
survival = importr('survival')
#KMsurv = importr('KMsurv')
#cvAUC = importr('pROC')
#utils.install_packages('pseudo')
#utils.install_packages('prodlim')
#utils.install_packages('survival')
#utils.install_packages('KMsurv')
#utils.install_packages('pROC')
import rpy2.robjects as robjects
from rpy2.robjects import r
def sim_event_times_case1(trainset, num_samples):
train_n = int( .8 * num_samples)
test_n = int( (.2) * num_samples)
cov = np.random.standard_normal(size=(num_samples, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_samples)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatenate((treatment, cov), axis=1)
index = np.arange(len(trainset.targets))
idx_sample = np.random.choice(index, num_samples,replace=False)
digits = np.array(trainset.targets)[idx_sample]
denom = np.exp( 1.7* digits+ .6*np.cos(digits)*clinical_data[:,0]+.2*clinical_data[:,1]+.3*clinical_data[:,0] )
true_times = np.sqrt(-np.log( np.random.uniform(low=0,high=1,size=num_samples) )/ denom )
censored_times = np.random.uniform(low=0,high=true_times)
censored_indicator = np.random.binomial(n=1,p=.3,size=digits.shape[0])
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.sum(censored_indicator)/num_samples
cens_perc_train = np.sum(censored_indicator[:train_n])/train_n
df = np.concatenate((np.expand_dims(idx_sample,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
df = pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
#split data
train_clindata_all = df.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_all['time'])
train_clindata_all = train_clindata_all.iloc[order_time,:]
test_clindata_all = df.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_all['time'])
event_r = robjects.BoolVector(train_clindata_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalenv["time_r"] = time_r
robjects.globalenv["event_r"] = event_r
robjects.globalenv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_all = train_clindata_all.assign(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_df = pd.melt(train_clindata_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_df.rename(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymap= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_df = long_df.applymap(lambda s : mymap.get(s) if s in mymap else s)
train_val_clindata = pd.get_dummies(long_df, columns=['time_point'])
test_clindata_all = test_clindata_all.assign( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_df = pd.melt(test_clindata_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_df.rename(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_all = pd.merge(left=long_test_df, right=test_clindata_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_all.columns.tolist()
long_test_clindata = long_test_clindata_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = pd.get_dummies(long_test_clindata, columns=['time_point'])
covariates = df[['ID'] + df.columns.tolist()[8:]]
clindata = {'train_val':train_val_clindata , 'test':long_test_clindata, 'covariates': covariates,'time_train': train_clindata_all['time'], 'event_train': train_clindata_all['event'], 'slide_id_test': test_clindata_all['ID'], 'cutoff': cutoff , 'cens': cens_perc, 'cens_train': cens_perc_train}
return clindata
def sim_event_times_case2(trainset, num_samples):
train_n = int( .8 * num_samples)
test_n = int( (.2) * num_samples)
cov = np.random.standard_normal(size=(num_samples, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_samples)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatenate((treatment, cov), axis=1)
index = np.arange(len(trainset.targets))
idx_sample = np.random.choice(index, num_samples,replace=False)
digits = np.array(trainset.targets)[idx_sample]
denom = np.exp( 1.7* digits+ .6*np.cos(digits)*clinical_data[:,0]+.2*clinical_data[:,1]+.3*clinical_data[:,0] )
true_times = np.sqrt(-np.log( np.random.uniform(low=0,high=1,size=num_samples) )/ denom )
denom = np.exp( 1.4*clinical_data[:,0]+2.6*clinical_data[:,1] -.2*clinical_data[:,2] )*6
censored_times = np.sqrt(-np.log(np.random.uniform(low=0,high=1,size=num_samples))/denom )
censored_indicator = (true_times > censored_times)*1
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.sum(censored_indicator)/num_samples
cens_perc_train = np.sum(censored_indicator[:train_n])/train_n
df = np.concatenate((np.expand_dims(idx_sample,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
df = pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
train_clindata_all = df.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_all['time'])
train_clindata_all = train_clindata_all.iloc[order_time,:]
test_clindata_all = df.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_all['time'])
event_r = robjects.BoolVector(train_clindata_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalenv["time_r"] = time_r
robjects.globalenv["event_r"] = event_r
robjects.globalenv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_all = train_clindata_all.assign(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_df = pd.melt(train_clindata_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_df.rename(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymap= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_df = long_df.applymap(lambda s : mymap.get(s) if s in mymap else s)
train_val_clindata = pd.get_dummies(long_df, columns=['time_point'])
test_clindata_all = test_clindata_all.assign( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_df = pd.melt(test_clindata_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_df.rename(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_all = pd.merge(left=long_test_df, right=test_clindata_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_all.columns.tolist()
long_test_clindata = long_test_clindata_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = pd.get_dummies(long_test_clindata, columns=['time_point'])
covariates = df[['ID'] + df.columns.tolist()[8:]]
clindata = {'train_val':train_val_clindata , 'test':long_test_clindata, 'covariates': covariates,'time_train': train_clindata_all['time'], 'event_train': train_clindata_all['event'], 'slide_id_test': test_clindata_all['ID'], 'cutoff': cutoff , 'cens': cens_perc, 'cens_train': cens_perc_train}
return clindata
def sim_event_times_case3(trainset, num_samples):
train_n = int( .8 * num_samples)
test_n = int( (.2) * num_samples)
cov = np.random.standard_normal(size=(num_samples, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_samples)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatenate((treatment, cov), axis=1)
index = np.arange(len(trainset.targets))
idx_sample = np.random.choice(index, num_samples,replace=False)
digits = np.array(trainset.targets)[idx_sample]
denom = np.exp( 1* digits- 1.6*np.cos(digits)*clinical_data[:,0]+.3*clinical_data[:,1]*clinical_data[:,0] )* (.7/2)
true_times = np.sqrt(-np.log( np.random.uniform(low=0,high=1,size=num_samples) )/ denom )
#denom = np.exp( 1.4*clinical_data[:,0]+2.6*clinical_data[:,1] -.2*clinical_data[:,2] )*6
shape_c = np.maximum(0.001,np.exp(-1.8*clinical_data[:,0]+1.4*clinical_data[:,1]+1.5 *clinical_data[:,0]*clinical_data[:,1]))
censored_times = np.random.gamma(shape_c,digits, num_samples)
censored_indicator = (true_times > censored_times)*1
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.sum(censored_indicator)/num_samples
cens_perc_train = np.sum(censored_indicator[:train_n])/train_n
df = np.concatenate((np.expand_dims(idx_sample,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
df = pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
train_clindata_all = df.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_all['time'])
train_clindata_all = train_clindata_all.iloc[order_time,:]
test_clindata_all = df.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_all['time'])
event_r = robjects.BoolVector(train_clindata_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalenv["time_r"] = time_r
robjects.globalenv["event_r"] = event_r
robjects.globalenv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_all = train_clindata_all.assign(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_df = pd.melt(train_clindata_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_df.rename(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymap= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_df = long_df.applymap(lambda s : mymap.get(s) if s in mymap else s)
train_val_clindata = pd.get_dummies(long_df, columns=['time_point'])
test_clindata_all = test_clindata_all.assign( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_df = pd.melt(test_clindata_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_df.rename(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_all = pd.merge(left=long_test_df, right=test_clindata_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_all.columns.tolist()
long_test_clindata = long_test_clindata_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = pd.get_dummies(long_test_clindata, columns=['time_point'])
covariates = df[['ID'] + df.columns.tolist()[8:]]
clindata = {'train_val':train_val_clindata , 'test':long_test_clindata, 'covariates': covariates,'time_train': train_clindata_all['time'], 'event_train': train_clindata_all['event'], 'slide_id_test': test_clindata_all['ID'], 'cutoff': cutoff , 'cens': cens_perc, 'cens_train': cens_perc_train}
return clindata
def sim_event_times_case4(trainset, num_samples):
train_n = int( .8 * num_samples)
test_n = int( (.2) * num_samples)
cov = np.random.standard_normal(size=(num_samples, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_samples)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatenate((treatment, cov), axis=1)
index = np.arange(len(trainset.targets))
idx_sample = np.random.choice(index, num_samples,replace=False)
digits = np.array(trainset.targets)[idx_sample]
shape = np.maximum(0.001,np.exp(.5*digits+.2*clinical_data[:,0] * np.cos(digits)+1.5*clinical_data[:,1]+1.2*clinical_data[:,0]))
true_times = np.random.gamma(shape,digits, num_samples) # shape = shape; scale = digits
censored_times = np.random.uniform(low=0,high=true_times)
censored_indicator = np.random.binomial(n=1,p=.3,size=digits.shape[0])
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.sum(censored_indicator)/num_samples
cens_perc_train = np.sum(censored_indicator[:train_n])/train_n
df = np.concatenate((np.expand_dims(idx_sample,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
df = pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
train_clindata_all = df.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_all['time'])
train_clindata_all = train_clindata_all.iloc[order_time,:]
test_clindata_all = df.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_all['time'])
event_r = robjects.BoolVector(train_clindata_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalenv["time_r"] = time_r
robjects.globalenv["event_r"] = event_r
robjects.globalenv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_all = train_clindata_all.assign(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_df = pd.melt(train_clindata_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_df.rename(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymap= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_df = long_df.applymap(lambda s : mymap.get(s) if s in mymap else s)
train_val_clindata = | pd.get_dummies(long_df, columns=['time_point']) | pandas.get_dummies |
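# --- Minimal sketch of the pseudo-observation idea used above (numpy only) ---
# The R calls (prodlim + jackknife) compute, for each subject i, the leave-one-out
# pseudo-value n*theta_hat - (n-1)*theta_hat_(-i) of the risk 1 - S(t) at each
# cutoff, using the Kaplan-Meier estimator so that censoring is handled. The
# helper below shows only that arithmetic, with a plain empirical CDF and no
# censoring; it is not a substitute for the R computation above.
import numpy as np

def pseudo_values_ecdf(times: np.ndarray, cutoff: float) -> np.ndarray:
    n = len(times)
    theta_full = np.mean(times <= cutoff)                    # full-sample risk estimate
    pseudo = np.empty(n)
    for i in range(n):
        theta_loo = np.mean(np.delete(times, i) <= cutoff)   # leave-one-out estimate
        pseudo[i] = n * theta_full - (n - 1) * theta_loo
    return pseudo

# Without censoring the pseudo-values reduce to the 0/1 event indicators:
demo_pseudo = pseudo_values_ecdf(np.array([0.5, 1.2, 2.0, 3.3]), cutoff=1.5)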
from Bio import AlignIO
import pandas as pd
import os
import sys
# This script makes the file with allele ID similar to bionumerics output
script=sys.argv[0]
base_dir=sys.argv[1]+"/prod_fasta/"
allele_dir=base_dir+"../../all_alleles/"
os.chdir(allele_dir)
dic1={}
def Parse(filename,seqs):
file = open(filename)
seqs={}
name = ''
for line in file:
line = line.rstrip()
if line.startswith('>'):
name=line.replace('>',"")
seqs[name] = ''
else:
seqs[name] = seqs[name] + line
return seqs
for file in os.listdir("."):
if file.endswith(".fasta"):
dic1=dict(dic1,**Parse(file,dic1))
os.chdir(base_dir+'/seqrecords/pergene_seqrecords/muslce_output/')
dic3={}
for file in os.listdir("."):
if file.endswith("fasta2.fasta"):
corename=file.split("_")[0]
alignment=AlignIO.read(file,"fasta")
dic2={}
for record in alignment :
record.id1=record.id.split("_")[::-1][0]
dic2.setdefault(str(record.id1),[]).append(str(record.seq).replace("-",""))
for key1 in dic1:
for key2 in dic2:
if dic1.get(key1) in dic2.get(key2):
dic3.setdefault(str(key2),[]).append(str(key1))
file= open(base_dir+"../../new_49gene.list")
v=[]
for line in file:
line=line.rstrip()
for k,v in dic3.items():
if line not in str(v):
            v.append(line+"_"+"0")  # append in place; list.append() returns None, so the old "v = v.append(...)" rebinding is dropped
dic4={}
for k,v in dic3.items():
v=sorted(v,key=lambda x:x.split("_")[0])
dic4[k]=v
df=pd.DataFrame.from_dict(dic4,orient='index')
df.columns=df.iloc[0].apply(lambda x: x.split('_')[0])
df.to_csv("newTtable1_nogap.csv")
dfnew= | pd.read_csv("newTtable1_nogap.csv",index_col=0) | pandas.read_csv |
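# --- Illustrative sketch of the FASTA-to-dict step above (toy sequences) ---
# Parse() maps each ">header" line to its concatenated sequence. The same logic is
# shown here on an in-memory string so it can run without any files; the allele
# names and sequences are invented.
def parse_fasta_string(text: str) -> dict:
    seqs, name = {}, None
    for raw in text.splitlines():
        raw = raw.rstrip()
        if raw.startswith(">"):
            name = raw[1:]
            seqs[name] = ""
        elif name is not None:
            seqs[name] += raw
    return seqs

toy_seqs = parse_fasta_string(">alleleA_1\nACGT\nACGA\n>alleleA_2\nACGT\n")
# toy_seqs == {"alleleA_1": "ACGTACGA", "alleleA_2": "ACGT"}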
from datetime import datetime, timedelta
import sys
import fnmatch
import os
import numpy as np
from scipy import io as sio
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import zarr
from numcodecs import Blosc
from mat_files import masc_mat_file_to_dict,masc_mat_triplet_to_dict,triplet_images_reshape
from mat_files import digits_dictionary
from weather_data import blowingsnow
sys.path.insert(1,'/home/grazioli/CODES/python/py-masc-gan-3Deval')
from gan3d_lib import gan3d
def files_in_dir_recursive(top, pattern="*", include_dir=True):
for (root, dirs, files) in os.walk(top):
match_files = (fn for fn in files if
fnmatch.fnmatchcase(fn, pattern))
if include_dir:
match_files = (os.path.join(root,fn) for fn in match_files)
for fn in match_files:
yield fn
def find_matched(data_dir, min_files=3):
files = {}
for fn_full in files_in_dir_recursive(
data_dir, pattern="*_flake_*_cam_?.mat"):
fn = fn_full.split("/")[-1]
fn = ".".join(fn.split(".")[:-1])
fn_parts = fn.split("_")
cam = int(fn_parts[-1])
flake_id = int(fn_parts[-3])
timestamp = "_".join(fn.split("_")[:2])
time = datetime.strptime(timestamp, "%Y.%m.%d_%H.%M.%S")
key = (time,flake_id)
if key not in files:
files[key] = {}
files[key][cam] = fn_full
print(len(files))
files = {k: files[k] for k in files if len(files[k])>=min_files}
print(len(files))
delete_keys = []
for (i,k) in enumerate(files):
if i%1000==0:
print("{}/{}, {} deleted".format(i,len(files),len(delete_keys)))
if any(not valid_file(files[k][c]) for c in files[k]):
delete_keys.append(k)
for k in delete_keys:
del files[k]
print(len(files))
return files
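# --- Filename-parsing example (made-up filename, same pattern as find_matched) ---
# find_matched() keys triplets by (timestamp, flake_id) extracted from names like
# "2015.02.03_12.30.05_flake_123_cam_0.mat"; this added helper reproduces that
# split in isolation so the convention is easy to check. It reuses the datetime
# import at the top of this module.
def parse_masc_filename(fn):
    stem = ".".join(fn.split("/")[-1].split(".")[:-1])
    parts = stem.split("_")
    cam = int(parts[-1])
    flake_id = int(parts[-3])
    time = datetime.strptime("_".join(stem.split("_")[:2]), "%Y.%m.%d_%H.%M.%S")
    return time, flake_id, cam

# parse_masc_filename("2015.02.03_12.30.05_flake_123_cam_0.mat")
# -> (datetime(2015, 2, 3, 12, 30, 5), 123, 0)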
def valid_file(fn, xhi_min=7,
max_intens_min=0.03,
min_size=8,
max_size=2048):
m = sio.loadmat(fn)
xhi = m["roi"]["xhi"][0,0][0,0]
if xhi < xhi_min:
return False
max_intens = m["roi"]["max_intens"][0,0][0,0]
if max_intens < max_intens_min:
return False
shape = m["roi"]["data"][0,0].shape
size = np.max(shape)
if not (min_size <= size <= max_size):
return False
# Check if any nan in riming
if np.isnan(m["roi"][0,0]['riming_probs'][0]).any():
return False
return True
def valid_triplet(triplet_files,
min_size=12,
max_ysize_var=1.4,
max_ysize_delta=60, # Pixels
xhi_low=8,
xhi_high=8.5):
mat = [sio.loadmat(triplet_files[i]) for i in range(3)]
def get_size(m):
shape = m["roi"]["data"][0,0].shape
return shape[0]
def get_xhi(m):
return m["roi"]["xhi"][0,0][0,0]
sizes = [get_size(m) for m in mat]
largest = max(sizes)
smallest = min(sizes)
xhis = [get_xhi(m) for m in mat]
xhi_min = min(xhis)
xhi_max = max(xhis)
return (largest>=min_size) and (largest-smallest <= max_ysize_delta) and (largest/smallest<=max_ysize_var) and ((xhi_min >= xhi_low) or (xhi_max >= xhi_high))
def filter_triplets(files):
return {k: files[k] for k in files if valid_triplet(files[k])}
def filter_condensation(df0,
threshold_var=2.5,
Dmax_min=0.001):
"""
Filter condensation based on excessive stability of adjacent descriptors
Input:
df0: pandas dataframe of one camera
threshold_var : minimum allowed variation [%] of some descriptors
        Dmax_min : minimum Dmax to consider for this filter
"""
df0_s = df0.copy() #Save
ind=[]
df0 = df0[['area','perim','Dmax','roi_centroid_X','roi_centroid_Y']]
# Shift forward
diff1 = 100*np.abs(df0.diff()/df0)
diff1['mean'] = diff1.mean(axis=1)
diff1 = diff1[['mean']]
# Shift backward
diff2 = 100*np.abs(df0.diff(periods=-1)/df0)
diff2['mean'] = diff2.mean(axis=1)
diff2 = diff2[['mean']]
df0_1= df0_s[diff1['mean'] < threshold_var]
df0_1 = df0_1[df0_1.Dmax > Dmax_min]
ind.extend(np.asarray(df0_1.index))
df0_2= df0_s[diff2['mean'] < threshold_var]
df0_2 = df0_2[df0_2.Dmax > Dmax_min]
ind.extend(np.asarray(df0_2.index))
return ind
def create_triplet_dataframes(triplet_files, out_dir,campaign_name='EPFL'):
"""
Put in a dataframe the descriptors of the images for each cam
"""
c0=[]
c1=[]
c2=[]
tri=[]
for (i,k) in enumerate(sorted(triplet_files.keys())):
if i%10000 == 0:
print("{}/{}".format(i,len(triplet_files)))
triplet = triplet_files[k]
# Create and increment the data frames
c0.append(masc_mat_file_to_dict(triplet[0]))
c1.append(masc_mat_file_to_dict(triplet[1]))
c2.append(masc_mat_file_to_dict(triplet[2]))
tri.append(masc_mat_triplet_to_dict(triplet,campaign=campaign_name))
c0 = pd.DataFrame.from_dict(c0)
c1 = pd.DataFrame.from_dict(c1)
c2 = pd.DataFrame.from_dict(c2)
tri = pd.DataFrame.from_dict(tri)
# Filter possible contaminations from condensation
print("Filtering possible condensation")
ind=[]
ind.extend(filter_condensation(c0))
ind.extend(filter_condensation(c1))
ind.extend(filter_condensation(c2))
bad_indexes = list(set(ind))
bad_indexes.sort()
c0.drop(bad_indexes,inplace=True)
c0 = c0.reset_index()
c1.drop(bad_indexes,inplace=True)
c1 = c1.reset_index()
c2.drop(bad_indexes,inplace=True)
c2 = c2.reset_index()
tri.drop(bad_indexes,inplace=True)
tri = tri.reset_index()
print("Removed flakes total: "+str(len(bad_indexes)))
# Write tables
table = pa.Table.from_pandas(c0)
pq.write_table(table, out_dir+campaign_name+'_cam0.parquet')
table = pa.Table.from_pandas(c1)
pq.write_table(table, out_dir+campaign_name+'_cam1.parquet')
table = pa.Table.from_pandas(c2)
pq.write_table(table, out_dir+campaign_name+'_cam2.parquet')
table = pa.Table.from_pandas(tri)
pq.write_table(table, out_dir+campaign_name+'_triplet.parquet')
# Update the triplet files removing the filtered files
sorted_keys = sorted(triplet_files)
remove_keys= [sorted_keys[bad_indexes[jj]] for jj in range(len(bad_indexes))]
return {k: triplet_files[k] for k in triplet_files if k not in remove_keys }
def create_triplet_image_array(triplet_files, out_dir,campaign_name='EPFL',dim_in=1024,chunks_n=16):
"""
    Create an image array of (resized) triplets and store it on disk for each campaign
"""
# Define the output array (data flushed directly)
compressor=Blosc(cname='zstd', clevel=2, shuffle=Blosc.BITSHUFFLE)
z1 = zarr.open(out_dir+campaign_name+'.zarr', mode='w',
shape = [len(triplet_files),dim_in,dim_in,3],compressor=compressor,
dtype='u1',chunks=[chunks_n,dim_in,dim_in,3] ) # Size N files, 1024, 1024, 3
for (i,k) in enumerate(sorted(triplet_files.keys())):
if i%10000 == 0:
print("{}/{}".format(i,len(triplet_files)))
triplet = triplet_files[k]
z1[i,:,:,:] = triplet_images_reshape(triplet,newshape=[dim_in,dim_in])
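# --- Read-back sketch for the zarr store written above (placeholder path) ---
# The store holds a single uint8 array of shape (N, dim_in, dim_in, 3); reading
# images back is plain array indexing. The default path below is a placeholder,
# not a real campaign output, and zarr is already imported at the top of this module.
def read_first_triplet(store_path="output/EPFL.zarr"):
    z = zarr.open(store_path, mode="r")   # open the existing store read-only
    return z[0]                           # (dim_in, dim_in, 3): one channel per camera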
def add_gan3d_to_parquet(triplet_parquet,gan3d_folder):
"""
Add GAN3D mass and volume to triplet files
"""
ganfile = gan3d_folder+'masc_3D_print_grids.nc'
mascfile = gan3d_folder+'masc_3D_print_triplets.nc'
# Get the gan3d files
g3d = gan3d(ganfile=ganfile,mascfile=mascfile)
# Read the parquet file
table = pd.read_parquet(triplet_parquet)
flake_uid = table.datetime.apply(lambda x: x.strftime('%Y%m%d%H%M%S'))+'_'+table.flake_number_tmp.apply(str)
# Get GAN time in proper format and fill the precooked vector
mass = np.asarray(table['gan3d_mass'])
vol = np.asarray(table['gan3d_volume'])
r_g = np.asarray(table['gan3d_gyration'])
for i in range(len(g3d.time)):
tt = timestamp=datetime.utcfromtimestamp(g3d.time[i]).strftime("%Y%m%d%H%M%S")+'_'+str(g3d.particle_id[i])
try:
mass[(np.where(flake_uid == tt))]=g3d.mass_1d[i]
vol[(np.where(flake_uid == tt))]=g3d.V_ch[i]
r_g[(np.where(flake_uid == tt))]=g3d.r_g[i]
except:
print("Flake id: "+tt+" not in the database")
table['gan3d_mass'] = mass
table['gan3d_volume'] = vol
table['gan3d_gyration'] = r_g
# Store table and overwrite
table=table.round(decimals=digits_dictionary())
table = pa.Table.from_pandas(table)
pq.write_table(table, triplet_parquet)
def add_bs_to_parquet(triplet_parquet,file_bs,verbose=False):
"""
Add Blowing Snow information to triplet files
Input:
triplet_parquet: parquet file with info about MASC triplets
file_bs : CSV file of blowing snow
"""
# Read the parquet file
table = pd.read_parquet(triplet_parquet)
flake_uid = table.datetime.apply(lambda x: x.strftime('%Y%m%d%H%M%S'))+'_'+table.flake_number_tmp.apply(str)
# Read the blowingsnow file
bs = blowingsnow(file_bs)
# Fill the precooked vector
bs_nor_angle = np.asarray(table['bs_normalized_angle'])
bs_mix_ind = np.asarray(table['bs_mixing_ind'])
bs_precip_type = table['bs_precip_class_name'].copy()
# Intersect arrays
ind1=np.intersect1d(flake_uid,bs.flake_uid,return_indices=True)[1]
ind2=np.intersect1d(flake_uid,bs.flake_uid,return_indices=True)[2]
# Fill
bs_nor_angle[ind1] = bs.df["Normalized_Angle"][ind2]
bs_mix_ind[ind1] = bs.df["Flag_mixed"][ind2]
# Add the class ID
bs_class_id = np.asarray([0] * len(table))
# Fill also a precooked flag
bs_precip_type[bs_nor_angle > 0.881]='blowing_snow'
bs_class_id[bs_nor_angle > 0.881] = 3
bs_precip_type[bs_nor_angle < 0.193]='precip'
bs_class_id[bs_nor_angle < 0.193] = 1
bs_precip_type[bs_mix_ind >= 0.0]='mixed'
bs_class_id[bs_mix_ind >= 0.0] = 2
table['bs_normalized_angle'] = bs_nor_angle
table['bs_mixing_ind'] = bs_mix_ind
table['bs_precip_class_name'] = bs_precip_type
table['bs_precip_class_id'] = bs_class_id
# Store table and overwrite
table=table.round(decimals=digits_dictionary())
table = pa.Table.from_pandas(table)
pq.write_table(table, triplet_parquet)
def add_weather_to_parquet(triplet_parquet,file_weather, verbose=False):
""""
Add weather data (from pre-compiled minute-scaled pickle) to the triplet file
"""
# Read the parquet file and get the time string
table = pd.read_parquet(triplet_parquet)
    flake_uid = table.datetime.round('min') # Round to the minute; the weather info is at minute resolution
# Read the blowingsnow file
env = pd.read_pickle(file_weather)
# Fill the precooked vectors of environmental info
T = np.asarray(table['env_T'])
P = np.asarray(table['env_P'])
DD = np.asarray(table['env_DD'])
FF = np.asarray(table['env_FF'])
RH = np.asarray(table['env_RH'])
    # Ugly, inefficient loop
for i in range(len(flake_uid)):
ID = flake_uid[i]
        # Find the closest environmental info
try:
index = env.index.searchsorted(ID)
vec = env.iloc[index]
T[i] = vec["T"]
P[i] = vec["P"]
DD[i] = vec["DD"]
FF[i] = vec["FF"]
RH[i] = vec["RH"]
except:
if verbose:
print("Cannot find environemntal information for this datetime: ")
print(ID)
table['env_T'] = T
table['env_P'] = P
table['env_DD'] = DD
table['env_FF'] = FF
table['env_RH'] = RH
# Store table and overwrite
table=table.round(decimals=digits_dictionary())
table = pa.Table.from_pandas(table)
pq.write_table(table, triplet_parquet)
def merge_triplet_dataframes(path,
campaigns,
out_path,
out_name='all'):
"""
Merge triplet dataframes into a single one.
Input:
path : input path
campaigns: list of campaign names (parquet must exist)
out_path: out_path
out_name: string used in the output name
"""
# Dataframes
databases=['cam0','cam1','cam2','triplet']
for db in databases:
print('Merging database: '+db)
# Read the parquet files
for i in range(len(campaigns)):
print('Merging campaign: '+campaigns[i])
if i == 0:
df = pd.read_parquet(path+campaigns[i]+'_'+db+'.parquet')
else:
df = pd.concat([df, pd.read_parquet(path+campaigns[i]+'_'+db+'.parquet')], axis=0).reset_index(drop=True)
# Write to file ---------------------
print('Writing output')
if db == 'triplet':
df = df.rename(columns={'n_roi':'flake_n_roi'})
df=df.drop(columns="index")
df=df.round(decimals=digits_dictionary())
table = pa.Table.from_pandas(df)
pq.write_table(table, out_path+out_name+'_'+db+'.parquet')
def add_trainingset_flag(cam_parquet,
trainingset_pkl_path,
cam=None):
"""
Add to a single-cam parquet the information flags (adding columns)
indicating if a given cam view was used in a training set for
melting, hydro classif or riming degree
Input
cam_parquet: parquet file to add the columns to
trainingset_pkl_path: path where the pickles of the trainingset flags are locally stored
cam = 'cam0', 'cam1' or 'cam2'
"""
print('CAM: '+cam)
# Read the parquet file
table = pd.read_parquet(cam_parquet)
flake_uid = table.datetime.apply(lambda x: x.strftime('%Y.%m.%d_%H.%M.%S'))+'_flake_'+table.flake_number_tmp.apply(str)
# 1 Add hydro columns
add = pd.read_pickle(trainingset_pkl_path+'hydro_trainingset_'+cam+'.pkl')
is_in = np.asarray([0] * len(table))
value_in = np.asarray([np.nan] * len(table))
# Intersect
intersect = np.intersect1d(flake_uid,add.flake_id,return_indices=True)
ind1=intersect[1]
ind2=intersect[2]
# Fill
is_in[ind1] = 1
value_in[ind1] = add.class_id.iloc[ind2]
table['hl_snowflake'] = is_in
table['hl_snowflake_class_id'] = value_in
print('Found: '+str(len(ind1))+' in training, for hydro' )
# 2 Add melting columns
add = | pd.read_pickle(trainingset_pkl_path+'melting_trainingset_'+cam+'.pkl') | pandas.read_pickle |
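# --- Standalone sketch of the blowing-snow labelling rule used above ---
# add_bs_to_parquet() assigns a class id from the normalized angle and the mixing
# index: >0.881 -> blowing snow (3), <0.193 -> precipitation (1), and any defined
# mixing index overrides both with "mixed" (2). The thresholds are copied from the
# code above; treating everything else as 0 ("undetermined") is an assumption of
# this sketch.
import math

def bs_class_id_from_angle(normalized_angle, mixing_ind=float("nan")):
    if not math.isnan(mixing_ind) and mixing_ind >= 0.0:
        return 2            # mixed
    if normalized_angle > 0.881:
        return 3            # blowing snow
    if normalized_angle < 0.193:
        return 1            # precipitation
    return 0                # undetermined

# e.g. bs_class_id_from_angle(0.95) -> 3, bs_class_id_from_angle(0.10) -> 1,
#      bs_class_id_from_angle(0.50, 0.2) -> 2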
"""
Tests the coalescence tree object.
"""
import os
import random
import shutil
import sqlite3
import sys
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from setup_tests import setUpAll, tearDownAll, skipLongTest
from pycoalescence import Simulation
from pycoalescence.coalescence_tree import CoalescenceTree, get_parameter_description
from pycoalescence.sqlite_connection import check_sql_table_exist
def setUpModule():
"""
Creates the output directory and moves logging files
"""
setUpAll()
t = CoalescenceTree("sample/sample.db")
t.clear_calculations()
def tearDownModule():
"""
Removes the output directory
"""
tearDownAll()
class TestNullSimulationErrors(unittest.TestCase):
"""
Tests that simulations that are not linked raise the correct error.
"""
def testRaisesError(self):
"""
Tests that a null simulation will raise an error when any operation is performed.
"""
t = CoalescenceTree()
with self.assertRaises(RuntimeError):
t.get_species_richness()
with self.assertRaises(RuntimeError):
t.calculate_fragment_richness()
with self.assertRaises(RuntimeError):
t.calculate_alpha_diversity()
with self.assertRaises(RuntimeError):
t.calculate_beta_diversity()
with self.assertRaises(RuntimeError):
t.calculate_fragment_abundances()
with self.assertRaises(RuntimeError):
t.calculate_fragment_octaves()
with self.assertRaises(RuntimeError):
t.calculate_octaves()
with self.assertRaises(RuntimeError):
t.get_fragment_list()
with self.assertRaises(RuntimeError):
t.get_alpha_diversity()
with self.assertRaises(RuntimeError):
t.get_beta_diversity()
with self.assertRaises(RuntimeError):
t.get_community_references()
with self.assertRaises(RuntimeError):
t.get_metacommunity_references()
with self.assertRaises(RuntimeError):
t.get_species_locations()
with self.assertRaises(RuntimeError):
t.get_species_abundances()
with self.assertRaises(RuntimeError):
t.get_species_list()
with self.assertRaises(RuntimeError):
_ = t.get_simulation_parameters()
with self.assertRaises(RuntimeError):
t.get_fragment_abundances("null", 1)
with self.assertRaises(RuntimeError):
t.get_species_richness()
with self.assertRaises(RuntimeError):
t.get_octaves(1)
class TestParameterDescriptions(unittest.TestCase):
"""
Tests that program correctly reads from the parameter_descriptions.json dictionary.
"""
def testReadsCorrectly(self):
"""
Tests that the dictionary is read correctly.
"""
tmp_dict = {
"habitat_change_rate": "the rate of change from present density maps to historic density maps",
"sample_file": "the sample area map for spatially selective sampling. Can be null to sample all " "cells",
"sample_x": "the sample map x dimension",
"sample_y": "the sample map y dimension",
"sample_x_offset": "the sample x map offset from the grid",
"sample_y_offset": "the sample y map offset from the grid",
"output_dir": "the output directory for the simulation database",
"seed": "the random seed to start the simulation, for repeatability",
"coarse_map_x": "the coarse density map x dimension",
"fine_map_file": "the density map file location at the finer resolution, covering a smaller area",
"tau": "the tau dispersal value for fat-tailed dispersal",
"grid_y": "the simulated grid y dimension",
"dispersal_relative_cost": "the relative rate of moving through non-habitat compared to habitat",
"fine_map_y_offset": "the number of cells the fine map is offset from the sample map in the y "
"dimension, at the fine resolution",
"gen_since_historical": "the number of generations that occur before the historical, or historic,"
" state is reached",
"dispersal_method": "the dispersal method used. Can be one of 'normal', 'norm-uniform' or " "'fat-tail'.",
"historical_fine_map": "the historical, or historic, coarse density map file location",
"coarse_map_scale": "the scale of the coarse density map compared to the fine density map. 1 "
"means equal density",
"grid_x": "the simulated grid x dimension",
"coarse_map_file": "the density map file location at the coarser resolution, covering a larger " "area",
"min_num_species": "the minimum number of species known to exist (currently has no effect)",
"historical_coarse_map": "the historical, or historic, coarse density map file location",
"m_probability": "the probability of choosing from the uniform dispersal kernel in normal-uniform"
" dispersal",
"sigma": "the sigma dispersal value for normal, fat-tailed and normal-uniform dispersals",
"deme": "the number of individuals inhabiting a cell at a map density of 1",
"time_config_file": "will be 'set' if temporal sampling is used, 'null' otherwise",
"coarse_map_y": "the coarse density map y dimension",
"fine_map_x": "the fine density map x dimension",
"coarse_map_y_offset": "the number of cells the coarse map is offset from the fine map in the y "
"dimension, at the fine resolution",
"cutoff": "the maximal dispersal distance possible, for normal-uniform dispersal",
"fine_map_y": "the fine density map y dimension",
"sample_size": "the proportion of individuals to sample from each cell (0-1)",
"fine_map_x_offset": "the number of cells the fine map is offset from the sample map in the x "
"dimension, at the fine resolution",
"speciation_rate": "the minimum speciation rate the simulation was run with",
"task": "the job or task reference number given to this simulation",
"coarse_map_x_offset": "the number of cells the coarse map is offset from the fine map in the x "
"dimension, at the fine resolution",
"landscape_type": "if false, landscapes have hard boundaries. Otherwise, can be infinite, "
"with 1s everywhere, or tiled_coarse or tiled_fine for repeated units of tiled "
"maps",
"max_time": "the maximum simulation time to run for (in seconds)",
"sim_complete": "set to true upon simulation completion, false for incomplete simulations",
"protracted": "if true, the simulation was run with protracted speciation.",
"min_speciation_gen": "the minimum number of generations required before speciation can occur",
"max_speciation_gen": "the maximum number of generations a lineage can exist before it is " "speciated",
"dispersal_map": "a tif file where rows represent cumulative dispersal probability to every other "
"cell, using the row number = x + (y * x_max)",
}
t = CoalescenceTree("sample/sample.db")
sim_output = t.get_simulation_parameters()
for key in sim_output.keys():
self.assertIn(key, get_parameter_description().keys())
self.assertEqual(get_parameter_description(key), t.get_parameter_description(key))
for key in get_parameter_description().keys():
self.assertIn(key, sim_output.keys())
for key in tmp_dict.keys():
self.assertEqual(tmp_dict[key], get_parameter_description(key))
self.assertDictEqual(tmp_dict, get_parameter_description())
with self.assertRaises(KeyError):
get_parameter_description(key="notakey")
dispersal_parameters = t.dispersal_parameters()
expected_disp_dict = {
"dispersal_method": "normal",
"sigma": 3.55,
"tau": 0.470149,
"m_probability": 0,
"cutoff": 0,
}
for key in dispersal_parameters.keys():
self.assertIn(key, tmp_dict.keys())
self.assertIn(key, expected_disp_dict.keys())
for key, val in expected_disp_dict.items():
self.assertIn(key, dispersal_parameters.keys())
if isinstance(val, float):
self.assertAlmostEqual(val, dispersal_parameters[key])
else:
self.assertEqual(val, dispersal_parameters[key])
class TestCoalescenceTreeSettingSpeciationParameters(unittest.TestCase):
"""Tests that the correct errors are raised when speciation parameters are supplied incorrectly."""
@classmethod
def setUpClass(cls):
"""Generates the temporary databases to attempt analysis on."""
src = [os.path.join("sample", "sample{}.db".format(x)) for x in [2, 3]]
cls.dst = [os.path.join("output", "sample{}.db".format(x)) for x in [2, 3]]
for tmp_src, tmp_dst in zip(src, cls.dst):
if os.path.exists(tmp_dst):
os.remove(tmp_dst)
shutil.copy(tmp_src, tmp_dst)
def testSetSpeciationRates(self):
"""Tests setting speciation rates works as intended and raises appropriate errors"""
ct = CoalescenceTree(self.dst[0])
for attempt in ["a string", ["a", "string"], [["list", "list2"], 0.2, 0.1], [None]]:
with self.assertRaises(TypeError):
ct._set_speciation_rates(attempt)
with self.assertRaises(RuntimeError):
ct._set_speciation_rates(None)
for attempt in [-10, -2.0, 1.1, 100, [-1, 0.1, 0.2], [0.2, 0.8, 1.1]]:
with self.assertRaises(ValueError):
ct._set_speciation_rates(attempt)
expected_list = [0.1, 0.2, 0.3]
ct._set_speciation_rates(expected_list)
self.assertEqual(expected_list, ct.applied_speciation_rates_list)
ct._set_speciation_rates(0.2)
self.assertEqual([0.2], ct.applied_speciation_rates_list)
def testSetRecordFragments(self):
"""Tests that setting the record_fragments flag works as expected."""
ct = CoalescenceTree(self.dst[0])
ct._set_record_fragments(True)
self.assertEqual("null", ct.record_fragments)
ct._set_record_fragments(False)
self.assertEqual("F", ct.record_fragments)
for each in ["PlotBiodiversityMetrics.db", "doesntexist.csv"]:
config_path = os.path.join("sample", each)
with self.assertRaises(IOError):
ct._set_record_fragments(config_path)
expected = os.path.join("sample", "FragmentsTest.csv")
ct._set_record_fragments(expected)
self.assertEqual(expected, ct.record_fragments)
def testSetRecordSpatial(self):
"""Tests that the setting the record_spatial flag works as expected"""
ct = CoalescenceTree(self.dst[0])
ct._set_record_spatial("T")
self.assertTrue(ct.record_spatial)
ct._set_record_spatial("F")
self.assertFalse(ct.record_spatial)
with self.assertRaises(TypeError):
ct._set_record_spatial("nota bool")
ct._set_record_spatial(True)
self.assertTrue(ct.record_spatial)
def testSetMetacommunityParameters(self):
"""Tests that setting the metacommunity parameters works as expected."""
ct = CoalescenceTree(self.dst[0])
for size, spec in [[-10, 0.1], [10, -0.1], [10, 1.1]]:
with self.assertRaises(ValueError):
ct.fragments = "F"
ct._set_record_fragments(False)
ct._set_record_spatial(False)
ct.times = [0.0]
ct._set_metacommunity_parameters(size, spec)
ct._set_metacommunity_parameters()
self.assertEqual(0.0, ct.metacommunity_size)
self.assertEqual(0.0, ct.metacommunity_speciation_rate)
ct._set_metacommunity_parameters(10, 0.1, "simulated")
self.assertEqual(10, ct.metacommunity_size)
self.assertEqual(0.1, ct.metacommunity_speciation_rate)
def testSetProtractedParameters(self):
"""Tests that setting the protracted parameters works as expected."""
ct = CoalescenceTree(self.dst[0])
with self.assertRaises(ValueError):
ct._set_protracted_parameters(0.1, 100)
ct = CoalescenceTree(self.dst[1])
ct._set_protracted_parameters(10, 100)
self.assertEqual((10.0, 100.0), ct.protracted_parameters[0])
ct.protracted_parameters = []
for min_proc, max_proc in [[200, 5000], [80, 50], [200, 11000]]:
with self.assertRaises(ValueError):
ct._check_protracted_parameters(min_proc, max_proc)
with self.assertRaises(ValueError):
ct._set_protracted_parameters(min_proc, max_proc)
with self.assertRaises(ValueError):
ct.add_protracted_parameters(min_proc, max_proc)
ct._set_protracted_parameters(50, 5000)
self.assertEqual((50.0, 5000.0), ct.protracted_parameters[0])
ct.protracted_parameters = []
ct._set_protracted_parameters()
self.assertEqual((0.0, 0.0), ct.protracted_parameters[0])
def testSetSampleFile(self):
"""Tests that the sample file is correctly set."""
ct = CoalescenceTree(self.dst[0])
for file in ["notafile.tif", os.path.join("sample", "sample.db")]:
with self.assertRaises(IOError):
ct._set_sample_file(file)
ct._set_sample_file()
self.assertEqual("null", ct.sample_file)
expected_file = os.path.join("sample", "SA_sample_coarse.tif")
ct._set_sample_file(expected_file)
self.assertEqual(expected_file, ct.sample_file)
def testSetTimes(self):
"""Tests that times are correctly set."""
ct = CoalescenceTree(self.dst[0])
ct._set_times(None)
self.assertEqual(0.0, ct.times[0])
with self.assertRaises(TypeError):
ct.add_times(0.5)
with self.assertRaises(TypeError):
ct.add_times([0.2, 0.5, "string"])
ct.times = None
ct.add_times([0.2, 0.5, 10])
self.assertEqual([0.0, 0.2, 0.5, 10.0], ct.times)
ct.times = None
ct._set_times(0.2)
self.assertEqual([0.0, 0.2], ct.times)
ct.times = None
ct._set_times([0.1, 0.5, 10.0])
self.assertEqual([0.0, 0.1, 0.5, 10.0], ct.times)
class TestCoalescenceTreeParameters(unittest.TestCase):
"""Tests that parameters are correctly obtained from the databases and the relevant errors are raised."""
def testCommunityParameters1(self):
"""Tests the community parameters make sense in a very simple community."""
shutil.copyfile(os.path.join("sample", "sample3.db"), os.path.join("output", "temp_sample3.db"))
t = CoalescenceTree(os.path.join("output", "temp_sample3.db"), logging_level=50)
self.assertEqual([], t.get_metacommunity_references())
self.assertEqual([1], t.get_community_references())
params = t.get_community_parameters(1)
expected_dict = {
"speciation_rate": 0.001,
"time": 0.0,
"fragments": 0,
"metacommunity_reference": 0,
"min_speciation_gen": 100.0,
"max_speciation_gen": 10000.0,
}
self.assertEqual(expected_dict, params)
with self.assertRaises(sqlite3.Error):
t.get_metacommunity_parameters(1)
with self.assertRaises(KeyError):
t.get_community_parameters(2)
with self.assertRaises(KeyError):
t.get_community_reference(0.1, 0.0, 0, 0, 0.0, min_speciation_gen=100.0, max_speciation_gen=10000.0)
with self.assertRaises(KeyError):
_ = t.get_community_reference(speciation_rate=0.001, time=0.0, fragments=False)
ref = t.get_community_reference(
speciation_rate=0.001, time=0.0, fragments=False, min_speciation_gen=100.0, max_speciation_gen=10000.0
)
self.assertEqual(1, ref)
self.assertEqual(expected_dict, t.get_community_parameters(ref))
t.wipe_data()
with self.assertRaises(IOError):
t.get_community_parameters_pd()
def testCommunityParameters2(self):
"""Tests the community parameters make sense in a very simple community."""
t = CoalescenceTree(os.path.join("sample", "sample4.db"))
self.assertEqual([1, 2, 3, 4, 5], t.get_community_references())
expected_params1 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 0}
expected_params2 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 1}
expected_params3 = {"speciation_rate": 0.2, "time": 0.0, "fragments": 0, "metacommunity_reference": 1}
expected_params4 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 2}
expected_params5 = {"speciation_rate": 0.2, "time": 0.0, "fragments": 0, "metacommunity_reference": 2}
expected_meta_params1 = {
"speciation_rate": 0.001,
"metacommunity_size": 10000.0,
"option": "simulated",
"external_reference": 0,
}
expected_meta_params2 = {
"speciation_rate": 0.001,
"metacommunity_size": 10000.0,
"option": "analytical",
"external_reference": 0,
}
params1 = t.get_community_parameters(1)
params2 = t.get_community_parameters(2)
params3 = t.get_community_parameters(3)
params4 = t.get_community_parameters(4)
params5 = t.get_community_parameters(5)
params6 = t.get_metacommunity_parameters(1)
params7 = t.get_metacommunity_parameters(2)
self.assertEqual([1, 2], t.get_metacommunity_references())
self.assertEqual(expected_params1, params1)
self.assertEqual(expected_params2, params2)
self.assertEqual(expected_params3, params3)
self.assertEqual(expected_params4, params4)
self.assertEqual(expected_params5, params5)
self.assertEqual(expected_meta_params1, params6)
self.assertEqual(expected_meta_params2, params7)
with self.assertRaises(KeyError):
t.get_community_parameters(6)
with self.assertRaises(KeyError):
t.get_metacommunity_parameters(3)
ref1 = t.get_community_reference(speciation_rate=0.1, time=0.0, fragments=False)
with self.assertRaises(KeyError):
t.get_community_reference(
speciation_rate=0.1, time=0.0, fragments=False, min_speciation_gen=0.1, max_speciation_gen=10000.0
)
ref2 = t.get_community_reference(
speciation_rate=0.1,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.001,
metacommunity_option="simulated",
)
with self.assertRaises(KeyError):
t.get_community_reference(
speciation_rate=0.1,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.01,
metacommunity_option="simulated",
)
ref3 = t.get_community_reference(
speciation_rate=0.2,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.001,
metacommunity_option="simulated",
)
ref4 = t.get_community_reference(
speciation_rate=0.1,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.001,
metacommunity_option="analytical",
)
ref5 = t.get_community_reference(
speciation_rate=0.2,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.001,
metacommunity_option="analytical",
)
self.assertEqual(1, ref1)
self.assertEqual(2, ref2)
self.assertEqual(3, ref3)
self.assertEqual(4, ref4)
self.assertEqual(5, ref5)
expected_community_params_list = []
for reference in t.get_community_references():
params = t.get_community_parameters(reference)
params["reference"] = reference
expected_community_params_list.append(params)
expected_community_params = pd.DataFrame(expected_community_params_list)
actual_output = t.get_community_parameters_pd()
assert_frame_equal(expected_community_params, actual_output, check_like=True)
def testIsComplete(self):
"""Tests sims are correctly identified as complete."""
t = CoalescenceTree(os.path.join("sample", "sample4.db"))
self.assertTrue(t.is_complete)
class TestCoalescenceTreeAnalysis(unittest.TestCase):
"""Tests analysis is performed correctly"""
@classmethod
def setUpClass(cls):
"""Sets up the Coalescence object test case."""
dst1 = os.path.join("output", "sampledb0.db")
for i in range(0, 11):
dst = os.path.join("output", "sampledb{}.db".format(i))
if os.path.exists(dst):
os.remove(dst)
shutil.copyfile(os.path.join("sample", "sample.db"), dst)
shutil.copyfile(os.path.join("sample", "nse_reference.db"), os.path.join("output", "nse_reference1.db"))
random.seed(2)
cls.test = CoalescenceTree(dst1, logging_level=50)
cls.test.clear_calculations()
cls.test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
cls.test.calculate_fragment_richness()
cls.test.calculate_fragment_octaves()
cls.test.calculate_octaves_error()
cls.test.calculate_alpha_diversity()
cls.test.calculate_beta_diversity()
cls.test2 = CoalescenceTree()
cls.test2.set_database(os.path.join("sample", "sample_nofrag.db"))
dstx = os.path.join("output", "sampledbx.db")
shutil.copyfile(dst1, dstx)
c = CoalescenceTree(dstx)
c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
c.calculate_goodness_of_fit()
@classmethod
def tearDownClass(cls):
"""
Removes the files from output."
"""
cls.test.clear_calculations()
def testComparisonDataNoExistError(self):
c = CoalescenceTree(os.path.join("sample", "sample.db"))
with self.assertRaises(IOError):
c.import_comparison_data(os.path.join("sample", "doesnotexist.db"))
def testFragmentOctaves(self):
num = self.test.cursor.execute(
"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'P09' AND octave == 0"
" AND community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 7, msg="Fragment octaves not correctly calculated.")
num = self.test.cursor.execute(
"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'P09' AND octave == 0 "
" AND community_reference == 2"
).fetchall()[0][0]
self.assertEqual(num, 7, msg="Fragment octaves not correctly calculated.")
num = self.test.cursor.execute(
"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'cerrogalera' AND octave == 1 "
" AND community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 3, msg="Fragment octaves not correctly calculated.")
num = self.test.cursor.execute(
"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'whole' AND octave == 1 "
" AND community_reference == 2"
).fetchall()[0][0]
self.assertEqual(num, 221, msg="Fragment octaves not correctly calculated.")
def testFragmentAbundances(self):
"""
Tests that fragment abundances are produced properly by the fragment detection functions.
"""
num = self.test.cursor.execute(
"SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'P09' " " AND community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")
num = self.test.cursor.execute(
"SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'P09' " " AND community_reference == 2"
).fetchall()[0][0]
self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")
num = self.test.cursor.execute(
"SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'cerrogalera' "
" AND community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")
def testSpeciesAbundances(self):
"""Tests that the produced species abundances are correct by comparing species richness."""
num = self.test.cursor.execute(
"SELECT COUNT(species_id) FROM SPECIES_ABUNDANCES WHERE community_reference == 2"
).fetchall()[0][0]
self.assertEqual(num, 1029, msg="Species abundances not correctly calculated.")
num = self.test.cursor.execute(
"SELECT COUNT(species_id) FROM SPECIES_ABUNDANCES WHERE community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 884, msg="Species abundances not correctly calculated.")
def testGetOctaves(self):
"""Tests getting the octaves."""
c = CoalescenceTree(os.path.join("output", "sampledb4.db"))
c.clear_calculations()
c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
c.calculate_richness()
self.assertEqual([[0, 585], [1, 231], [2, 59], [3, 5]], c.get_octaves(1))
c = CoalescenceTree(os.path.join("output", "sampledb4.db"))
c.clear_calculations()
c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
c.calculate_richness()
actual = c.get_octaves_pd().head()
expected = pd.DataFrame(
[[1, 0, 585], [1, 1, 231], [1, 2, 59], [1, 3, 5], [2, 0, 760]],
columns=["community_reference", "octave", "richness"],
)
assert_frame_equal(actual, expected, check_like=True)
def testSpeciesLocations(self):
"""
Tests that species locations have been correctly assigned.
"""
num = self.test.cursor.execute(
"SELECT species_id FROM SPECIES_LOCATIONS WHERE x==1662 AND y==4359 " " AND community_reference == 1"
).fetchall()
self.assertEqual(len(set(num)), 2, msg="Species locations not correctly assigned")
all_list = self.test.get_species_locations()
select_list = self.test.get_species_locations(community_reference=1)
self.assertListEqual([1, 1662, 4359, 1], all_list[0])
self.assertListEqual([1, 1662, 4359], select_list[0])
def testAlphaDiversity(self):
"""
Tests that alpha diversity is correctly calculated and fetched for each parameter reference
"""
c = CoalescenceTree(os.path.join("sample", "sample.db"))
with self.assertRaises(IOError):
c.get_alpha_diversity_pd()
self.assertEqual(9, self.test.get_alpha_diversity(1))
self.assertEqual(10, self.test.get_alpha_diversity(2))
expected_alphas_list = []
for reference in self.test.get_community_references():
expected_alphas_list.append(
{"community_reference": reference, "alpha_diversity": self.test.get_alpha_diversity(reference)}
)
expected_alphas = pd.DataFrame(expected_alphas_list).reset_index(drop=True)
actual_alphas = self.test.get_alpha_diversity_pd().reset_index(drop=True)
assert_frame_equal(expected_alphas, actual_alphas, check_like=True)
def testBetaDiversity(self):
"""
Tests that beta diversity is correctly calculated and fetched for the reference
"""
c = CoalescenceTree(os.path.join("sample", "sample.db"))
with self.assertRaises(IOError):
c.get_beta_diversity_pd()
self.assertAlmostEqual(98.111111111, self.test.get_beta_diversity(1), places=5)
self.assertAlmostEqual(102.8, self.test.get_beta_diversity(2), places=5)
expected_betas_list = []
for reference in self.test.get_community_references():
expected_betas_list.append(
{"community_reference": reference, "beta_diversity": self.test.get_beta_diversity(reference)}
)
expected_betas = pd.DataFrame(expected_betas_list).reset_index(drop=True)
actual_betas = self.test.get_beta_diversity_pd().reset_index(drop=True)
assert_frame_equal(expected_betas, actual_betas, check_like=True)
def testGetNumberIndividuals(self):
"""Tests that the number of individuals is obtained correctly."""
c = CoalescenceTree(os.path.join("output", "sampledb7.db"))
self.assertEqual(1504, c.get_number_individuals(community_reference=1))
self.assertEqual(12, c.get_number_individuals(fragment="P09", community_reference=1))
c.wipe_data()
c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
with self.assertRaises(IOError):
c.get_number_individuals(fragment="none")
with self.assertRaises(IOError):
c.get_number_individuals()
def testGetFragmentAbundances(self):
"""Tests that fragment abundances are correctly obtained."""
c = CoalescenceTree(os.path.join("sample", "sample3.db"))
with self.assertRaises(IOError):
c.get_fragment_abundances(fragment="P09", reference=1)
with self.assertRaises(IOError):
c.get_fragment_abundances_pd()
abundances = self.test.get_fragment_abundances(fragment="P09", reference=1)
expected_abundances = [[302, 1], [303, 1], [304, 1], [305, 1], [306, 1], [307, 1], [546, 2], [693, 1], [732, 3]]
self.assertEqual(expected_abundances, abundances[:10])
all_abundances = self.test.get_all_fragment_abundances()
expected_abundances2 = [
[1, "P09", 302, 1],
[1, "P09", 303, 1],
[1, "P09", 304, 1],
[1, "P09", 305, 1],
[1, "P09", 306, 1],
[1, "P09", 307, 1],
[1, "P09", 546, 2],
[1, "P09", 693, 1],
[1, "P09", 732, 3],
[1, "cerrogalera", 416, 1],
]
self.assertEqual(expected_abundances2, all_abundances[:10])
df = pd.DataFrame(
expected_abundances2, columns=["community_reference", "fragment", "species_id", "no_individuals"]
)
actual_df = self.test.get_fragment_abundances_pd().head(n=10)
assert_frame_equal(df, actual_df, check_like=True)
def testGetFragmentListErrors(self):
"""Tests the error is raised when obtaining fragment list."""
c = CoalescenceTree(os.path.join("output", "sampledb8.db"))
c.wipe_data()
with self.assertRaises(IOError):
c.get_fragment_list()
def testClearGoodnessFit(self):
"""Tests that goodness of fit are correctly cleared."""
c = CoalescenceTree(os.path.join("output", "sampledbx.db"))
exec_command = "SELECT * FROM BIODIVERSITY_METRICS WHERE metric LIKE 'goodness_%'"
self.assertTrue(len(c.cursor.execute(exec_command).fetchall()) >= 1)
c._clear_goodness_of_fit()
self.assertFalse(len(c.cursor.execute(exec_command).fetchall()) >= 1)
def testGetBiodiversityMetrics(self):
"""Tests that biodiversity metrics are correctly obtained from the database."""
c1 = CoalescenceTree(os.path.join("sample", "sample.db"))
with self.assertRaises(IOError):
c1.get_biodiversity_metrics()
c2 = CoalescenceTree(os.path.join("sample", "sample2.db"))
expected_biodiversity_metrics = pd.DataFrame(
[
[1, "fragment_richness", "fragment2", 129.0, np.NaN, np.NaN],
[2, "fragment_richness", "fragment2", 130.0, np.NAN, np.NaN],
[1, "fragment_richness", "fragment1", 174.0, np.NaN, np.NaN],
[2, "fragment_richness", "fragment1", 175.0, np.NaN, np.NaN],
[1, "fragment_richness", "whole", 1163.0, np.NaN, np.NaN],
[2, "fragment_richness", "whole", 1170.0, np.NaN, np.NaN],
],
columns=["community_reference", "metric", "fragment", "value", "simulated", "actual"],
).reset_index(drop=True)
actual_biodiversity_metrics = c2.get_biodiversity_metrics().reset_index(drop=True).fillna(value=np.nan)
assert_frame_equal(expected_biodiversity_metrics, actual_biodiversity_metrics)
def testRaisesErrorNoFragmentsAlpha(self):
"""
Tests that an error is raised when alpha diversity is calculated without any fragment abundance data
"""
with self.assertRaises(IOError):
self.test2.calculate_alpha_diversity()
def testRaisesErrorNoFragmentsBeta(self):
"""
Tests that an error is raised when alpha diversity is calculated without any fragment abundance data
"""
with self.assertRaises(IOError):
self.test2.calculate_beta_diversity()
def testRaisesErrorNoFragmentsRichness(self):
"""
Tests that an error is raised when fragment richness is calculated without any fragment abundance data
"""
with self.assertRaises(IOError):
self.test2.calculate_fragment_richness()
def testRaisesErrorNoFragmentsOctaves(self):
"""
Tests that an error is raised when fragment richness is calculated without any fragment abundance data
"""
with self.assertRaises(IOError):
self.test2.calculate_fragment_octaves()
@unittest.skipIf(sys.version[0] != "3", "Skipping Python 3.x tests")
def testModelFitting2(self):
"""
Tests that the goodness-of-fit calculations are correctly performed.
"""
random.seed(2)
self.test.calculate_goodness_of_fit()
self.assertAlmostEqual(self.test.get_goodness_of_fit(), 0.30140801329929373, places=6)
self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_octaves(), 0.0680205429120108, places=6)
self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_richness(), 0.9244977999898334, places=6)
@unittest.skipIf(sys.version[0] == "3", "Skipping Python 2.x tests")
def testModelFitting3(self):
"""
Tests that the goodness-of-fit calculations are correctly performed.
"""
random.seed(2)
self.test.calculate_goodness_of_fit()
self.assertAlmostEqual(self.test.get_goodness_of_fit(), 0.30140801329929373, places=6)
self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_octaves(), 0.0680205429120108, places=6)
self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_richness(), 0.9244977999898334, places=6)
def testErrorIfNotApplied(self):
"""Tests that an error is raised if outputting is attempted without applying any community parameters."""
c = CoalescenceTree(os.path.join("sample", "sample.db"))
with self.assertRaises(RuntimeError):
c.output()
def testFragmentNumbersMatching(self):
"""Checks behaviour when matching fragment numbers."""
test = CoalescenceTree(os.path.join("output", "sampledb1.db"), logging_level=50)
test.clear_calculations()
with self.assertRaises(RuntimeError):
test._check_fragment_numbers_match()
with self.assertRaises(ValueError):
test.calculate_fragment_abundances()
test._check_fragment_numbers_match()
test.comparison_file = os.path.join("sample", "PlotBiodiversityMetrics.db")
self.assertTrue(test._check_fragment_numbers_match())
test.fragment_abundances.pop(0)
self.assertFalse(test._check_fragment_numbers_match())
def testFragmentNumbersEqualisation(self):
"""Checks behaviour when equalising fragment numbers."""
test = CoalescenceTree(os.path.join("output", "sampledb2.db"), logging_level=50)
test.clear_calculations()
test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
test.calculate_fragment_richness()
self.test._equalise_fragment_number("notafrag", 1)
test.fragment_abundances[0][2] += 1000
test._equalise_fragment_number("P09", 1)
self.assertTrue(test._check_fragment_numbers_match())
def testFragmentNumbersErrors(self):
"""Checks behaviour when equalising fragment numbers."""
test = CoalescenceTree(os.path.join("output", "sampledb3.db"), logging_level=50)
test.clear_calculations()
test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
test.comparison_abundances = None
with self.assertRaises(ValueError):
test._equalise_all_fragment_numbers()
def testAdjustBiodiversityMetrics(self):
"""Checks that biodiversity metrics are correctly adjusted."""
test = CoalescenceTree(os.path.join("output", "sampledb5.db"), logging_level=50)
test.clear_calculations()
test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
test.adjust_data()
def testComparisonOctavesModification(self):
"""Tests that the comparison database is modified."""
test = CoalescenceTree(os.path.join("output", "sampledb6.db"), logging_level=50)
dst = os.path.join("output", "PlotBiodiversityMetricsNoAlpha2.db")
shutil.copy(os.path.join("sample", "PlotBiodiversityMetricsNoAlpha.db"), dst)
test.import_comparison_data(dst)
test.calculate_comparison_octaves(store=True)
self.assertTrue(os.path.exists(dst))
@unittest.skipIf(sys.version[0] == "2", "Skipping Python 3.x tests")
def testDownsamplingAndRevert(self):
"""Tests that downsampling works as intended and can be reverted."""
c = CoalescenceTree(os.path.join("output", "sampledb9.db"))
random.seed(a=10, version=3)
original_individuals = c.get_number_individuals()
original_richness = c.get_species_richness_pd()
c.wipe_data()
with self.assertRaises(ValueError):
c.downsample(sample_proportion=2.0)
c.downsample(sample_proportion=0.1)
c.set_speciation_parameters([0.1, 0.2])
c.apply()
new_individuals = c.get_number_individuals()
self.assertEqual(1452, new_individuals)
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
c = CoalescenceTree(os.path.join("output", "sampledb9.db"))
c.revert_downsample()
c.wipe_data()
c.set_speciation_parameters([0.1, 0.2])
c.apply()
final_individuals = c.get_number_individuals()
assert_frame_equal(original_richness, c.get_species_richness_pd())
self.assertEqual(original_individuals, final_individuals)
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
self.assertFalse(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
# Now test with NSE sim to ensure correct sampling
c = CoalescenceTree(os.path.join("output", "nse_reference1.db"))
nse_richness = c.get_species_richness_pd()
nse_no_individuals = c.get_number_individuals()
c.wipe_data()
c.downsample(sample_proportion=0.1)
c.set_speciation_parameters([0.000001, 0.999999])
c.apply()
new_no_individuals = c.get_number_individuals()
self.assertAlmostEqual(new_no_individuals / nse_no_individuals, 0.1, 5)
self.assertEqual(1000, c.get_species_richness(reference=2))
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
c = CoalescenceTree(os.path.join("output", "nse_reference1.db"))
c.revert_downsample()
c.wipe_data()
c.set_speciation_parameters([0.000001, 0.999999])
c.apply_incremental()
c.set_speciation_parameters([0.5])
c.apply()
actual_richness = c.get_species_richness_pd()
assert_frame_equal(nse_richness, actual_richness)
self.assertEqual(nse_no_individuals, c.get_number_individuals())
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
self.assertFalse(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
with self.assertRaises(IOError):
c.revert_downsample()
@unittest.skipIf(sys.version[0] == "2", "Skipping Python 3.x tests")
def testDownsamplingByLocationAndRevert(self):
"""Tests that downsampling works as intended and can be reverted."""
c = CoalescenceTree(os.path.join("output", "sampledb10.db"))
random.seed(a=10, version=3)
original_individuals = c.get_number_individuals()
original_richness = c.get_species_richness_pd()
c.wipe_data()
with self.assertRaises(ValueError):
c.downsample_at_locations(fragment_csv=os.path.join("sample", "FragmentsTestFail1.csv"))
with self.assertRaises(IOError):
c.downsample_at_locations(fragment_csv="not_a_file.csv")
c.downsample_at_locations(fragment_csv=os.path.join("sample", "FragmentsTest3.csv"))
c.set_speciation_parameters([0.1, 0.2])
c.apply()
new_individuals = c.get_number_individuals()
self.assertEqual(2, new_individuals)
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
c = CoalescenceTree(os.path.join("output", "sampledb10.db"))
c.revert_downsample()
c.wipe_data()
c.set_speciation_parameters([0.1, 0.2])
c.apply()
final_individuals = c.get_number_individuals()
assert_frame_equal(original_richness, c.get_species_richness_pd())
self.assertEqual(original_individuals, final_individuals)
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
self.assertFalse(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
c = CoalescenceTree(os.path.join("output", "sampledb10.db"))
c.wipe_data()
c.downsample_at_locations(fragment_csv=os.path.join("sample", "FragmentsTest4.csv"), ignore_errors=True)
c.set_speciation_parameters([0.1, 0.2])
c.apply()
new_individuals = c.get_number_individuals()
self.assertEqual(3, new_individuals)
class TestCoalescenceTreeWriteCsvs(unittest.TestCase):
"""Tests that csvs are correctly outputted."""
@classmethod
def setUpClass(cls):
"""Creates the CoalescenceTree object."""
cls.c = CoalescenceTree(os.path.join("sample", "nse_reference.db"))
def testWriteCommunityParameterToCsv(self):
"""Tests that community parameters are correctly written to a csv."""
output_csv = os.path.join("output", "community_parameters1.csv")
self.c.write_to_csv(output_csv, "COMMUNITY_PARAMETERS")
self.assertTrue(os.path.exists(output_csv))
import csv
if sys.version_info[0] < 3: # pragma: no cover
infile = open(output_csv, "rb")
else:
infile = open(output_csv, "r")
expected_output = [
["reference", "speciation_rate", "time", "fragments", "metacommunity_reference"],
["1", "1e-06", "0.0", "0", "0"],
["2", "0.99999", "0.0", "0", "0"],
["3", "0.5", "0.0", "0", "0"],
]
actual_output = []
with infile as csv_file:
csv_reader = csv.reader(csv_file)
for row in csv_reader:
actual_output.append(row)
self.assertEqual(expected_output, actual_output)
with self.assertRaises(IOError):
self.c.write_to_csv(output_csv, "COMMUNITY_PARAMETERS")
with self.assertRaises(KeyError):
self.c.write_to_csv("notacsv.csv", "NOTATABLE")
def testWritesAllCsvs(self):
"""Tests that all csvs write to the output correctly."""
output_dir = os.path.join("output", "csvdir")
if os.path.exists(output_dir):
os.remove(output_dir)
self.c.write_all_to_csvs(output_dir, "out1")
expected_tables = ["COMMUNITY_PARAMETERS", "SIMULATION_PARAMETERS", "SPECIES_ABUNDANCES", "SPECIES_LIST"]
for table in expected_tables:
self.assertTrue(os.path.exists(os.path.join(output_dir, "out1_{}.csv".format(table))))
for file in os.listdir(output_dir):
if ".csv" in file:
self.assertIn(file, ["out1_{}.csv".format(x) for x in expected_tables])
self.c.write_all_to_csvs(output_dir, "out2.csv")
for table in expected_tables:
self.assertTrue(os.path.exists(os.path.join(output_dir, "out2_{}.csv".format(table))))
self.c.write_all_to_csvs(output_dir, "out3.")
for table in expected_tables:
self.assertTrue(os.path.exists(os.path.join(output_dir, "out3_{}.csv".format(table))))
class TestCoalescenceTreeSpeciesDistances(unittest.TestCase):
"""Tests analysis is performed correctly."""
@classmethod
def setUpClass(cls):
"""
Sets up the Coalescence object test case.
"""
dst = os.path.join("output", "sampledb1.db")
if os.path.exists(dst):
os.remove(dst)
shutil.copyfile(os.path.join("sample", "sample.db"), dst)
cls.test = CoalescenceTree(dst)
cls.test.clear_calculations()
cls.test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
cls.test.calculate_species_distance_similarity()
def testSpeciesDistanceSimilarity(self):
"""
Tests that the species distance similarity function works as intended.
"""
mean = self.test.cursor.execute(
"SELECT value FROM BIODIVERSITY_METRICS WHERE community_reference == 1 AND "
"metric == 'mean_distance_between_individuals'"
).fetchone()[0]
self.assertAlmostEqual(mean, 5.423769507803121, places=5)
species_distances = self.test.get_species_distance_similarity(community_reference=1)
# for distance, similar in species_distances:
# self.assertLessEqual(similar, dissimilar)
self.assertListEqual(species_distances[0], [0, 11])
self.assertListEqual(species_distances[1], [1, 274])
self.assertListEqual(species_distances[2], [2, 289])
class TestCoalescenceTreeAnalyseIncorrectComparison(unittest.TestCase):
"""
Tests errors are raised correctly for incorrect comparison data.
"""
@classmethod
def setUpClass(cls):
"""
Sets up the Coalescence object test case.
"""
random.seed(10)
dst = os.path.join("output", "sampledb2.db")
if os.path.exists(dst):
os.remove(dst)
shutil.copyfile(os.path.join("sample", "sample.db"), dst)
cls.test = CoalescenceTree(logging_level=40)
cls.test.set_database(dst)
cls.test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetricsNoAlpha.db"))
cls.test.calculate_comparison_octaves(False)
cls.test.clear_calculations()
cls.test.calculate_fragment_richness()
cls.test.calculate_fragment_octaves()
cls.test.calculate_octaves_error()
cls.test.calculate_alpha_diversity()
cls.test.calculate_alpha_diversity()
cls.test.calculate_beta_diversity()
cls.test2 = CoalescenceTree()
cls.test2.set_database(os.path.join("sample", "sample_nofrag.db"))
@classmethod
def tearDownClass(cls):
"""
Removes the files from output."
"""
cls.test.clear_calculations()
def testRaisesErrorMismatchParameters(self):
"""
Tests that an error is raised when there is a parameter mismatch
"""
with self.assertRaises(ValueError):
self.test.calculate_goodness_of_fit()
class TestSimulationAnalysisTemporal(unittest.TestCase):
"""Tests that applying multiple times works as expected."""
@classmethod
def setUpClass(cls):
"""Generates the analysis object."""
src = os.path.join("sample", "sample2.db")
dst = os.path.join("output", "sample2.db")
if not os.path.exists(dst):
shutil.copy(src, dst)
cls.tree = CoalescenceTree()
cls.tree.set_database(dst)
cls.tree.wipe_data()
def testTimesWrongFormatError(self):
"""Tests that an error is raised when the times are in the wrong format."""
with self.assertRaises(TypeError):
self.tree.set_speciation_parameters([0.4, 0.6], times=[0.1, 0.2, "notafloat"])
with self.assertRaises(TypeError):
# noinspection PyTypeChecker
self.tree.set_speciation_parameters([0.4, 0.6], times="notafloat")
self.tree.times = []
self.tree.set_speciation_parameters([0.4, 0.6], times=[0, 1, 10])
self.assertEqual([0.0, 1.0, 10.0], self.tree.times)
class TestSimulationAnalysis(unittest.TestCase):
"""
Tests that the simulation can perform all required analyses, and that the correct errors are thrown if the object
does not exist.
"""
@classmethod
def setUpClass(cls):
"""Copies the sample databases and applies a basic set of community parameters."""
src = os.path.join("sample", "sample2.db")
dst = os.path.join("output", "sample2.db")
if os.path.exists(dst):
os.remove(dst)
shutil.copy(src, dst)
cls.tree = CoalescenceTree(logging_level=50)
cls.tree.set_database(dst)
cls.tree.wipe_data()
cls.tree.set_speciation_parameters(
speciation_rates=[0.5, 0.7],
record_spatial="T",
record_fragments=os.path.join("sample", "FragmentsTest.csv"),
sample_file=os.path.join("sample", "SA_samplemaskINT.tif"),
)
cls.tree.apply()
cls.tree.calculate_fragment_richness()
cls.tree.calculate_fragment_octaves()
np.random.seed(100)
def testSetDatabaseErrors(self):
"""Tests that the set database errors are correctly raised."""
sim = Simulation()
c = CoalescenceTree()
with self.assertRaises(RuntimeError):
c.set_database(sim)
c = CoalescenceTree()
with self.assertRaises(IOError):
c.set_database(os.path.join("sample", "failsampledoesntexist.db"))
def testFragmentConfigNoExistError(self):
"""Tests that an error is raised if the fragment config file does not exist."""
tree = CoalescenceTree(self.tree.file)
with self.assertRaises(IOError):
tree.set_speciation_parameters(
speciation_rates=[0.5, 0.7],
record_spatial="T",
record_fragments=os.path.join("sample", "notafragmentconfig.csv"),
sample_file=os.path.join("sample", "SA_samplemaskINT.tif"),
)
with self.assertRaises(IOError):
tree.set_speciation_parameters(
speciation_rates=[0.5, 0.7],
record_spatial="T",
record_fragments=os.path.join("sample", "example_historical_fine.tif"),
sample_file=os.path.join("sample", "SA_samplemaskINT.tif"),
)
def testReadsFragmentsRichness(self):
"""
Tests that the fragment richness can be read correctly
"""
sim_params = self.tree.get_simulation_parameters()
expected_params = dict(
seed=9,
task=1,
output_dir="output",
speciation_rate=0.5,
sigma=2.828427,
tau=2.0,
deme=1,
sample_size=0.1,
max_time=2.0,
dispersal_relative_cost=1.0,
min_num_species=1,
habitat_change_rate=0.0,
gen_since_historical=200.0,
time_config_file="null",
coarse_map_file="sample/SA_sample_coarse.tif",
coarse_map_x=35,
coarse_map_y=41,
coarse_map_x_offset=11,
coarse_map_y_offset=14,
coarse_map_scale=1.0,
fine_map_file="sample/SA_sample_fine.tif",
fine_map_x=13,
fine_map_y=13,
fine_map_x_offset=0,
fine_map_y_offset=0,
sample_file="sample/SA_samplemaskINT.tif",
grid_x=13,
grid_y=13,
sample_x=13,
sample_y=13,
sample_x_offset=0,
sample_y_offset=0,
historical_coarse_map="none",
historical_fine_map="none",
sim_complete=1,
dispersal_method="normal",
m_probability=0.0,
cutoff=0.0,
landscape_type="closed",
protracted=0,
min_speciation_gen=0.0,
max_speciation_gen=0.0,
dispersal_map="none",
)
for key in sim_params.keys():
self.assertEqual(
sim_params[key],
expected_params[key],
msg="Error in {}: {} != {}".format(key, sim_params[key], expected_params[key]),
)
fragment2_richness = ["fragment2", 1, 129]
self.assertEqual(self.tree.get_fragment_richness(fragment="fragment2", reference=1), 129)
self.assertEqual(self.tree.get_fragment_richness(fragment="fragment1", reference=2), 175)
octaves = self.tree.get_fragment_richness()
self.assertListEqual(fragment2_richness, [list(x) for x in octaves if x[0] == "fragment2" and x[1] == 1][0])
expected_fragment_richness = []
for reference in self.tree.get_community_references():
for fragment in self.tree.get_fragment_list(reference):
fragment_richness = self.tree.get_fragment_richness(fragment=fragment, reference=reference)
expected_fragment_richness.append(
{"fragment": fragment, "community_reference": reference, "fragment_richness": fragment_richness}
)
expected_fragment_richness_df = (
pd.DataFrame(expected_fragment_richness)
.sort_values(by=["fragment", "community_reference"])
.reset_index(drop=True)
)
actual_fragment_richness = self.tree.get_fragment_richness_pd().reset_index(drop=True)
assert_frame_equal(expected_fragment_richness_df, actual_fragment_richness, check_like=True)
def testGetsFragmentList(self):
"""
Tests that fetching the list of fragments from FRAGMENT_ABUNDANCES is as expected
"""
fragment_list = self.tree.get_fragment_list()
expected_list = ["fragment1", "fragment2"]
self.assertListEqual(expected_list, fragment_list)
def testReadsFragmentAbundances(self):
"""
Tests that the fragment abundances are correctly read
"""
expected_abundances = [
[610, 1],
[611, 1],
[612, 1],
[613, 1],
[614, 1],
[615, 1],
[616, 1],
[617, 1],
[618, 1],
[619, 1],
]
actual_abundances = self.tree.get_species_abundances(fragment="fragment2", reference=1)
for i, each in enumerate(expected_abundances):
self.assertListEqual(actual_abundances[i], each)
with self.assertRaises(ValueError):
self.tree.get_species_abundances(fragment="fragment2")
expected_fragment_abundances_list = []
for reference in self.tree.get_community_references():
for fragment in self.tree.get_fragment_list(reference):
fragment_abundances = self.tree.get_fragment_abundances(fragment=fragment, reference=reference)
for species_id, abundance in fragment_abundances:
expected_fragment_abundances_list.append(
{
"fragment": fragment,
"community_reference": reference,
"species_id": species_id,
"no_individuals": abundance,
}
)
expected_fragment_abundances = (
pd.DataFrame(expected_fragment_abundances_list)
.sort_values(by=["fragment", "community_reference", "species_id"])
.reset_index(drop=True)
)
actual_fragment_abundances = (
self.tree.get_fragment_abundances_pd()
.sort_values(by=["fragment", "community_reference", "species_id"])
.reset_index(drop=True)
)
assert_frame_equal(expected_fragment_abundances, actual_fragment_abundances, check_like=True)
def testFragmentRichnessRaiseError(self):
"""
Tests that the correct errors are raised when no fragment exists with that name, or with the specified
speciation rate, or time. Also checks SyntaxErrors and sqlite3.Errors when no FRAGMENT_RICHNESS table
exists.
"""
failtree = CoalescenceTree()
failtree.set_database(os.path.join("sample", "failsample.db"))
with self.assertRaises(IOError):
failtree.get_fragment_richness()
with self.assertRaises(IOError):
failtree.get_fragment_richness_pd()
with self.assertRaises(IOError):
self.tree.get_fragment_richness(fragment="fragment4", reference=1)
with self.assertRaises(SyntaxError):
self.tree.get_fragment_richness(fragment="fragment4")
with self.assertRaises(SyntaxError):
self.tree.get_fragment_richness(reference=1)
def testReadsFragmentOctaves(self):
"""
Tests that the fragment octaves can be read correctly.
"""
octaves = self.tree.get_fragment_octaves(fragment="fragment2", reference=1)
octaves2 = self.tree.get_fragment_octaves(fragment="fragment1", reference=1)
all_octaves = self.tree.get_fragment_octaves()
desired = ["fragment1", 1, 0, 173]
self.assertListEqual([0, 128], octaves[0])
self.assertListEqual([0, 173], octaves2[0])
self.assertListEqual(desired, [x for x in all_octaves if x[0] == "fragment1" and x[1] == 1 and x[2] == 0][0])
expected_fragment_octaves_list = []
for reference in self.tree.get_community_references():
fragment_list = self.tree.get_fragment_list(reference)
fragment_list.append("whole")
for fragment in fragment_list:
try:
octaves = self.tree.get_fragment_octaves(fragment=fragment, reference=reference)
for octave, richness in octaves:
expected_fragment_octaves_list.append(
{
"fragment": fragment,
"community_reference": reference,
"octave": octave,
"richness": richness,
}
)
except RuntimeError:
continue
expected_fragment_octaves = (
pd.DataFrame(expected_fragment_octaves_list)
.sort_values(["fragment", "community_reference", "octave"], axis=0)
.reset_index(drop=True)
)
actual_fragment_octaves = (
self.tree.get_fragment_octaves_pd()
.sort_values(["fragment", "community_reference", "octave"], axis=0)
.reset_index(drop=True)
)
assert_frame_equal(expected_fragment_octaves, actual_fragment_octaves, check_like=True)
def testFragmentOctavesRaiseError(self):
"""
Tests that the correct errors are raised for different situations for reading fragment octaves
"""
failtree = CoalescenceTree()
try:
failtree.set_database("sample/failsample.db")
except sqlite3.Error:
pass
with self.assertRaises(sqlite3.Error):
failtree.get_fragment_octaves(fragment="fragment4", reference=100)
with self.assertRaises(RuntimeError):
self.tree.get_fragment_octaves(fragment="fragment4", reference=100)
with self.assertRaises(SyntaxError):
self.tree.get_fragment_octaves(fragment="fragment4")
with self.assertRaises(SyntaxError):
self.tree.get_fragment_octaves(reference=100)
def testFragmentSampling(self):
"""
Tests that sampling from fragments is accurate.
"""
self.assertEqual(
10,
self.tree.sample_fragment_richness(
fragment="fragment1", number_of_individuals=10, n=1, community_reference=2
),
)
self.assertEqual(
10,
self.tree.sample_fragment_richness(
fragment="fragment2", number_of_individuals=10, n=10, community_reference=2
),
)
def testLandscapeSampling(self):
"""Tests that the sampling from the landscape works as intended."""
number_dict = {"fragment1": 3, "fragment2": 10}
np.random.seed(100)
self.assertEqual(
13, self.tree.sample_landscape_richness(number_of_individuals=number_dict, n=1, community_reference=2)
)
self.assertAlmostEqual(
99.9, self.tree.sample_landscape_richness(number_of_individuals=100, n=10, community_reference=1), places=3
)
def testRaisesSamplingErrors(self):
"""Tests that sampling errors are correctly raised"""
number_dict = {"fragment1": 3000000, "fragment2": 10}
with self.assertRaises(KeyError):
self.assertEqual(
13, self.tree.sample_landscape_richness(number_of_individuals=number_dict, n=1, community_reference=2)
)
number_dict2 = {"fragment": 10, "fragment2": 10}
with self.assertRaises(KeyError):
self.assertEqual(
13, self.tree.sample_landscape_richness(number_of_individuals=number_dict2, n=1, community_reference=2)
)
def testSpeciesRichness(self):
"""Tests that the simulation species richness is read correctly."""
actual_species_richness = (
self.tree.get_species_richness_pd().sort_values(by=["community_reference"]).reset_index(drop=True)
)
expected_species_richness_list = []
for reference in self.tree.get_community_references():
expected_species_richness_list.append(
{"community_reference": reference, "richness": self.tree.get_species_richness(reference=reference)}
)
expected_species_richness = pd.DataFrame(expected_species_richness_list)
assert_frame_equal(actual_species_richness, expected_species_richness, check_like=True)
def testOctaves(self):
"""Tests that the simulation octave classes are correctly calculated."""
actual_species_octaves = (
self.tree.get_octaves_pd().sort_values(by=["community_reference", "octave"]).reset_index(drop=True)
)
expected_species_octaves_list = []
for reference in self.tree.get_community_references():
for octave, richness in self.tree.get_octaves(reference):
expected_species_octaves_list.append(
{"community_reference": reference, "octave": octave, "richness": richness}
)
expected_species_octaves = pd.DataFrame(expected_species_octaves_list)
assert_frame_equal(actual_species_octaves, expected_species_octaves, check_like=True)
class TestMetacommunityApplication(unittest.TestCase):
"""
Tests that a metacommunity can be applied correctly under the three different scenarios. Note that this does not
test edge cases, just that the parameters are correctly stored and the different application methods work as
intended.
"""
@classmethod
def setUpClass(cls):
"""Initialises the three database files to use."""
src = os.path.join("sample", "sample.db")
for i in range(6):
dst = os.path.join("output", "sample_{}.db".format(i))
if os.path.exists(dst):
os.remove(dst)
shutil.copy2(src, dst)
def testMetacommunityAddingInvalidParameters(self):
"""Tests that adding invalid parameter for a metacommunity raises the appropriate errors."""
tree = CoalescenceTree(os.path.join("output", "sample_0.db"))
tree.wipe_data()
with self.assertRaises(IOError):
tree.get_metacommunity_parameters_pd()
tree.set_speciation_parameters([0.1, 0.2])
for size, spec, opt, ref in [
[0, 0.1, "simulated", None],
[10, 0.0, "analytical", None],
[None, None, "analytical", None],
[10, 0.0, "path/to/file", None],
[0, 0.0, "path/to/file", None],
[0, 0.0, "path/to/not/a/file.db", 1],
]:
with self.assertRaises(ValueError):
tree.add_metacommunity_parameters(
metacommunity_size=size,
metacommunity_speciation_rate=spec,
metacommunity_option=opt,
metacommunity_reference=ref,
)
with self.assertRaises(IOError):
tree.add_metacommunity_parameters(metacommunity_option="not/a/file/db.db", metacommunity_reference=1)
def testMetacommunitySimulation(self):
"""Tests that a simulated metacommunity works as intended."""
tree = CoalescenceTree(os.path.join("output", "sample_1.db"))
tree.wipe_data()
tree.set_speciation_parameters(
[0.1, 0.2], metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option="simulated"
)
tree.add_metacommunity_parameters(
metacommunity_size=15000, metacommunity_speciation_rate=0.1, metacommunity_option="simulated"
)
tree.add_metacommunity_parameters(
metacommunity_size=100000, metacommunity_speciation_rate=0.001, metacommunity_option="simulated"
)
tree.apply()
params_1 = tree.get_metacommunity_parameters(1)
params_2 = tree.get_metacommunity_parameters(2)
params_3 = tree.get_metacommunity_parameters(3)
self.assertEqual(10000, params_1["metacommunity_size"])
self.assertEqual(0.001, params_1["speciation_rate"])
self.assertEqual("simulated", params_1["option"])
self.assertEqual(0, params_1["external_reference"])
self.assertEqual(15000, params_2["metacommunity_size"])
self.assertEqual(0.1, params_2["speciation_rate"])
self.assertEqual("simulated", params_2["option"])
self.assertEqual(0, params_2["external_reference"])
self.assertEqual(100000, params_3["metacommunity_size"])
self.assertEqual(0.001, params_3["speciation_rate"])
self.assertEqual("simulated", params_3["option"])
self.assertEqual(0, params_3["external_reference"])
self.assertEqual(51, tree.get_species_richness(1))
self.assertEqual(47, tree.get_species_richness(2))
self.assertEqual(681, tree.get_species_richness(3))
self.assertEqual(783, tree.get_species_richness(4))
self.assertEqual(247, tree.get_species_richness(5))
self.assertEqual(241, tree.get_species_richness(6))
expected_metacommunity_parameters_list = []
for reference in tree.get_community_references():
try:
params = tree.get_metacommunity_parameters(reference)
params["reference"] = reference
expected_metacommunity_parameters_list.append(params)
except KeyError:
continue
expected_metacommunity_parameters = | pd.DataFrame(expected_metacommunity_parameters_list) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from empiricaldist import Pmf
from scipy.stats import gaussian_kde
from scipy.stats import binom
from scipy.stats import gamma
from scipy.stats import poisson
def values(series):
"""Make a series of values and the number of times they appear.
Returns a DataFrame because they get rendered better in Jupyter.
series: Pandas Series
returns: Pandas DataFrame
"""
series = series.value_counts(dropna=False).sort_index()
series.index.name = 'values'
series.name = 'counts'
return pd.DataFrame(series)
def write_table(table, label, **options):
"""Write a table in LaTex format.
table: DataFrame
label: string
options: passed to DataFrame.to_latex
"""
filename = f'tables/{label}.tex'
fp = open(filename, 'w')
s = table.to_latex(**options)
fp.write(s)
fp.close()
def write_pmf(pmf, label):
"""Write a Pmf object as a table.
pmf: Pmf
label: string
"""
df = pd.DataFrame()
df['qs'] = pmf.index
df['ps'] = pmf.values
write_table(df, label, index=False)
def underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
d: dictionary
options: keyword args to add to d
"""
for key, val in options.items():
d.setdefault(key, val)
return d
def decorate(**options):
"""Decorate the current axes.
Call decorate with keyword arguments like
decorate(title='Title',
xlabel='x',
ylabel='y')
The keyword arguments can be any of the axis properties
https://matplotlib.org/api/axes_api.html
"""
ax = plt.gca()
ax.set(**options)
handles, labels = ax.get_legend_handles_labels()
if handles:
ax.legend(handles, labels)
plt.tight_layout()
def savefig(root, **options):
"""Save the current figure.
root: string filename root
options: passed to plt.savefig
"""
format = options.pop('format', None)
if format:
formats = [format]
else:
formats = ['pdf', 'png']
for format in formats:
fname = f'figs/{root}.{format}'
plt.savefig(fname, **options)
def make_die(sides):
"""Pmf that represents a die with the given number of sides.
sides: int
returns: Pmf
"""
outcomes = np.arange(1, sides+1)
die = Pmf(1/sides, outcomes)
return die
def add_dist_seq(seq):
"""Distribution of sum of quantities from PMFs.
seq: sequence of Pmf objects
returns: Pmf
"""
total = seq[0]
for other in seq[1:]:
total = total.add_dist(other)
return total
def make_mixture(pmf, pmf_seq):
"""Make a mixture of distributions.
pmf: mapping from each hypothesis to its probability
(or it can be a sequence of probabilities)
pmf_seq: sequence of Pmfs, each representing
a conditional distribution for one hypothesis
returns: Pmf representing the mixture
"""
df = | pd.DataFrame(pmf_seq) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# ----------------------------------------------------------------------------
"""
Tests for the Variable Explorer Collections Editor.
"""
# Standard library imports
import os # Example module for testing display inside CollecitonsEditor
from os import path
import copy
import datetime
from xml.dom.minidom import parseString
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
# Third party imports
import numpy
import pandas
import pytest
from flaky import flaky
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QWidget
# Local imports
from spyder.plugins.variableexplorer.widgets.collectionseditor import (
RemoteCollectionsEditorTableView, CollectionsEditorTableView,
CollectionsModel, CollectionsEditor, LARGE_NROWS, ROWS_TO_LOAD)
from spyder.plugins.variableexplorer.widgets.namespacebrowser import (
NamespacesBrowserFinder)
from spyder.plugins.variableexplorer.widgets.tests.test_dataframeeditor import \
generate_pandas_indexes
from spyder.py3compat import PY2
# =============================================================================
# Constants
# =============================================================================
# Full path to this file's parent directory for loading data
LOCATION = path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# =============================================================================
# Utility functions
# =============================================================================
def data(cm, i, j):
return cm.data(cm.index(i, j))
def data_table(cm, n_rows, n_cols):
return [[data(cm, i, j) for i in range(n_rows)] for j in range(n_cols)]
# =============================================================================
# Pytest Fixtures
# =============================================================================
@pytest.fixture
def nonsettable_objects_data():
"""Rturn Python objects with immutable attribs to test CollectionEditor."""
test_objs = [pandas.Period("2018-03"), pandas.Categorical([1, 2, 42])]
expected_objs = [pandas.Period("2018-03"), pandas.Categorical([1, 2, 42])]
keys_test = [["_typ", "day", "dayofyear", "hour"],
["_typ", "nbytes", "ndim"]]
return zip(test_objs, expected_objs, keys_test)
# =============================================================================
# Tests
# ============================================================================
def test_rename_variable(qtbot):
"""Test renaming of the correct variable."""
variables = {'a': 1,
'b': 2,
'c': 3,
'd': '4',
'e': 5}
editor = CollectionsEditorTableView(None, variables.copy())
qtbot.addWidget(editor)
editor.setCurrentIndex(editor.model.index(1, 0))
editor.rename_item(new_name='b2')
assert editor.model.rowCount() == 5
assert data(editor.model, 0, 0) == 'a'
assert data(editor.model, 1, 0) == 'b2'
assert data(editor.model, 2, 0) == 'c'
assert data(editor.model, 3, 0) == 'd'
assert data(editor.model, 4, 0) == 'e'
# Reset variables and try renaming one again
new_variables = {'a': 1,
'b': 2,
'b2': 2,
'c': 3,
'd': '4',
'e': 5}
editor.set_data(new_variables.copy())
editor.adjust_columns()
editor.setCurrentIndex(editor.model.index(1, 0))
editor.rename_item(new_name='b3')
assert editor.model.rowCount() == 6
assert data(editor.model, 0, 0) == 'a'
assert data(editor.model, 1, 0) == 'b2'
assert data(editor.model, 2, 0) == 'b3'
assert data(editor.model, 3, 0) == 'c'
assert data(editor.model, 4, 0) == 'd'
assert data(editor.model, 5, 0) == 'e'
def test_remove_variable(qtbot):
"""Test removing of the correct variable."""
variables = {'a': 1,
'b': 2,
'c': 3,
'd': '4',
'e': 5}
editor = CollectionsEditorTableView(None, variables.copy())
qtbot.addWidget(editor)
editor.setCurrentIndex(editor.model.index(1, 0))
editor.remove_item(force=True)
assert editor.model.rowCount() == 4
assert data(editor.model, 0, 0) == 'a'
assert data(editor.model, 1, 0) == 'c'
assert data(editor.model, 2, 0) == 'd'
assert data(editor.model, 3, 0) == 'e'
# Reset variables and try removing one again
editor.set_data(variables.copy())
editor.adjust_columns()
editor.setCurrentIndex(editor.model.index(1, 0))
editor.remove_item(force=True)
assert editor.model.rowCount() == 4
assert data(editor.model, 0, 0) == 'a'
assert data(editor.model, 1, 0) == 'c'
assert data(editor.model, 2, 0) == 'd'
assert data(editor.model, 3, 0) == 'e'
def test_remove_remote_variable(qtbot, monkeypatch):
"""Test the removing of the correct remote variable."""
variables = {'a': {'type': 'int',
'size': 1,
'color': '#0000ff',
'view': '1'},
'b': {'type': 'int',
'size': 1,
'color': '#0000ff',
'view': '2'},
'c': {'type': 'int',
'size': 1,
'color': '#0000ff',
'view': '3'},
'd': {'type': 'str',
'size': 1, 'color': '#800000',
'view': '4'},
'e': {'type': 'int',
'size': 1,
'color': '#0000ff',
'view': '5'}}
editor = RemoteCollectionsEditorTableView(None, variables.copy())
qtbot.addWidget(editor)
editor.setCurrentIndex(editor.model.index(1, 0))
# Monkey patch remove variables
def remove_values(ins, names):
assert names == ['b']
data = {'a': {'type': 'int',
'size': 1,
'color': '#0000ff',
'view': '1'},
'c': {'type': 'int',
'size': 1,
'color': '#0000ff',
'view': '3'},
'd': {'type': 'str',
'size': 1, 'color': '#800000',
'view': '4'},
'e': {'type': 'int',
'size': 1,
'color': '#0000ff',
'view': '5'}}
editor.set_data(data)
monkeypatch.setattr(
'spyder.plugins.variableexplorer.widgets'
'.collectionseditor.RemoteCollectionsEditorTableView.remove_values',
remove_values)
editor.remove_item(force=True)
assert editor.model.rowCount() == 4
assert data(editor.model, 0, 0) == 'a'
assert data(editor.model, 1, 0) == 'c'
assert data(editor.model, 2, 0) == 'd'
assert data(editor.model, 3, 0) == 'e'
# Reset variables and try removing one again
editor.set_data(variables.copy())
editor.adjust_columns()
editor.setCurrentIndex(editor.model.index(1, 0))
editor.remove_item(force=True)
assert editor.model.rowCount() == 4
assert data(editor.model, 0, 0) == 'a'
assert data(editor.model, 1, 0) == 'c'
assert data(editor.model, 2, 0) == 'd'
assert data(editor.model, 3, 0) == 'e'
def test_filter_rows(qtbot):
"""Test rows filtering."""
df = pandas.DataFrame(['foo', 'bar'])
editor = CollectionsEditorTableView(None, {'dfa': df, 'dfb': df})
editor.finder = NamespacesBrowserFinder(editor,
editor.set_regex)
qtbot.addWidget(editor)
# Initially two rows
assert editor.model.rowCount() == 2
# Match two rows by name
editor.finder.setText("df")
assert editor.model.rowCount() == 2
# Match two rows by type
editor.finder.setText("DataFrame")
assert editor.model.rowCount() == 2
# Only one match
editor.finder.setText("dfb")
assert editor.model.rowCount() == 1
# No match
editor.finder.setText("dfbc")
assert editor.model.rowCount() == 0
def test_create_dataframeeditor_with_correct_format(qtbot, monkeypatch):
MockDataFrameEditor = Mock()
mockDataFrameEditor_instance = MockDataFrameEditor()
monkeypatch.setattr('spyder.plugins.variableexplorer.widgets.collectionsdelegate.DataFrameEditor',
MockDataFrameEditor)
df = pandas.DataFrame(['foo', 'bar'])
editor = CollectionsEditorTableView(None, {'df': df})
qtbot.addWidget(editor)
editor.set_dataframe_format('%10d')
editor.delegate.createEditor(None, None, editor.model.index(0, 3))
mockDataFrameEditor_instance.dataModel.set_format.assert_called_once_with('%10d')
def test_accept_sig_option_changed_from_dataframeeditor(qtbot, monkeypatch):
df = | pandas.DataFrame(['foo', 'bar']) | pandas.DataFrame |
#!/home/caoy7/anaconda2/envs/py37/bin/python3
#--coding:utf-8--
"""
tracPre.py
Pre-processing code for Hi-Trac data, implemented with cLoops2: from FASTQ files to BEDPE files and a QC report.
2020-02-27: finished and well tested.
2020-06-30: added the linker filter and new stats, and changed mapping to end-to-end.
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
#standard library
import os
import time
import gzip
import argparse
import subprocess
from glob import glob
from datetime import datetime
from argparse import RawTextHelpFormatter
#3rd party library
import pandas as pd
from joblib import Parallel, delayed
from Bio.Seq import Seq
from Bio.SeqIO.QualityIO import FastqGeneralIterator
#cLoops2
from cLoops2.utils import getLogger, callSys, isTool
#global settings
#logger
date = time.strftime(' %Y-%m-%d', time.localtime(time.time()))
logger = getLogger(fn=os.getcwd() + "/" + date.strip() + "_" +
os.path.basename(__file__) + ".log")
def help():
"""
Create the command line interface for the script.
"""
description = """
        Preprocess the raw reads of FASTQ files of Trac-looping to reference
        genome with bowtie2 and obtain the unique PETs with quality control
        results.
Fastqs files should be named with suffix pattern as
_R1.fastq.gz, _R2.fastq.gz.
Example:
tracPre.py -fqd ../1.fq -o ./ -ref ../bowtie2/hg38 -n 10 -p 5 -mapq 10
"""
parser = argparse.ArgumentParser(description=description,
formatter_class=RawTextHelpFormatter)
parser.add_argument(
"-fqd",
dest="fqd",
required=True,
type=str,
help="The directory for raw .fastq.gz files, for example ../1.fastq/ ")
parser.add_argument(
"-o",
dest="output",
required=False,
type=str,
default="./",
help=
"Output directory, default is ./, if directory not exists, create one."
)
parser.add_argument(
"-ref",
dest="ref",
required=True,
type=str,
help=
"Bowtie2 reference index prefix, such as ./ref/hg38, generated from\n"\
"bowtie2-build hg38.fa hg38."
)
parser.add_argument(
"-n",
dest="number",
required=False,
type=int,
default=1,
help="How many Bowtie2 to run at the same time, default is 1. ")
parser.add_argument(
"-p",
dest="cpu",
required=False,
type=int,
default=5,
help="How many cpus used by each Bowtie2 or following processing,\n"\
"default is 5. "
)
parser.add_argument("-mapq",
dest="mapq",
required=False,
default=10,
type=int,
help="MAPQ cutoffs for filtering PETs, default is 10.")
op = parser.parse_args()
return op
def preFqs(fastqRoot):
"""
    Pair the _R1/_R2 .fastq.gz files found under fastqRoot by sample name.
"""
fastqs = glob(fastqRoot + "/*.fastq.gz")
data = {}
for fq in fastqs:
s = os.path.split(fq)[1]
s = s.replace(".fastq.gz", "")
if s.endswith("_R1"):
sample = s.replace("_R1", "")
if sample not in data:
data[sample] = [0, 0]
data[sample][0] = fq
if s.endswith("_R2"):
sample = s.replace("_R2", "")
if sample not in data:
data[sample] = [0, 0]
data[sample][1] = fq
    #iterate over a copy so unpaired samples can be removed without breaking the loop
    for key, fqs in list(data.items()):
        if len(fqs) != 2:
            logger.error(
                "for %s there are no paired fastq files, only %s found" %
                (key, ",".join(map(str, fqs))))
del data[key]
return data
def findLinker(seq, linker):
"""
Match the linker in the read sequence.
"""
pos = -1
for i in range(len(seq) - 9):
seed = seq[i:i + 9]
if linker.startswith(seed):
pos = i
break
return pos
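# Quick illustration of findLinker (hypothetical reads, not part of the pipeline): the
# function slides a 9 bp seed along the read and returns the first offset whose seed is
# a prefix of the linker, or -1 when no seed matches.
#
#   linker = "CTGTCTCTTATACACATCT"
#   findLinker("ACGT" + linker, linker)      # -> 4, linker starts after 4 genomic bases
#   findLinker("ACGTACGTACGTACGT", linker)   # -> -1, read carries no linker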
def checkStarts(seq):
"""
Check the starts
"""
flag = False
ss = ["CATG", "AATT", "NATG", "NATT"]
for s in ss:
if seq.startswith(s):
flag = True
break
return flag
def cutLinker(fq1, fq2, pre, rlen=10, linker="CTGTCTCTTATACACATCT"):
"""
Cut linkers and filter too short reads
"""
sample = pre.split("/")[-1]
nf1 = pre + "_R1.fastq.gz"
nf2 = pre + "_R2.fastq.gz"
if os.path.isfile(nf1) and os.path.isfile(nf2):
print("%s has been generated, return" % pre)
return None
fouts = {
"fo_r1": gzip.open(nf1, "wt"),
"fo_r2": gzip.open(nf2, "wt"),
}
#processing pairing fastqs
with gzip.open(fq1, "rt") as f1, gzip.open(fq2, "rt") as f2:
i = 0
j = 0
for r1, r2 in zip(FastqGeneralIterator(f1), FastqGeneralIterator(f2)):
r1, r2 = list(r1), list(r2)
i += 1
if i % 100000 == 0:
print("%s reads processed for %s" % (i, pre))
#check the starts
"""
if not (checkStarts(r1[1]) and checkStarts(r2[1])):
continue
if r1[1][0] == "N":
r1[1] = r1[1][1:]
r1[2] = r1[2][1:]
if r2[1][0] == "N":
r2[1] = r2[1][1:]
r2[2] = r2[2][1:]
"""
#check the linker
r1pos = findLinker(r1[1], linker)
r2pos = findLinker(r2[1], linker)
#trim reads
if r1pos != -1:
r1[1] = r1[1][:r1pos]
r1[2] = r1[2][:r1pos]
if r2pos != -1:
r2[1] = r2[1][:r2pos]
r2[2] = r2[2][:r2pos]
rid = "_".join(list(map(str, [i, r1pos, r2pos])))
r1[0] = rid
r2[0] = rid
if len(r1[1]) >= rlen and len(r2[1]) >= rlen:
j += 1
fouts["fo_r1"].write("@%s\n%s\n+\n%s\n" %
(r1[0], r1[1], r1[2]))
fouts["fo_r2"].write("@%s\n%s\n+\n%s\n" %
(r2[0], r2[1], r2[2]))
return sample, i, j, nf1, nf2
def tracMapping(sample, fqs, ref, outdir, cpus=25):
"""
Mapping settings for Trac-looping data.
"""
logger.info("Start mapping %s.\n" % sample)
od = os.path.join(outdir, sample)
if not os.path.exists(od):
os.makedirs(od, exist_ok=True)
sam = od + "/" + sample + ".sam"
bam = od + "/" + sample + ".bam"
if os.path.isfile(sam):
logger.error("%s:%s exists, return." % (sample, sam))
return None
if os.path.isfile(bam):
logger.error("%s:%s exists, return." % (sample, bam))
return None
doBowtie = "bowtie2 -p {cpus} -q --end-to-end --very-sensitive -x {ref} -1 {fq1} -2 {fq2} -S {sam}".format(
cpus=cpus, ref=ref, fq1=fqs[0], fq2=fqs[1], sam=sam)
logger.info(doBowtie)
stat, output = subprocess.getstatusoutput(doBowtie)
#trim with "Warning"
output = output.split("\n")
output = [t for t in output if not t.startswith("Warning")]
output = "\n".join(output)
logger.info("FLAG_A:" + sample + "\n" + output + "\nFLAG_A\n")
return sample, sam
def getUniqueBedpe(f, fout):
"""
    Get unique PETs from a BEDPE file. The read id indicates the linker locations.
"""
if os.path.isfile(fout):
return
print("Getting unique PETs from %s to %s" % (f, fout))
redus = set()
with open(fout, "w") as fo:
for i, line in enumerate(open(f)):
line = line.split("\n")[0].split("\t")
if len(line) < 6:
continue
rid = list(map(int, line[6].split("_")))
#for cis short reads, requiring the linkers
if line[0] == line[3]:
dis = abs((int(line[1]) + int(line[2])) / 2 -
(int(line[4]) + int(line[5])) / 2)
if dis < 1000 and rid[1] + rid[2] == -2:
continue
#for trans reads, requiring the linkers
if line[0] != line[3]:
if rid[1] + rid[2] == -2:
continue
#remove redudant PETs
r = hash(tuple(line[:6]))
if r in redus:
continue
else:
redus.add(r)
fo.write("\t".join(line) + "\n")
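# Sketch of the read-id convention consumed above (coordinates are made up for
# illustration): cutLinker names each PET "<readNumber>_<r1LinkerPos>_<r2LinkerPos>",
# with -1 meaning no linker was found in that mate, so a BEDPE line such as
#   chr1  100  150  chr1  400  450  87_-1_-1  ...
# is a short cis PET without any linker evidence and is dropped, while
#   chr1  100  150  chr2  400  450  88_23_-1  ...
# keeps its trans contact because at least one mate carried the linker.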
def sam2bamBedpe(sample, sam, mapq=10):
"""
SAM to BAM and bedpe file
"""
n = os.path.splitext(sam)[0]
bam = n + ".bam"
bedpeAll = n + "_all.bedpe"
bedpeUni = n + "_unique.bedpe"
#sam to bam, filtering mapq
samview = "samtools view -b -F 4 -@ 2 -q {mapq} -o {bam} {sam}".format(
mapq=mapq, bam=bam, sam=sam)
#sort by read name
samsort = "samtools sort -n -@ 2 {bam} -T {pre} -o {bam}".format(
bam=bam, pre=bam.replace(".bam", ""))
rmsam = "rm %s" % (sam)
cmds = [samview, samsort, rmsam]
callSys(cmds, logger)
bam2bedpe = "bamToBed -bedpe -i {bam} > {bedpe}".format(bam=bam,
bedpe=bedpeAll)
logger.info(bam2bedpe)
stat, output = subprocess.getstatusoutput(bam2bedpe)
getUniqueBedpe(bedpeAll, bedpeUni)
cmd = "gzip %s %s" % (bedpeAll, bedpeUni)
callSys([cmd], logger)
return sample, bedpeAll + ".gz", bedpeUni + ".gz"
def sParseBowtie(lines):
"""
    Parse the Bowtie2 log output to obtain mapping statistics.
"""
d, s = None, None
lines = lines.split("\n")
s = lines[0]
totalReads = int(lines[1].split(";")[0].split()[0])
d1 = lines[4].strip().split()
conUniqueMappedReads = int(d1[0])
d2 = lines[8].strip().split()
unconUniqueMappedReads = int(d2[0])
#mapRatio = float(lines[15].split("%")[0])
mapRatio = float(lines[-2].split("%")[0])
d = {
"TotalRawReads": totalReads,
#"ConcordantlyUniqueMapReads": conUniqueMappedReads,
#"DisconcordantlyUniqueMapReads": unconUniqueMappedReads,
"MappingRatio(%s)": mapRatio
#"MultipleMapReads": multipleMappedReads,
#"MultipleMapRatio": multipleMappedRatio,
}
return d, s
def parseBowtielog(logs=None):
if logs == None:
logs = glob("*.log")
data = {}
for log in logs:
lines = open(log).read().split("FLAG_A\n")
lines = [line for line in lines if "FLAG_A" in line]
for line in lines:
t = line.split("FLAG_A:")[1]
d, s = sParseBowtie(t)
data[s] = d
data = pd.DataFrame(data).T
return data
def main():
"""
    Run the pre-processing pipeline: linker trimming, mapping, and BAM/BEDPE conversion.
"""
#prepare everything
op = help()
for t in ["bowtie2", "samtools", "bamToBed"]:
if not isTool(t):
logger.error("%s not exits! Please install through conda." % t)
return
if not os.path.exists(op.fqd):
logger.error("Input %s not exists! Return." % op.fqd)
return
if len(glob(op.ref + "*.bt2")) == 0:
logger.error("Bowtie2 reference not exists for prefix of %s! Return." %
op.ref)
return
if not os.path.exists(op.output):
os.makedirs(op.output, exist_ok=True)
else:
fs = glob(os.path.join(op.output, "*"))
if len(fs) > 0:
logger.info(
"Target output directory %s is not empty, may over-write some files."
% op.output)
data = preFqs(op.fqd)
if len(data) == 0:
logger.error(
"No matched _R1.fastq.gz and _R2.fastq.gz in %s. Return." %
(op.fqd))
return
#prepare output dir
dirs = {}
for sample in data.keys():
od = os.path.join(op.output, sample)
dirs[sample] = od
if not os.path.exists(od):
os.makedirs(od, exist_ok=True)
#step 1, filter linkers
logger.info("Step1: Trim linkers and remove short sequences.")
ds = Parallel(n_jobs=op.number)(
delayed(cutLinker)(fqs[0], fqs[1], os.path.join(dirs[sample], sample))
for sample, fqs in data.items())
data = {}
for d in ds:
if d is not None:
data[d[0]] = {
"totalRaw": d[1],
"filterLinkers": d[2],
"f1": d[3],
"f2": d[4],
}
#step2, mapping
logger.info("Step2: Map processed reads to genome.")
ref = op.ref
ds = Parallel(n_jobs=op.number, backend="multiprocessing")(
delayed(tracMapping)(
sample, [vs["f1"], vs["f2"]], ref, op.output, cpus=op.cpu)
for sample, vs in data.items())
for d in ds:
if d is not None:
data[d[0]]["sam"] = d[1]
#step3, convert to bam and bedpe files
#sam to bam and bedpe
logger.info("Step3: File type conversion. ")
cpus = op.number * op.cpu
ncpus = int(min(len(data), cpus / 2))
ds = Parallel(n_jobs=ncpus, backend="multiprocessing")(
delayed(sam2bamBedpe)(sample, vs["sam"], op.mapq)
for sample, vs in data.items())
allBedpes = []
uniBedpes = []
for d in ds:
if d is not None:
data[d[0]]["allBedpe"] = d[1]
data[d[0]]["uniNonbgBedpe"] = d[2]
allBedpes.append(d[1])
uniBedpes.append(d[2])
    data = pd.DataFrame(data)
import copy
import logging
import pandas as pd
import numpy as np
from collections import Counter
from sklearn import preprocessing, utils
import sklearn.model_selection as ms
from scipy.sparse import isspmatrix
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import os
import seaborn as sns
from abc import ABC, abstractmethod
# TODO: Move this to a common lib?
OUTPUT_DIRECTORY = './output'
if not os.path.exists(OUTPUT_DIRECTORY):
os.makedirs(OUTPUT_DIRECTORY)
if not os.path.exists('{}/images'.format(OUTPUT_DIRECTORY)):
os.makedirs('{}/images'.format(OUTPUT_DIRECTORY))
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def plot_pairplot(title, df, class_column_name=None):
plt = sns.pairplot(df, hue=class_column_name)
return plt
# Adapted from https://stats.stackexchange.com/questions/239973/a-general-measure-of-data-set-imbalance
def is_balanced(seq):
n = len(seq)
classes = [(clas, float(count)) for clas, count in Counter(seq).items()]
k = len(classes)
H = -sum([(count/n) * np.log((count/n)) for clas, count in classes])
return H/np.log(k) > 0.75
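# Worked example (assumed labels, shown only to make the entropy threshold concrete):
# a 50/50 binary vector has H equal to log(2), so H/log(k) == 1.0 and counts as
# balanced, while a 99/1 split gives H/log(k) of roughly 0.08 and is reported as
# imbalanced.
#
#   is_balanced([0] * 50 + [1] * 50)   # -> True  (normalised entropy 1.0)
#   is_balanced([0] * 99 + [1])        # -> False (normalised entropy ~0.08)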
class DataLoader(ABC):
def __init__(self, path, verbose, seed):
self._path = path
self._verbose = verbose
self._seed = seed
self.features = None
self.classes = None
self.testing_x = None
self.testing_y = None
self.training_x = None
self.training_y = None
self.binary = False
self.balanced = False
self._data = pd.DataFrame()
def load_and_process(self, data=None, preprocess=True):
"""
Load data from the given path and perform any initial processing required. This will populate the
features and classes and should be called before any processing is done.
:return: Nothing
"""
if data is not None:
self._data = data
self.features = None
self.classes = None
self.testing_x = None
self.testing_y = None
self.training_x = None
self.training_y = None
else:
self._load_data()
self.log("Processing {} Path: {}, Dimensions: {}", self.data_name(), self._path, self._data.shape)
if self._verbose:
old_max_rows = pd.options.display.max_rows
pd.options.display.max_rows = 10
self.log("Data Sample:\n{}", self._data)
pd.options.display.max_rows = old_max_rows
if preprocess:
self.log("Will pre-process data")
self._preprocess_data()
self.get_features()
self.get_classes()
self.log("Feature dimensions: {}", self.features.shape)
self.log("Classes dimensions: {}", self.classes.shape)
self.log("Class values: {}", np.unique(self.classes))
class_dist = np.histogram(self.classes)[0]
class_dist = class_dist[np.nonzero(class_dist)]
self.log("Class distribution: {}", class_dist)
self.log("Class distribution (%): {}", (class_dist / self.classes.shape[0]) * 100)
self.log("Sparse? {}", isspmatrix(self.features))
if len(class_dist) == 2:
self.binary = True
self.balanced = is_balanced(self.classes)
self.log("Binary? {}", self.binary)
self.log("Balanced? {}", self.balanced)
def scale_standard(self):
self.features = StandardScaler().fit_transform(self.features)
if self.training_x is not None:
self.training_x = StandardScaler().fit_transform(self.training_x)
if self.testing_x is not None:
self.testing_x = StandardScaler().fit_transform(self.testing_x)
def build_train_test_split(self, test_size=0.3):
        if self.training_x is None and self.training_y is None and self.testing_x is None and self.testing_y is None:
self.training_x, self.testing_x, self.training_y, self.testing_y = ms.train_test_split(
self.features, self.classes, test_size=test_size, random_state=self._seed, stratify=self.classes
)
def get_features(self, force=False):
if self.features is None or force:
self.log("Pulling features")
self.features = np.array(self._data.iloc[:, 0:-1])
return self.features
def get_classes(self, force=False):
if self.classes is None or force:
self.log("Pulling classes")
self.classes = np.array(self._data.iloc[:, -1])
return self.classes
def dump_test_train_val(self, test_size=0.2, random_state=123):
ds_train_x, ds_test_x, ds_train_y, ds_test_y = ms.train_test_split(self.features, self.classes,
test_size=test_size,
random_state=random_state,
stratify=self.classes)
pipe = Pipeline([('Scale', preprocessing.StandardScaler())])
train_x = pipe.fit_transform(ds_train_x, ds_train_y)
train_y = np.atleast_2d(ds_train_y).T
test_x = pipe.transform(ds_test_x)
test_y = np.atleast_2d(ds_test_y).T
train_x, validate_x, train_y, validate_y = ms.train_test_split(train_x, train_y,
test_size=test_size, random_state=random_state,
stratify=train_y)
test_y = pd.DataFrame(np.where(test_y == 0, -1, 1))
train_y = pd.DataFrame(np.where(train_y == 0, -1, 1))
validate_y = pd.DataFrame(np.where(validate_y == 0, -1, 1))
tst = pd.concat([pd.DataFrame(test_x), test_y], axis=1)
trg = pd.concat([pd.DataFrame(train_x), train_y], axis=1)
        val = pd.concat([pd.DataFrame(validate_x), validate_y], axis=1)
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from IPython.core.display import HTML
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
import plotly.offline as py
import plotly.graph_objs as go
import plotly.express as px
class SalesForecaster:
"""This class creates 'easy to handle' forecaster objects
It will gather all the required variables to make the code more readable
- sales_clusters_df (pandas dataframe): The original sales dataframe
The columns are :
- product_code : string values such as CLA0 (CLA is the client and 0 is the product number)
- date : datetime64 (ns) the date of the sale such as pd.to_datetime("2018-01-02") : YYYY-MM-DD
- quantity : int64 an integer value: the number of products for this sale
- cluster : int64 an integer value The cluster the product is part of
- test_date (string : "2019-03-01" : YYYY-MM-DD): the training data is automatically all sales prior to this date
- max_waiting_time (string such as '7 days') : The maximum time a client is willing to wait :
required for grouping orders into batches)
- calendar_length (string such as '7 days'): The calendar length you want to zoom in
"""
def __init__(self,
sales_clusters_df,
test_date,
max_waiting_time,
detailed_view=False,
calendar_length='7 days'
):
self.sales_clusters_df = sales_clusters_df
self.test_date = test_date
self.max_waiting_time = max_waiting_time
self.detailed_view = detailed_view
self.calendar_length = calendar_length
self.optimal_batches = []
self.predicted_batches = []
self.predictions = []
def get_predicted_batches(self):
"""This function takes the original sales df,
computes the dates and quantities models at a product level using the test_date to split the dataset
into a training dataset and a testing dataset,
generates the predicted sales,
computes the associated "predicted" batches using the max waiting time value,
computes the optimal batches using the actual data using the max waiting time value,
outputs the optimal batches df and the predicted batches df,
and 2 graphs to visualize it:
- Input:
All the inputs are encapsulated in the SalesForecaster instance:
- sales_clusters_df
- test_date
- max_waiting_time
- calendar_length
- Output:
- Main graph with optimal batches vs predicted batches for the test data
- The same graph zoomed in the week following the test date
- 1 optimal batches df
- 1 predicted batches df
"""
clusters_list = self.sales_clusters_df['Cluster'].unique()
optimal_batches = []
predicted_batches = []
predictions = []
for cluster in clusters_list:
local_optimal_batches, local_predicted_batches, local_predictions = self.\
get_cluster_level_predicted_batches(cluster)
local_optimal_batches['Cluster'] = cluster
local_predicted_batches['Cluster'] = cluster
optimal_batches.append(local_optimal_batches)
predicted_batches.append(local_predicted_batches)
predictions.append(local_predictions)
optimal_batches = pd.concat(optimal_batches)
optimal_batches.reset_index(drop=True,
inplace=True)
optimal_batches['batch_date'] = optimal_batches.batch_date.str.split(' ').apply(lambda x: x[0])
predicted_batches = pd.concat(predicted_batches)
predicted_batches.reset_index(drop=True,
inplace=True)
predicted_batches['batch_date'] = predicted_batches.batch_date.str.split(' ').apply(lambda x: x[0])
predictions = pd.concat(predictions)
predictions.reset_index(drop=True,
inplace=True)
dark_map = px.colors.qualitative.Dark2
pastel_map = px.colors.qualitative.Pastel2
fig = go.Figure()
for (cluster, dark_color, pastel_color) in zip(clusters_list, dark_map, pastel_map):
local_optimal = optimal_batches[optimal_batches['Cluster'] == cluster]
local_predicted = predicted_batches[predicted_batches['Cluster'] == cluster]
fig.add_trace(go.Bar(x=pd.to_datetime(local_optimal[local_optimal['batch_date'] > self.test_date] \
['batch_date']) - pd.Timedelta('12 hours'),
y=local_optimal[local_optimal['batch_date'] > self.test_date] \
['quantities'],
name='Cluster #{}\nOptimized batches - actual values'.format(cluster),
width=1e3 * pd.Timedelta('6 hours').total_seconds(),
marker_color=dark_color))
fig.add_trace(go.Bar(x=pd.to_datetime(local_predicted[local_predicted['batch_date'] > self.test_date] \
['batch_date']) - pd.Timedelta('12 hours'),
y=local_predicted[local_predicted['batch_date'] > self.test_date] \
['predicted_quantities'],
name='Cluster #{}\nPredicted batches'.format(cluster),
width=1e3 * pd.Timedelta('6 hours').total_seconds(),
marker_color=pastel_color))
# Edit the layout
fig.update_layout(title='Optimal batches vs predicted batches for the test period',
xaxis_title='Date',
yaxis_title='Quantities')
fig.show()
fig = go.Figure()
for (cluster, dark_color, pastel_color) in zip(clusters_list, dark_map, pastel_map):
local_optimal = optimal_batches[optimal_batches['Cluster'] == cluster]
local_predicted = predicted_batches[predicted_batches['Cluster'] == cluster]
fig.add_trace(go.Bar(x=pd.to_datetime(local_optimal[(local_optimal['batch_date'] > self.test_date) & \
(local_optimal['batch_date'] < str((pd.Timestamp(
self.test_date) + pd.Timedelta(self.calendar_length))))] \
['batch_date']) - pd.Timedelta('0 hours'),
y=local_optimal[(local_optimal['batch_date'] > self.test_date) & \
(local_optimal['batch_date'] < str(
(pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))] \
['quantities'],
name='Cluster #{}\nOptimized batches - actual values'.format(cluster),
width=1e3 * pd.Timedelta('6 hours').total_seconds(),
marker_color=dark_color,
marker_line_color='black',
marker_line_width=1.5,
opacity=0.6))
fig.add_trace(go.Bar(x=pd.to_datetime(local_predicted[(local_predicted['batch_date'] > self.test_date) & \
(local_predicted['batch_date'] < str((pd.Timestamp(
self.test_date) + pd.Timedelta(self.calendar_length))))] \
['batch_date']) - pd.Timedelta('0 hours'),
y=local_predicted[(local_predicted['batch_date'] > self.test_date) & \
(local_predicted['batch_date'] < str(
(pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))] \
['predicted_quantities'],
name='Cluster #{}\nPredicted batches'.format(cluster),
width=1e3 * pd.Timedelta('6 hours').total_seconds(),
marker_color=pastel_color,
marker_line_color='black',
marker_line_width=1.5,
opacity=0.6))
# Edit the layout
fig.update_layout(barmode='stack', xaxis_tickangle=-45,
title='Optimal batches vs predicted batches for the following week',
xaxis_title='Date',
yaxis_title='Quantities')
fig.show()
fig = go.Figure()
for (cluster, dark_color, pastel_color) in zip(clusters_list, dark_map, pastel_map):
local_optimal = optimal_batches[optimal_batches['Cluster'] == cluster]
local_predicted = predicted_batches[predicted_batches['Cluster'] == cluster]
local_predictions = predictions[predictions['Cluster'] == cluster]
if local_predictions[(local_predictions.ds > self.test_date) & (
local_predictions.ds <= str((pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))].shape[
0] > 0:
display(HTML(local_predictions[(local_predictions.ds > self.test_date) & (
local_predictions.ds <= str((pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))][
['ds', 'y', 'product_code', 'Cluster']].to_html()))
if local_predictions[(local_predictions.yhat_date > self.test_date) & (
local_predictions.yhat_date <= str(
(pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))].shape[
0] > 0:
display(HTML(local_predictions[(local_predictions.yhat_date > self.test_date) & (
                        local_predictions.yhat_date <= str((pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))].to_html()))
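# Usage sketch (illustrative only; the product codes, dates and cluster ids below are
# invented to match the frame layout described in the SalesForecaster docstring):
#
#   sales = pd.DataFrame({
#       'product_code': ['CLA0', 'CLA0', 'CLB1', 'CLB1'],
#       'date': pd.to_datetime(['2018-01-02', '2018-01-09', '2018-01-03', '2018-01-10']),
#       'quantity': [12, 7, 30, 25],
#       'Cluster': [0, 0, 1, 1],
#   })
#   forecaster = SalesForecaster(sales, test_date='2019-03-01',
#                                max_waiting_time='7 days', calendar_length='7 days')
#   forecaster.get_predicted_batches()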
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
        self.assertTrue(com.is_float_dtype(cat.categories))
from __future__ import print_function
import csv
import os
import copy
import numpy as np
import os, sys
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
sys.path.append(os.environ.get("PROJECT_ROOT"))
sys.path.append(os.path.join(os.environ.get("PROJECT_ROOT"), 'test'))
import GPy_1_0_5 as GPy  # aliased so the GPy.util.* references below resolve
import scipy.io
import zipfile
import tarfile
import datetime
import json
import re
import sys
from .config import *
ipython_available=True
try:
import IPython
except ImportError:
ipython_available=False
try:
#In Python 2, cPickle is faster. It does not exist in Python 3 but the underlying code is always used
#if available
import cPickle as pickle
except ImportError:
import pickle
#A Python2/3 import handler - urllib2 changed its name in Py3 and was also reorganised
try:
    from urllib2 import urlopen
    from urllib2 import URLError
    from urllib2 import quote
except ImportError:
    from urllib.request import urlopen
    from urllib.error import URLError
    from urllib.parse import quote
def reporthook(a,b,c):
# ',' at the end of the line is important!
#print "% 3.1f%% of %d bytes\r" % (min(100, float(a * b) / c * 100), c),
#you can also use sys.stdout.write
sys.stdout.write("\r% 3.1f%% of %d bytes" % (min(100, float(a * b) / c * 100), c))
sys.stdout.flush()
# Global variables
data_path = os.path.expandvars(config.get('datasets', 'dir'))
#data_path = os.path.join(os.path.dirname(__file__), 'datasets')
default_seed = 10000
overide_manual_authorize=False
neil_url = 'http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/dataset_mirror/'
# Read data resources from json file.
# Don't do this when ReadTheDocs is scanning as it breaks things
on_rtd = os.environ.get('READTHEDOCS', None) == 'True' #Checks if RTD is scanning
if not (on_rtd):
path = os.path.join(os.path.dirname(__file__), 'data_resources.json')
json_data=open(path).read()
data_resources = json.loads(json_data)
if not (on_rtd):
path = os.path.join(os.path.dirname(__file__), 'football_teams.json')
json_data=open(path).read()
football_dict = json.loads(json_data)
def prompt_user(prompt):
"""Ask user for agreeing to data set licenses."""
# raw_input returns the empty string for "enter"
yes = set(['yes', 'y'])
no = set(['no','n'])
try:
print(prompt)
        try:
            choice = raw_input().lower()
        except NameError:  # raw_input does not exist in Python 3
            choice = input().lower()
# would like to test for exception here, but not sure if we can do that without importing IPython
except:
print('Stdin is not implemented.')
print('You need to set')
print('overide_manual_authorize=True')
print('to proceed with the download. Please set that variable and continue.')
raise
if choice in yes:
return True
elif choice in no:
return False
else:
print(("Your response was a " + choice))
print("Please respond with 'yes', 'y' or 'no', 'n'")
#return prompt_user()
def data_available(dataset_name=None):
"""Check if the data set is available on the local machine already."""
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
dr = data_resources[dataset_name]
zip_urls = (dr['files'], )
if 'save_names' in dr: zip_urls += (dr['save_names'], )
else: zip_urls += ([],)
for file_list, save_list in izip_longest(*zip_urls, fillvalue=[]):
for f, s in izip_longest(file_list, save_list, fillvalue=None):
if s is not None: f=s # If there is a save_name given, use that one
if not os.path.exists(os.path.join(data_path, dataset_name, f)):
return False
return True
def download_url(url, store_directory, save_name=None, messages=True, suffix=''):
"""Download a file from a url and save it to disk."""
i = url.rfind('/')
file = url[i+1:]
print(file)
dir_name = os.path.join(data_path, store_directory)
if save_name is None: save_name = os.path.join(dir_name, file)
else: save_name = os.path.join(dir_name, save_name)
if suffix is None: suffix=''
print("Downloading ", url, "->", save_name)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
try:
response = urlopen(url+suffix)
except URLError as e:
if not hasattr(e, "code"):
raise
response = e
if response.code > 399 and response.code<500:
raise ValueError('Tried url ' + url + suffix + ' and received client error ' + str(response.code))
elif response.code > 499:
raise ValueError('Tried url ' + url + suffix + ' and received server error ' + str(response.code))
with open(save_name, 'wb') as f:
meta = response.info()
content_length_str = meta.getheaders("Content-Length")
if content_length_str:
file_size = int(content_length_str[0])
else:
file_size = None
status = ""
file_size_dl = 0
block_sz = 8192
line_length=30
while True:
buff = response.read(block_sz)
if not buff:
break
file_size_dl += len(buff)
f.write(buff)
sys.stdout.write(" "*(len(status)) + "\r")
if file_size:
status = r"[{perc: <{ll}}] {dl:7.3f}/{full:.3f}MB".format(dl=file_size_dl/(1048576.),
full=file_size/(1048576.), ll=line_length,
perc="="*int(line_length*float(file_size_dl)/file_size))
else:
status = r"[{perc: <{ll}}] {dl:7.3f}MB".format(dl=file_size_dl/(1048576.),
ll=line_length,
perc="."*int(line_length*float(file_size_dl/(10*1048576.))))
sys.stdout.write(status)
sys.stdout.flush()
sys.stdout.write(" "*(len(status)) + "\r")
print(status)
# if we wanted to get more sophisticated maybe we should check the response code here again even for successes.
#with open(save_name, 'wb') as f:
# f.write(response.read())
#urllib.urlretrieve(url+suffix, save_name, reporthook)
def authorize_download(dataset_name=None):
"""Check with the user that the are happy with terms and conditions for the data set."""
print(('Acquiring resource: ' + dataset_name))
# TODO, check resource is in dictionary!
print('')
dr = data_resources[dataset_name]
print('Details of data: ')
print((dr['details']))
print('')
if dr['citation']:
print('Please cite:')
print((dr['citation']))
print('')
if dr['size']:
print(('After downloading the data will take up ' + str(dr['size']) + ' bytes of space.'))
print('')
print(('Data will be stored in ' + os.path.join(data_path, dataset_name) + '.'))
print('')
if overide_manual_authorize:
if dr['license']:
print('You have agreed to the following license:')
print((dr['license']))
print('')
return True
else:
if dr['license']:
print('You must also agree to the following license:')
print((dr['license']))
print('')
return prompt_user('Do you wish to proceed with the download? [yes/no]')
def download_data(dataset_name=None):
"""Check with the user that the are happy with terms and conditions for the data set, then download it."""
    try:
        from itertools import izip_longest
    except ImportError:
        from itertools import zip_longest as izip_longest
    dr = data_resources[dataset_name]
    if not authorize_download(dataset_name):
        raise Exception("Permission to download data set denied.")
    zip_urls = (dr['urls'], dr['files'])
    if 'save_names' in dr: zip_urls += (dr['save_names'], )
    else: zip_urls += ([],)
    if 'suffices' in dr: zip_urls += (dr['suffices'], )
    else: zip_urls += ([],)
    for url, files, save_names, suffices in izip_longest(*zip_urls, fillvalue=[]):
        for f, save_name, suffix in izip_longest(files, save_names, suffices, fillvalue=None):
            download_url(os.path.join(url,f), dataset_name, save_name, suffix=suffix)
    return True
def data_details_return(data, data_set):
"""Update the data component of the data dictionary with details drawn from the data_resources."""
data.update(data_resources[data_set])
return data
def cmu_urls_files(subj_motions, messages = True):
'''
Find which resources are missing on the local disk for the requested CMU motion capture motions.
'''
dr = data_resources['cmu_mocap_full']
cmu_url = dr['urls'][0]
subjects_num = subj_motions[0]
motions_num = subj_motions[1]
resource = {'urls' : [], 'files' : []}
# Convert numbers to strings
subjects = []
motions = [list() for _ in range(len(subjects_num))]
for i in range(len(subjects_num)):
curSubj = str(int(subjects_num[i]))
if int(subjects_num[i]) < 10:
curSubj = '0' + curSubj
subjects.append(curSubj)
for j in range(len(motions_num[i])):
curMot = str(int(motions_num[i][j]))
if int(motions_num[i][j]) < 10:
curMot = '0' + curMot
motions[i].append(curMot)
all_skels = []
assert len(subjects) == len(motions)
all_motions = []
for i in range(len(subjects)):
skel_dir = os.path.join(data_path, 'cmu_mocap')
cur_skel_file = os.path.join(skel_dir, subjects[i] + '.asf')
url_required = False
file_download = []
if not os.path.exists(cur_skel_file):
# Current skel file doesn't exist.
if not os.path.isdir(skel_dir):
os.makedirs(skel_dir)
# Add skel file to list.
url_required = True
file_download.append(subjects[i] + '.asf')
for j in range(len(motions[i])):
file_name = subjects[i] + '_' + motions[i][j] + '.amc'
cur_motion_file = os.path.join(skel_dir, file_name)
if not os.path.exists(cur_motion_file):
url_required = True
file_download.append(subjects[i] + '_' + motions[i][j] + '.amc')
if url_required:
resource['urls'].append(cmu_url + '/' + subjects[i] + '/')
resource['files'].append(file_download)
return resource
try:
import gpxpy
import gpxpy.gpx
gpxpy_available = True
except ImportError:
gpxpy_available = False
if gpxpy_available:
def epomeo_gpx(data_set='epomeo_gpx', sample_every=4):
if not data_available(data_set):
download_data(data_set)
files = ['endomondo_1', 'endomondo_2', 'garmin_watch_via_endomondo','viewranger_phone', 'viewranger_tablet']
X = []
for file in files:
gpx_file = open(os.path.join(data_path, 'epomeo_gpx', file + '.gpx'), 'r')
gpx = gpxpy.parse(gpx_file)
segment = gpx.tracks[0].segments[0]
points = [point for track in gpx.tracks for segment in track.segments for point in segment.points]
data = [[(point.time-datetime.datetime(2013,8,21)).total_seconds(), point.latitude, point.longitude, point.elevation] for point in points]
X.append(np.asarray(data)[::sample_every, :])
gpx_file.close()
return data_details_return({'X' : X, 'info' : 'Data is an array containing time in seconds, latitude, longitude and elevation in that order.'}, data_set)
#del gpxpy_available
# Some general utilities.
def sample_class(f):
p = 1. / (1. + np.exp(-f))
c = np.random.binomial(1, p)
c = np.where(c, 1, -1)
return c
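# Worked example of the logistic link above (numbers assumed for illustration):
# f = 0 maps to p = 0.5, so the +/-1 label is a fair coin flip, while f = 5 gives
# p ~= 0.993 and the sample is +1 almost surely, e.g.
#   sample_class(np.array([0., 5., -5.]))   # one possible draw: array([-1,  1, -1])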
def boston_housing(data_set='boston_housing'):
if not data_available(data_set):
download_data(data_set)
all_data = np.genfromtxt(os.path.join(data_path, data_set, 'housing.data'))
X = all_data[:, 0:13]
Y = all_data[:, 13:14]
return data_details_return({'X' : X, 'Y': Y}, data_set)
def brendan_faces(data_set='brendan_faces'):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'frey_rawface.mat'))
Y = mat_data['ff'].T
return data_details_return({'Y': Y}, data_set)
def della_gatta_TRP63_gene_expression(data_set='della_gatta', gene_number=None):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'DellaGattadata.mat'))
X = np.double(mat_data['timepoints'])
if gene_number == None:
Y = mat_data['exprs_tp53_RMA']
else:
Y = mat_data['exprs_tp53_RMA'][:, gene_number]
if len(Y.shape) == 1:
Y = Y[:, None]
return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set)
def football_data(season='1314', data_set='football_data'):
"""Football data from English games since 1993. This downloads data from football-data.co.uk for the given season. """
def league2num(string):
league_dict = {'E0':0, 'E1':1, 'E2': 2, 'E3': 3, 'EC':4}
return league_dict[string]
    def football2num(string):
        if string in football_dict:
            return football_dict[string]
        else:
            football_dict[string] = len(football_dict)+1
            return football_dict[string]
data_set_season = data_set + '_' + season
data_resources[data_set_season] = copy.deepcopy(data_resources[data_set])
data_resources[data_set_season]['urls'][0]+=season + '/'
start_year = int(season[0:2])
end_year = int(season[2:4])
files = ['E0.csv', 'E1.csv', 'E2.csv', 'E3.csv']
if start_year>4 and start_year < 93:
files += ['EC.csv']
data_resources[data_set_season]['files'] = [files]
if not data_available(data_set_season):
download_data(data_set_season)
from matplotlib import pyplot as pb
for file in reversed(files):
filename = os.path.join(data_path, data_set_season, file)
# rewrite files removing blank rows.
writename = os.path.join(data_path, data_set_season, 'temp.csv')
input = open(filename, 'rb')
output = open(writename, 'wb')
writer = csv.writer(output)
for row in csv.reader(input):
if any(field.strip() for field in row):
writer.writerow(row)
input.close()
output.close()
table = np.loadtxt(writename,skiprows=1, usecols=(0, 1, 2, 3, 4, 5), converters = {0: league2num, 1: pb.datestr2num, 2:football2num, 3:football2num}, delimiter=',')
X = table[:, :4]
Y = table[:, 4:]
return data_details_return({'X': X, 'Y': Y}, data_set)
def sod1_mouse(data_set='sod1_mouse'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'sod1_C57_129_exprs.csv')
Y = read_csv(filename, header=0, index_col=0)
num_repeats=4
num_time=4
num_cond=4
X = 1
return data_details_return({'X': X, 'Y': Y}, data_set)
def spellman_yeast(data_set='spellman_yeast'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'combined.txt')
Y = read_csv(filename, header=0, index_col=0, sep='\t')
return data_details_return({'Y': Y}, data_set)
def spellman_yeast_cdc15(data_set='spellman_yeast'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'combined.txt')
Y = read_csv(filename, header=0, index_col=0, sep='\t')
t = np.asarray([10, 30, 50, 70, 80, 90, 100, 110, 120, 130, 140, 150, 170, 180, 190, 200, 210, 220, 230, 240, 250, 270, 290])
times = ['cdc15_'+str(time) for time in t]
Y = Y[times].T
t = t[:, None]
return data_details_return({'Y' : Y, 't': t, 'info': 'Time series of synchronized yeast cells from the CDC-15 experiment of Spellman et al (1998).'}, data_set)
def lee_yeast_ChIP(data_set='lee_yeast_ChIP'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
import zipfile
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'binding_by_gene.tsv')
S = read_csv(filename, header=1, index_col=0, sep='\t')
transcription_factors = [col for col in S.columns if col[:7] != 'Unnamed']
annotations = S[['Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3']]
S = S[transcription_factors]
return data_details_return({'annotations' : annotations, 'Y' : S, 'transcription_factors': transcription_factors}, data_set)
def fruitfly_tomancak(data_set='fruitfly_tomancak', gene_number=None):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'tomancak_exprs.csv')
Y = read_csv(filename, header=0, index_col=0).T
num_repeats = 3
num_time = 12
xt = np.linspace(0, num_time-1, num_time)
xr = np.linspace(0, num_repeats-1, num_repeats)
xtime, xrepeat = np.meshgrid(xt, xr)
X = np.vstack((xtime.flatten(), xrepeat.flatten())).T
return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set)
def drosophila_protein(data_set='drosophila_protein'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'becker_et_al.csv')
Y = read_csv(filename, header=0)
return data_details_return({'Y': Y}, data_set)
def drosophila_knirps(data_set='drosophila_protein'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'becker_et_al.csv')
# in the csv file we have facts_kni and ext_kni. We treat facts_kni as protein and ext_kni as mRNA
df = read_csv(filename, header=0)
t = df['t'][:,None]
x = df['x'][:,None]
g = df['expression1'][:,None]
p = df['expression2'][:,None]
leng = x.shape[0]
T = np.vstack([t,t])
S = np.vstack([x,x])
inx = np.zeros(leng*2)[:,None]
inx[leng*2/2:leng*2]=1
X = np.hstack([T,S,inx])
Y = np.vstack([g,p])
return data_details_return({'Y': Y, 'X': X}, data_set)
# This will be for downloading google trends data.
def google_trends(query_terms=['big data', 'machine learning', 'data science'], data_set='google_trends', refresh_data=False):
"""Data downloaded from Google trends for given query terms. Warning, if you use this function multiple times in a row you get blocked due to terms of service violations. The function will cache the result of your query, if you wish to refresh an old query set refresh_data to True. The function is inspired by this notebook: http://nbviewer.ipython.org/github/sahuguet/notebooks/blob/master/GoogleTrends%20meet%20Notebook.ipynb"""
query_terms.sort()
import pandas
# Create directory name for data
dir_path = os.path.join(data_path,'google_trends')
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
dir_name = '-'.join(query_terms)
dir_name = dir_name.replace(' ', '_')
dir_path = os.path.join(dir_path,dir_name)
file = 'data.csv'
file_name = os.path.join(dir_path,file)
if not os.path.exists(file_name) or refresh_data:
print("Accessing Google trends to acquire the data. Note that repeated accesses will result in a block due to a google terms of service violation. Failure at this point may be due to such blocks.")
# quote the query terms.
quoted_terms = []
for term in query_terms:
            quoted_terms.append(quote(term))
print("Query terms: ", ', '.join(query_terms))
print("Fetching query:")
query = 'http://www.google.com/trends/fetchComponent?q=%s&cid=TIMESERIES_GRAPH_0&export=3' % ",".join(quoted_terms)
data = urlopen(query).read()
print("Done.")
# In the notebook they did some data cleaning: remove Javascript header+footer, and translate new Date(....,..,..) into YYYY-MM-DD.
header = """// Data table response\ngoogle.visualization.Query.setResponse("""
data = data[len(header):-2]
data = re.sub('new Date\((\d+),(\d+),(\d+)\)', (lambda m: '"%s-%02d-%02d"' % (m.group(1).strip(), 1+int(m.group(2)), int(m.group(3)))), data)
timeseries = json.loads(data)
columns = [k['label'] for k in timeseries['table']['cols']]
        rows = list(map(lambda x: [k['v'] for k in x['c']], timeseries['table']['rows']))
df = pandas.DataFrame(rows, columns=columns)
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
df.to_csv(file_name)
else:
print("Reading cached data for google trends. To refresh the cache set 'refresh_data=True' when calling this function.")
print("Query terms: ", ', '.join(query_terms))
df = pandas.read_csv(file_name, parse_dates=[0])
columns = df.columns
terms = len(query_terms)
import datetime
X = np.asarray([(row, i) for i in range(terms) for row in df.index])
Y = np.asarray([[df.ix[row][query_terms[i]]] for i in range(terms) for row in df.index ])
output_info = columns[1:]
return data_details_return({'data frame' : df, 'X': X, 'Y': Y, 'query_terms': output_info, 'info': "Data downloaded from google trends with query terms: " + ', '.join(output_info) + '.'}, data_set)
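# Example call (hedged: repeated queries get blocked by Google, so rely on the cache):
#   trends = google_trends(query_terms=['gaussian process', 'deep learning'])
#   trends['data frame'].head()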
# The data sets
def oil(data_set='three_phase_oil_flow'):
"""The three phase oil data from Bishop and James (1993)."""
if not data_available(data_set):
download_data(data_set)
oil_train_file = os.path.join(data_path, data_set, 'DataTrn.txt')
oil_trainlbls_file = os.path.join(data_path, data_set, 'DataTrnLbls.txt')
oil_test_file = os.path.join(data_path, data_set, 'DataTst.txt')
oil_testlbls_file = os.path.join(data_path, data_set, 'DataTstLbls.txt')
oil_valid_file = os.path.join(data_path, data_set, 'DataVdn.txt')
oil_validlbls_file = os.path.join(data_path, data_set, 'DataVdnLbls.txt')
fid = open(oil_train_file)
X = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_test_file)
Xtest = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_valid_file)
Xvalid = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_trainlbls_file)
Y = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
fid = open(oil_testlbls_file)
Ytest = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
fid = open(oil_validlbls_file)
Yvalid = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'Xvalid': Xvalid, 'Yvalid': Yvalid}, data_set)
#else:
# throw an error
def oil_100(seed=default_seed, data_set = 'three_phase_oil_flow'):
np.random.seed(seed=seed)
data = oil()
indices = np.random.permutation(1000)
indices = indices[0:100]
X = data['X'][indices, :]
Y = data['Y'][indices, :]
return data_details_return({'X': X, 'Y': Y, 'info': "Subsample of the full oil data extracting 100 values randomly without replacement, here seed was " + str(seed)}, data_set)
def pumadyn(seed=default_seed, data_set='pumadyn-32nm'):
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar = tarfile.open(os.path.join(path, 'pumadyn-32nm.tar.gz'))
print('Extracting file.')
tar.extractall(path=path)
tar.close()
# Data is variance 1, no need to normalize.
data = np.loadtxt(os.path.join(data_path, data_set, 'pumadyn-32nm', 'Dataset.data.gz'))
    # seed the permutation so the train/test split is reproducible for a given seed
    np.random.seed(seed=seed)
    indices = np.random.permutation(data.shape[0])
indicesTrain = indices[0:7168]
indicesTest = indices[7168:-1]
indicesTrain.sort(axis=0)
indicesTest.sort(axis=0)
X = data[indicesTrain, 0:-2]
Y = data[indicesTrain, -1][:, None]
Xtest = data[indicesTest, 0:-2]
Ytest = data[indicesTest, -1][:, None]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed': seed}, data_set)
def robot_wireless(data_set='robot_wireless'):
# WiFi access point strengths on a tour around UW Paul Allen building.
if not data_available(data_set):
download_data(data_set)
file_name = os.path.join(data_path, data_set, 'uw-floor.txt')
all_time = np.genfromtxt(file_name, usecols=(0))
macaddress = np.genfromtxt(file_name, usecols=(1), dtype='string')
x = np.genfromtxt(file_name, usecols=(2))
y = np.genfromtxt(file_name, usecols=(3))
strength = np.genfromtxt(file_name, usecols=(4))
addresses = np.unique(macaddress)
times = np.unique(all_time)
addresses.sort()
times.sort()
allY = np.zeros((len(times), len(addresses)))
allX = np.zeros((len(times), 2))
allY[:]=-92.
strengths={}
for address, j in zip(addresses, range(len(addresses))):
ind = np.nonzero(address==macaddress)
temp_strengths=strength[ind]
temp_x=x[ind]
temp_y=y[ind]
temp_times = all_time[ind]
for time in temp_times:
vals = time==temp_times
if any(vals):
ind2 = np.nonzero(vals)
i = np.nonzero(time==times)
allY[i, j] = temp_strengths[ind2]
allX[i, 0] = temp_x[ind2]
allX[i, 1] = temp_y[ind2]
allY = (allY + 85.)/15.
X = allX[0:215, :]
Y = allY[0:215, :]
Xtest = allX[215:, :]
Ytest = allY[215:, :]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'addresses' : addresses, 'times' : times}, data_set)
def silhouette(data_set='ankur_pose_data'):
    # <NAME> and <NAME>'s silhouette data.
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'ankurDataPoseSilhouette.mat'))
inMean = np.mean(mat_data['Y'])
inScales = np.sqrt(np.var(mat_data['Y']))
X = mat_data['Y'] - inMean
X = X / inScales
Xtest = mat_data['Y_test'] - inMean
Xtest = Xtest / inScales
Y = mat_data['Z']
Ytest = mat_data['Z_test']
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest}, data_set)
def decampos_digits(data_set='decampos_characters', which_digits=[0,1,2,3,4,5,6,7,8,9]):
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
digits = np.load(os.path.join(path, 'digits.npy'))
digits = digits[which_digits,:,:,:]
num_classes, num_samples, height, width = digits.shape
Y = digits.reshape((digits.shape[0]*digits.shape[1],digits.shape[2]*digits.shape[3]))
lbls = np.array([[l]*num_samples for l in which_digits]).reshape(Y.shape[0], 1)
str_lbls = np.array([[str(l)]*num_samples for l in which_digits])
return data_details_return({'Y': Y, 'lbls': lbls, 'str_lbls' : str_lbls, 'info': 'Digits data set from the de Campos characters data'}, data_set)
def ripley_synth(data_set='ripley_prnn_data'):
if not data_available(data_set):
download_data(data_set)
train = np.genfromtxt(os.path.join(data_path, data_set, 'synth.tr'), skip_header=1)
X = train[:, 0:2]
y = train[:, 2:3]
test = np.genfromtxt(os.path.join(data_path, data_set, 'synth.te'), skip_header=1)
Xtest = test[:, 0:2]
ytest = test[:, 2:3]
return data_details_return({'X': X, 'Y': y, 'Xtest': Xtest, 'Ytest': ytest, 'info': 'Synthetic data generated by Ripley for a two class classification problem.'}, data_set)
def global_average_temperature(data_set='global_temperature', num_train=1000, refresh_data=False):
path = os.path.join(data_path, data_set)
if data_available(data_set) and not refresh_data:
print('Using cached version of the data set, to use latest version set refresh_data to True')
else:
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'GLBTS.long.data'))
print('Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0])
allX = data[data[:, 3]!=-99.99, 2:3]
allY = data[data[:, 3]!=-99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Mauna Loa data with " + str(num_train) + " values used as training points."}, data_set)
def mauna_loa(data_set='mauna_loa', num_train=545, refresh_data=False):
path = os.path.join(data_path, data_set)
if data_available(data_set) and not refresh_data:
print('Using cached version of the data set, to use latest version set refresh_data to True')
else:
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'co2_mm_mlo.txt'))
print('Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0])
allX = data[data[:, 3]!=-99.99, 2:3]
allY = data[data[:, 3]!=-99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Mauna Loa data with " + str(num_train) + " values used as training points."}, data_set)
def boxjenkins_airline(data_set='boxjenkins_airline', num_train=96):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'boxjenkins_airline.csv'), delimiter=',')
Y = data[:num_train, 1:2]
X = data[:num_train, 0:1]
Xtest = data[num_train:, 0:1]
Ytest = data[num_train:, 1:2]
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Monthly airline passenger data from Box & Jenkins 1976."}, data_set)
def osu_run1(data_set='osu_run1', sample_every=4):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
zip = zipfile.ZipFile(os.path.join(data_path, data_set, 'run1TXT.ZIP'), 'r')
for name in zip.namelist():
zip.extract(name, path)
Y, connect = GPy.util.mocap.load_text_data('Aug210106', path)
Y = Y[0:-1:sample_every, :]
return data_details_return({'Y': Y, 'connect' : connect}, data_set)
def swiss_roll_generated(num_samples=1000, sigma=0.0):
    with open(os.path.join(os.path.dirname(__file__), 'datasets', 'swiss_roll.pickle'), 'rb') as f:
data = pickle.load(f)
Na = data['Y'].shape[0]
perm = np.random.permutation(np.r_[:Na])[:num_samples]
Y = data['Y'][perm, :]
t = data['t'][perm]
c = data['colors'][perm, :]
so = np.argsort(t)
Y = Y[so, :]
t = t[so]
c = c[so, :]
return {'Y':Y, 't':t, 'colors':c}
def hapmap3(data_set='hapmap3'):
"""
The HapMap phase three SNP dataset - 1184 samples out of 11 populations.
SNP_matrix (A) encoding [see Paschou et all. 2007 (PCA-Correlated SNPs...)]:
Let (B1,B2) be the alphabetically sorted bases, which occur in the j-th SNP, then
/ 1, iff SNPij==(B1,B1)
Aij = | 0, iff SNPij==(B1,B2)
\ -1, iff SNPij==(B2,B2)
The SNP data and the meta information (such as iid, sex and phenotype) are
stored in the dataframe datadf, index is the Individual ID,
with following columns for metainfo:
* family_id -> Family ID
* paternal_id -> Paternal ID
* maternal_id -> Maternal ID
* sex -> Sex (1=male; 2=female; other=unknown)
* phenotype -> Phenotype (-9, or 0 for unknown)
* population -> Population string (e.g. 'ASW' - 'YRI')
* rest are SNP rs (ids)
More information is given in infodf:
* Chromosome:
- autosomal chromosemes -> 1-22
- X X chromosome -> 23
- Y Y chromosome -> 24
- XY Pseudo-autosomal region of X -> 25
- MT Mitochondrial -> 26
* Relative Positon (to Chromosome) [base pairs]
"""
    try:
        from pandas import read_pickle, DataFrame
        from sys import stdout
        from functools import reduce
        import bz2
    except ImportError as i:
        raise ImportError("Need pandas for hapmap dataset, make sure to install pandas (http://pandas.pydata.org/) before loading the hapmap dataset")
dir_path = os.path.join(data_path,'hapmap3')
hapmap_file_name = 'hapmap3_r2_b36_fwd.consensus.qc.poly'
unpacked_files = [os.path.join(dir_path, hapmap_file_name+ending) for ending in ['.ped', '.map']]
unpacked_files_exist = reduce(lambda a, b:a and b, map(os.path.exists, unpacked_files))
if not unpacked_files_exist and not data_available(data_set):
download_data(data_set)
preprocessed_data_paths = [os.path.join(dir_path,hapmap_file_name + file_name) for file_name in \
['.snps.pickle',
'.info.pickle',
'.nan.pickle']]
if not reduce(lambda a,b: a and b, map(os.path.exists, preprocessed_data_paths)):
if not overide_manual_authorize and not prompt_user("Preprocessing requires ~25GB "
"of memory and can take a (very) long time, continue? [Y/n]"):
print("Preprocessing required for further usage.")
return
status = "Preprocessing data, please be patient..."
print(status)
def write_status(message, progress, status):
stdout.write(" "*len(status)); stdout.write("\r"); stdout.flush()
status = r"[{perc: <{ll}}] {message: <13s}".format(message=message, ll=20,
perc="="*int(20.*progress/100.))
stdout.write(status); stdout.flush()
return status
if not unpacked_files_exist:
status=write_status('unpacking...', 0, '')
curr = 0
for newfilepath in unpacked_files:
if not os.path.exists(newfilepath):
filepath = newfilepath + '.bz2'
file_size = os.path.getsize(filepath)
with open(newfilepath, 'wb') as new_file, open(filepath, 'rb') as f:
decomp = bz2.BZ2Decompressor()
file_processed = 0
buffsize = 100 * 1024
for data in iter(lambda : f.read(buffsize), b''):
new_file.write(decomp.decompress(data))
file_processed += len(data)
status=write_status('unpacking...', curr+12.*file_processed/(file_size), status)
curr += 12
status=write_status('unpacking...', curr, status)
os.remove(filepath)
status=write_status('reading .ped...', 25, status)
# Preprocess data:
snpstrnp = np.loadtxt(unpacked_files[0], dtype=str)
status=write_status('reading .map...', 33, status)
mapnp = np.loadtxt(unpacked_files[1], dtype=str)
status=write_status('reading relationships.txt...', 42, status)
# and metainfo:
infodf = DataFrame.from_csv(os.path.join(dir_path,'./relationships_w_pops_121708.txt'), header=0, sep='\t')
infodf.set_index('IID', inplace=1)
status=write_status('filtering nan...', 45, status)
snpstr = snpstrnp[:,6:].astype('S1').reshape(snpstrnp.shape[0], -1, 2)
inan = snpstr[:,:,0] == '0'
status=write_status('filtering reference alleles...', 55, status)
ref = np.array(list(map(lambda x: np.unique(x)[-2:], snpstr.swapaxes(0,1)[:,:,:])))
status=write_status('encoding snps...', 70, status)
# Encode the information for each gene in {-1,0,1}:
status=write_status('encoding snps...', 73, status)
snps = (snpstr==ref[None,:,:])
status=write_status('encoding snps...', 76, status)
snps = (snps*np.array([1,-1])[None,None,:])
status=write_status('encoding snps...', 78, status)
snps = snps.sum(-1)
status=write_status('encoding snps...', 81, status)
snps = snps.astype('i8')
status=write_status('marking nan values...', 88, status)
# put in nan values (masked as -128):
snps[inan] = -128
status=write_status('setting up meta...', 94, status)
# get meta information:
metaheader = np.r_[['family_id', 'iid', 'paternal_id', 'maternal_id', 'sex', 'phenotype']]
metadf = DataFrame(columns=metaheader, data=snpstrnp[:,:6])
metadf.set_index('iid', inplace=1)
metadf = metadf.join(infodf.population)
metadf.to_pickle(preprocessed_data_paths[1])
# put everything together:
status=write_status('setting up snps...', 96, status)
snpsdf = DataFrame(index=metadf.index, data=snps, columns=mapnp[:,1])
with open(preprocessed_data_paths[0], 'wb') as f:
pickle.dump(snpsdf, f, protocol=-1)
status=write_status('setting up snps...', 98, status)
inandf = DataFrame(index=metadf.index, data=inan, columns=mapnp[:,1])
inandf.to_pickle(preprocessed_data_paths[2])
status=write_status('done :)', 100, status)
print('')
else:
print("loading snps...")
snpsdf = read_pickle(preprocessed_data_paths[0])
print("loading metainfo...")
metadf = read_pickle(preprocessed_data_paths[1])
print("loading nan entries...")
inandf = read_pickle(preprocessed_data_paths[2])
snps = snpsdf.values
populations = metadf.population.values.astype('S3')
hapmap = dict(name=data_set,
description='The HapMap phase three SNP dataset - '
'1184 samples out of 11 populations. inan is a '
'boolean array, containing whether or not the '
'given entry is nan (nans are masked as '
'-128 in snps).',
snpsdf=snpsdf,
metadf=metadf,
snps=snps,
inan=inandf.values,
inandf=inandf,
populations=populations)
return hapmap
def singlecell(data_set='singlecell'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'singlecell.csv')
Y = read_csv(filename, header=0, index_col=0)
genes = Y.columns
labels = Y.index
# data = np.loadtxt(os.path.join(dir_path, 'singlecell.csv'), delimiter=",", dtype=str)
return data_details_return({'Y': Y, 'info' : "qPCR singlecell experiment in Mouse, measuring 48 gene expressions in 1-64 cell states. The labels have been created as in Guo et al. [2010]",
'genes': genes, 'labels':labels,
}, data_set)
def singlecell_rna_seq_islam(dataset='singlecell_islam'):
if not data_available(dataset):
download_data(dataset)
from pandas import read_csv, DataFrame, concat
dir_path = os.path.join(data_path, dataset)
filename = os.path.join(dir_path, 'GSE29087_L139_expression_tab.txt.gz')
data = read_csv(filename, sep='\t', skiprows=6, compression='gzip', header=None)
header1 = read_csv(filename, sep='\t', header=None, skiprows=5, nrows=1, compression='gzip')
header2 = read_csv(filename, sep='\t', header=None, skiprows=3, nrows=1, compression='gzip')
data.columns = np.concatenate((header1.ix[0, :], header2.ix[0, 7:]))
Y = data.set_index("Feature").ix[8:, 6:-4].T.astype(float)
# read the info .soft
filename = os.path.join(dir_path, 'GSE29087_family.soft.gz')
info = read_csv(filename, sep='\t', skiprows=0, compression='gzip', header=None)
# split at ' = '
info = DataFrame(info.ix[:,0].str.split(' = ').tolist())
# only take samples:
info = info[info[0].str.contains("!Sample")]
info[0] = info[0].apply(lambda row: row[len("!Sample_"):])
groups = info.groupby(0).groups
# remove 'GGG' from barcodes
barcode = info[1][groups['barcode']].apply(lambda row: row[:-3])
title = info[1][groups['title']]
title.index = barcode
title.name = 'title'
geo_accession = info[1][groups['geo_accession']]
geo_accession.index = barcode
geo_accession.name = 'geo_accession'
case_id = info[1][groups['source_name_ch1']]
case_id.index = barcode
case_id.name = 'source_name_ch1'
info = concat([title, geo_accession, case_id], axis=1)
labels = info.join(Y).source_name_ch1[:-4]
labels[labels=='Embryonic stem cell'] = "ES"
labels[labels=='Embryonic fibroblast'] = "MEF"
return data_details_return({'Y': Y,
'info': '92 single cells (48 mouse ES cells, 44 mouse embryonic fibroblasts and 4 negative controls) were analyzed by single-cell tagged reverse transcription (STRT)',
'genes': Y.columns,
'labels': labels,
'datadf': data,
'infodf': info}, dataset)
def singlecell_rna_seq_deng(dataset='singlecell_deng'):
if not data_available(dataset):
download_data(dataset)
from pandas import read_csv, isnull
dir_path = os.path.join(data_path, dataset)
# read the info .soft
filename = os.path.join(dir_path, 'GSE45719_series_matrix.txt.gz')
info = read_csv(filename, sep='\t', skiprows=0, compression='gzip', header=None, nrows=29, index_col=0)
summary = info.loc['!Series_summary'][1]
design = info.loc['!Series_overall_design']
# only take samples:
sample_info = read_csv(filename, sep='\t', skiprows=30, compression='gzip', header=0, index_col=0).T
sample_info.columns = sample_info.columns.to_series().apply(lambda row: row[len("!Sample_"):])
sample_info.columns.name = sample_info.columns.name[len("!Sample_"):]
sample_info = sample_info[['geo_accession', 'characteristics_ch1', 'description']]
sample_info = sample_info.iloc[:, np.r_[0:4, 5:sample_info.shape[1]]]
c = sample_info.columns.to_series()
c[1:4] = ['strain', 'cross', 'developmental_stage']
sample_info.columns = c
# get the labels right:
rep = re.compile('\(.*\)')
def filter_dev_stage(row):
if isnull(row):
row = "2-cell stage embryo"
if row.startswith("developmental stage: "):
row = row[len("developmental stage: "):]
if row == 'adult':
row += " liver"
row = row.replace(' stage ', ' ')
row = rep.sub(' ', row)
row = row.strip(' ')
return row
labels = sample_info.developmental_stage.apply(filter_dev_stage)
# Extract the tar file
filename = os.path.join(dir_path, 'GSE45719_Raw.tar')
with tarfile.open(filename, 'r') as files:
print("Extracting Archive {}...".format(files.name))
data = None
gene_info = None
message = ''
members = files.getmembers()
overall = len(members)
for i, file_info in enumerate(members):
f = files.extractfile(file_info)
inner = | read_csv(f, sep='\t', header=0, compression='gzip', index_col=0) | pandas.read_csv |
#!python3
"""Module for working with student records and making Students tab"""
import numpy as np
import pandas as pd
from reports_modules.excel_base import safe_write, write_array
from reports_modules.excel_base import make_excel_indices
DEFAULT_FROM_TARGET = 0.2 # default prediction below target grad rate
MINUS1_CUT = 0.2 # minimum odds required to "toss" a college in minus1 pred
def _get_act_translation(x, lookup_df):
"""Apply function for calculating equivalent SAT for ACT scores.
Lookup table has index of ACT with value of SAT"""
act = x
if np.isreal(act):
if act in lookup_df.index: # it's an ACT value in the table
return lookup_df.loc[act, "SAT"]
return np.nan # default if not in table or not a number
def _get_sat_guess(x):
"""Returns a GPA guess based on regression constants from the
prior year. nan if GPA isn't a number"""
gpa = x
if np.isreal(gpa):
guess = 427.913068576 + 185.298880075 * gpa
return np.round(guess / 10.0) * 10.0
else:
return np.nan
def _pick_sat_for_use(x):
""" Returns the SAT we'll use in practice"""
sat_guess, interim, actual_sat = x
if np.isreal(actual_sat):
return actual_sat
elif np.isreal(interim):
return interim
elif np.isreal(sat_guess):
return sat_guess
else:
return np.nan
def _get_sat_max(x):
"""Returns the max of two values if both are numbers, otherwise
returns the numeric one or nan if neither is numeric"""
sat, act_in_sat = x
if np.isreal(sat):
if np.isreal(act_in_sat):
return max(sat, act_in_sat)
else:
return sat
else:
if np.isreal(act_in_sat):
return act_in_sat
else:
return np.nan
def reduce_roster(campus, cfg, dfs, counselor, advisor, debug, do_nonseminar):
"""Uses campus info and config file to reduce the active student list"""
df = dfs["full_roster"].copy()
if debug:
print("Starting roster of {} students".format(len(df)), flush=True, end="")
if campus == "All":
if "all_campuses" in cfg:
df = df[df["Campus"].isin(cfg["all_campuses"])]
else:
pass # we're using the entire dataframe
elif campus == "PAS": # special code for -1 EFC students
df = df[df["EFC"] == -1]
elif campus.startswith("list"): # special code for a list from a csv
df = df[df.index.isin(dfs["roster_list"].index)]
else:
df = df[df["Campus"] == campus]
if counselor != "All":
df = df.dropna(subset=["Counselor"])
df = df[df["Counselor"].str.contains(counselor)]
if advisor != "All":
df = df.dropna(subset=["Advisor"])
df = df[df["Advisor"].str.contains(advisor)]
if do_nonseminar:
df = df[df["SpEd"].str.endswith("NonS")]
else:
df = df[~df["SpEd"].str.endswith("NonS")]
if debug:
print("..ending at {} students.".format(len(df)), flush=True)
# Two calculated columns need to be added for the application
# analyses
df["local_act_in_sat"] = df["ACT"].apply(
_get_act_translation, args=(dfs["ACTtoSAT"],)
)
df["local_sat_guess"] = df["GPA"].apply(_get_sat_guess)
df["local_sat_used"] = df[["local_sat_guess", "InterimSAT", "SAT"]].apply(
_pick_sat_for_use, axis=1
)
df["local_sat_max"] = df[["local_sat_used", "local_act_in_sat"]].apply(
_get_sat_max, axis=1
)
dfs["roster"] = df
def _get_subgroup(x):
"""Apply function to return one of eight unique subgroups"""
race, gender = x
if race == "B":
subgroup = "Black"
elif race == "H":
subgroup = "Latinx"
elif race == "A" or race == "P":
subgroup = "Asian"
else:
subgroup = "Other"
if gender == "M":
return subgroup + " Male"
elif gender == "F":
return subgroup + " Female"
else:
return subgroup + " Other"
def _get_strategies(x, lookup_df):
"""Apply function for calculating strategies based on gpa and sat using the
lookup table (mirrors Excel equation for looking up strategy)"""
gpa, sat = x
if np.isreal(gpa) and np.isreal(sat):
lookup = "{:.1f}:{:.0f}".format(
max(np.floor(gpa * 10) / 10, 1.5), max(sat, 710)
)
return lookup_df["Strategy"].get(lookup, np.nan)
else:
return np.nan
def _get_bucket(x, use_EFC=False):
"""Apply function to create a text field to "bucket" students"""
strat, gpa, efc, race = x
special_strats = [5, 6] # these are the ones split by 3.0 GPA
if pd.isnull(gpa) or | pd.isnull(strat) | pandas.isnull |
import pytest
import pandas as pd
from opendc_eemm.preprocess import aggregate_predictions
from opendc_eemm.visualization import plot_power_draw
@pytest.mark.parametrize('value, expected', [(1, 1)])
def test_test(value, expected):
assert value == expected
def test_aggregate_predictions():
with pytest.raises(ValueError):
aggregate_predictions( | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Generate data for examples"""
# author: <NAME>, <NAME>, Duke University; <NAME>, <NAME>
# Copyright Duke University 2020
# License: MIT
import pandas as pd
import numpy as np
def generate_uniform_given_importance(num_control=1000, num_treated=1000,
num_cov=4, min_val=0,
max_val=3, covar_importance=[4, 3, 2, 1],
bi_mean=2, bi_stdev=1):
"""
This generates data according to the discrete uniform distribution
"""
x_c = np.random.randint(min_val, max_val, size=(num_control, num_cov))
x_t = np.random.randint(min_val, max_val, size=(num_treated, num_cov))
y_c = np.dot(x_c, np.array(covar_importance)) # y for control group
# this is beta
treatment_eff_coef = np.random.normal(bi_mean, bi_stdev, size=num_cov)
treatment_effect = np.dot(x_t, treatment_eff_coef) # this is beta*x
# yc is just the 1st term of the below summation. Thus, CATT is the 2nd term
y_t = np.dot(x_t, np.array(covar_importance)) + treatment_effect
true_catt = treatment_effect
df1 = pd.DataFrame(x_c, columns=range(num_cov))
df1['outcome'] = y_c
df1['treated'] = 0
df2 = pd.DataFrame(x_t, columns=range(num_cov))
df2['outcome'] = y_t
df2['treated'] = 1
data_frame = pd.concat([df2, df1])
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(['index'], axis=1)
return data_frame, true_catt
def generate_binomial_given_importance(num_control=1000, num_treated=1000,
num_cov=5, bernoulli_param=0.5,
bi_mean=2, bi_stdev=1,
covar_importance=[4, 3, 2, 1, 0.01]):
'''
This function generates data where the covariates exponentially decay with
importance. The x's are all binary.
'''
# data for control group
x_c = np.random.binomial(1, bernoulli_param, size=(num_control, num_cov))
# data for treated group
x_t = np.random.binomial(1, bernoulli_param, size=(num_treated, num_cov))
y_c = np.dot(x_c, np.array(covar_importance)) # y for control group
# this is beta
treatment_eff_coef = np.random.normal(bi_mean, bi_stdev, size=num_cov)
treatment_effect = np.dot(x_t, treatment_eff_coef) # this is beta*x
# yc is just the 1st term of the below summation. Thus, CATT is the 2nd term
y_t = np.dot(x_t, np.array(covar_importance)) + treatment_effect
true_catt = treatment_effect
df1 = pd.DataFrame(x_c, columns=range(num_cov))
df1['outcome'] = y_c
df1['treated'] = 0
df2 = pd.DataFrame(x_t, columns=range(num_cov))
df2['outcome'] = y_t
df2['treated'] = 1
data_frame = | pd.concat([df2, df1]) | pandas.concat |
import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base.transform_primitive_base import (
TransformPrimitive
)
from featuretools.primitives.utils import (
_apply_roll_with_offset_gap,
_roll_series_with_gap
)
class RollingMax(TransformPrimitive):
"""Determines the maximum of entries over a given window.
Description:
Given a list of numbers and a corresponding list of
datetimes, return a rolling maximum of the numeric values,
starting at the row `gap` rows away from the current row and looking backward
over the specified window (by `window_length` and `gap`).
Input datetimes should be monotonic.
Args:
window_length (int, string, optional): Specifies the amount of data included in each window.
If an integer is provided, will correspond to a number of rows. For data with a uniform sampling frequency,
for example of one day, the window_length will correspond to a period of time, in this case,
7 days for a window_length of 7.
If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
and it will indicate a length of time that each window should span.
The list of available offset aliases, can be found at
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases.
Defaults to 3.
gap (int, string, optional): Specifies a gap backwards from each instance before the
window of usable data begins. If an integer is provided, will correspond to a number of rows.
If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
and it will indicate a length of time between a target instance and the beginning of its window.
Defaults to 0, which will include the target instance in the window.
min_periods (int, optional): Minimum number of observations required for performing calculations
over the window. Can only be as large as window_length when window_length is an integer.
When window_length is an offset alias string, this limitation does not exist, but care should be taken
to not choose a min_periods that will always be larger than the number of observations in a window.
Defaults to 1.
Note:
Only offset aliases with fixed frequencies can be used when defining gap and window_length.
This means that aliases such as `M` or `W` cannot be used, as they can indicate different
numbers of days. ('M', because different months are different numbers of days;
'W' because week will indicate a certain day of the week, like W-Wed, so that will
indicate a different number of days depending on the anchoring date.)
Note:
When using an offset alias to define `gap`, an offset alias must also be used to define `window_length`.
This limitation does not exist when using an offset alias to define `window_length`. In fact,
if the data has a uniform sampling frequency, it is preferable to use a numeric `gap` as it is more
efficient.
Examples:
>>> import pandas as pd
>>> rolling_max = RollingMax(window_length=3)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_max(times, [4, 3, 2, 1, 0]).tolist()
[4.0, 4.0, 4.0, 3.0, 2.0]
We can also control the gap before the rolling calculation.
>>> import pandas as pd
>>> rolling_max = RollingMax(window_length=3, gap=1)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_max(times, [4, 3, 2, 1, 0]).tolist()
[nan, 4.0, 4.0, 4.0, 3.0]
We can also control the minimum number of periods required for the rolling calculation.
>>> import pandas as pd
>>> rolling_max = RollingMax(window_length=3, min_periods=3)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_max(times, [4, 3, 2, 1, 0]).tolist()
[nan, nan, 4.0, 3.0, 2.0]
We can also set the window_length and gap using offset alias strings.
>>> import pandas as pd
>>> rolling_max = RollingMax(window_length='3min', gap='1min')
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_max(times, [4, 3, 2, 1, 0]).tolist()
[nan, 4.0, 4.0, 4.0, 3.0]
"""
name = "rolling_max"
input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={'time_index'}), ColumnSchema(semantic_tags={'numeric'})]
return_type = ColumnSchema(logical_type=Double, semantic_tags={'numeric'})
def __init__(self, window_length=3, gap=0, min_periods=1):
self.window_length = window_length
self.gap = gap
self.min_periods = min_periods
def get_function(self):
def rolling_max(datetime, numeric):
x = pd.Series(numeric.values, index=datetime.values)
rolled_series = _roll_series_with_gap(x,
self.window_length,
gap=self.gap,
min_periods=self.min_periods)
if isinstance(self.gap, str):
additional_args = (self.gap, max, self.min_periods)
return rolled_series.apply(_apply_roll_with_offset_gap, args=additional_args).values
return rolled_series.max().values
return rolling_max
class RollingMin(TransformPrimitive):
"""Determines the minimum of entries over a given window.
Description:
Given a list of numbers and a corresponding list of
datetimes, return a rolling minimum of the numeric values,
starting at the row `gap` rows away from the current row and looking backward
over the specified window (by `window_length` and `gap`).
Input datetimes should be monotonic.
Args:
window_length (int, string, optional): Specifies the amount of data included in each window.
If an integer is provided, will correspond to a number of rows. For data with a uniform sampling frequency,
for example of one day, the window_length will correspond to a period of time, in this case,
7 days for a window_length of 7.
If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
and it will indicate a length of time that each window should span.
The list of available offset aliases, can be found at
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases.
Defaults to 3.
gap (int, string, optional): Specifies a gap backwards from each instance before the
window of usable data begins. If an integer is provided, will correspond to a number of rows.
If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
and it will indicate a length of time between a target instance and the beginning of its window.
Defaults to 0, which will include the target instance in the window.
min_periods (int, optional): Minimum number of observations required for performing calculations
over the window. Can only be as large as window_length when window_length is an integer.
When window_length is an offset alias string, this limitation does not exist, but care should be taken
to not choose a min_periods that will always be larger than the number of observations in a window.
Defaults to 1.
Note:
Only offset aliases with fixed frequencies can be used when defining gap and window_length.
This means that aliases such as `M` or `W` cannot be used, as they can indicate different
numbers of days. ('M', because different months are different numbers of days;
'W' because week will indicate a certain day of the week, like W-Wed, so that will
indicate a different number of days depending on the anchoring date.)
Note:
When using an offset alias to define `gap`, an offset alias must also be used to define `window_length`.
This limitation does not exist when using an offset alias to define `window_length`. In fact,
if the data has a uniform sampling frequency, it is preferable to use a numeric `gap` as it is more
efficient.
Examples:
>>> import pandas as pd
>>> rolling_min = RollingMin(window_length=3)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_min(times, [4, 3, 2, 1, 0]).tolist()
[4.0, 3.0, 2.0, 1.0, 0.0]
We can also control the gap before the rolling calculation.
>>> import pandas as pd
>>> rolling_min = RollingMin(window_length=3, gap=1)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_min(times, [4, 3, 2, 1, 0]).tolist()
[nan, 4.0, 3.0, 2.0, 1.0]
We can also control the minimum number of periods required for the rolling calculation.
>>> import pandas as pd
>>> rolling_min = RollingMin(window_length=3, min_periods=3)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_min(times, [4, 3, 2, 1, 0]).tolist()
[nan, nan, 2.0, 1.0, 0.0]
We can also set the window_length and gap using offset alias strings.
>>> import pandas as pd
>>> rolling_min = RollingMin(window_length='3min', gap='1min')
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_min(times, [4, 3, 2, 1, 0]).tolist()
[nan, 4.0, 3.0, 2.0, 1.0]
"""
name = "rolling_min"
input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={'time_index'}), ColumnSchema(semantic_tags={'numeric'})]
return_type = ColumnSchema(logical_type=Double, semantic_tags={'numeric'})
def __init__(self, window_length=3, gap=0, min_periods=1):
self.window_length = window_length
self.gap = gap
self.min_periods = min_periods
def get_function(self):
def rolling_min(datetime, numeric):
x = pd.Series(numeric.values, index=datetime.values)
rolled_series = _roll_series_with_gap(x,
self.window_length,
gap=self.gap,
min_periods=self.min_periods)
if isinstance(self.gap, str):
additional_args = (self.gap, min, self.min_periods)
return rolled_series.apply(_apply_roll_with_offset_gap, args=additional_args).values
return rolled_series.min().values
return rolling_min
class RollingMean(TransformPrimitive):
"""Calculates the mean of entries over a given window.
Description:
Given a list of numbers and a corresponding list of
datetimes, return a rolling mean of the numeric values,
starting at the row `gap` rows away from the current row and looking backward
over the specified time window (by `window_length` and `gap`).
Input datetimes should be monotonic.
Args:
window_length (int, string, optional): Specifies the amount of data included in each window.
If an integer is provided, will correspond to a number of rows. For data with a uniform sampling frequency,
for example of one day, the window_length will correspond to a period of time, in this case,
7 days for a window_length of 7.
If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
and it will indicate a length of time that each window should span.
The list of available offset aliases, can be found at
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases.
Defaults to 3.
gap (int, string, optional): Specifies a gap backwards from each instance before the
window of usable data begins. If an integer is provided, will correspond to a number of rows.
If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
and it will indicate a length of time between a target instance and the beginning of its window.
Defaults to 0, which will include the target instance in the window.
min_periods (int, optional): Minimum number of observations required for performing calculations
over the window. Can only be as large as window_length when window_length is an integer.
When window_length is an offset alias string, this limitation does not exist, but care should be taken
to not choose a min_periods that will always be larger than the number of observations in a window.
Defaults to 1.
Note:
Only offset aliases with fixed frequencies can be used when defining gap and window_length.
This means that aliases such as `M` or `W` cannot be used, as they can indicate different
numbers of days. ('M', because different months are different numbers of days;
'W' because week will indicate a certain day of the week, like W-Wed, so that will
indicate a different number of days depending on the anchoring date.)
Note:
When using an offset alias to define `gap`, an offset alias must also be used to define `window_length`.
This limitation does not exist when using an offset alias to define `window_length`. In fact,
if the data has a uniform sampling frequency, it is preferable to use a numeric `gap` as it is more
efficient.
Examples:
>>> import pandas as pd
>>> rolling_mean = RollingMean(window_length=3)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_mean(times, [4, 3, 2, 1, 0]).tolist()
[4.0, 3.5, 3.0, 2.0, 1.0]
We can also control the gap before the rolling calculation.
>>> import pandas as pd
>>> rolling_mean = RollingMean(window_length=3, gap=1)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_mean(times, [4, 3, 2, 1, 0]).tolist()
[nan, 4.0, 3.5, 3.0, 2.0]
We can also control the minimum number of periods required for the rolling calculation.
>>> import pandas as pd
>>> rolling_mean = RollingMean(window_length=3, min_periods=3)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_mean(times, [4, 3, 2, 1, 0]).tolist()
[nan, nan, 3.0, 2.0, 1.0]
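We can also set the window_length and gap using offset alias strings.
>>> import pandas as pd
>>> rolling_mean = RollingMean(window_length='3min', gap='1min')
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_mean(times, [4, 3, 2, 1, 0]).tolist()
[nan, 4.0, 3.5, 3.0, 2.0]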
"""
name = "rolling_mean"
input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={'time_index'}), ColumnSchema(semantic_tags={'numeric'})]
return_type = ColumnSchema(logical_type=Double, semantic_tags={'numeric'})
def __init__(self, window_length=3, gap=0, min_periods=0):
self.window_length = window_length
self.gap = gap
self.min_periods = min_periods
def get_function(self):
def rolling_mean(datetime, numeric):
x = pd.Series(numeric.values, index=datetime.values)
rolled_series = _roll_series_with_gap(x,
self.window_length,
gap=self.gap,
min_periods=self.min_periods)
if isinstance(self.gap, str):
additional_args = (self.gap, np.mean, self.min_periods)
return rolled_series.apply(_apply_roll_with_offset_gap, args=additional_args).values
return rolled_series.mean().values
return rolling_mean
class RollingSTD(TransformPrimitive):
"""Calculates the standard deviation of entries over a given window.
Description:
Given a list of numbers and a corresponding list of
datetimes, return a rolling standard deviation of
the numeric values, starting at the row `gap` rows away from the current row and
looking backward over the specified time window
(by `window_length` and `gap`). Input datetimes should be monotonic.
Args:
window_length (int, string, optional): Specifies the amount of data included in each window.
If an integer is provided, will correspond to a number of rows. For data with a uniform sampling frequency,
for example of one day, the window_length will correspond to a period of time, in this case,
7 days for a window_length of 7.
If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
and it will indicate a length of time that each window should span.
The list of available offset aliases, can be found at
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases.
Defaults to 3.
gap (int, string, optional): Specifies a gap backwards from each instance before the
window of usable data begins. If an integer is provided, will correspond to a number of rows.
If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
and it will indicate a length of time between a target instance and the beginning of its window.
Defaults to 0, which will include the target instance in the window.
min_periods (int, optional): Minimum number of observations required for performing calculations
over the window. Can only be as large as window_length when window_length is an integer.
When window_length is an offset alias string, this limitation does not exist, but care should be taken
to not choose a min_periods that will always be larger than the number of observations in a window.
Defaults to 1.
Note:
Only offset aliases with fixed frequencies can be used when defining gap and window_length.
This means that aliases such as `M` or `W` cannot be used, as they can indicate different
numbers of days. ('M', because different months are different numbers of days;
'W' because week will indicate a certain day of the week, like W-Wed, so that will
indicate a different number of days depending on the anchoring date.)
Note:
When using an offset alias to define `gap`, an offset alias must also be used to define `window_length`.
This limitation does not exist when using an offset alias to define `window_length`. In fact,
if the data has a uniform sampling frequency, it is preferable to use a numeric `gap` as it is more
efficient.
Examples:
>>> import pandas as pd
>>> rolling_std = RollingSTD(window_length=4)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_std(times, [4, 3, 2, 1, 0]).tolist()
[nan, 0.7071067811865476, 1.0, 1.2909944487358056, 1.2909944487358056]
We can also control the gap before the rolling calculation.
>>> import pandas as pd
>>> rolling_std = RollingSTD(window_length=4, gap=1)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_std(times, [4, 3, 2, 1, 0]).tolist()
[nan, nan, 0.7071067811865476, 1.0, 1.2909944487358056]
We can also control the minimum number of periods required for the rolling calculation.
>>> import pandas as pd
>>> rolling_std = RollingSTD(window_length=4, min_periods=4)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_std(times, [4, 3, 2, 1, 0]).tolist()
[nan, nan, nan, 1.2909944487358056, 1.2909944487358056]
We can also set the window_length and gap using offset alias strings.
>>> import pandas as pd
>>> rolling_std = RollingSTD(window_length='4min', gap='1min')
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_std(times, [4, 3, 2, 1, 0]).tolist()
[nan, nan, 0.7071067811865476, 1.0, 1.2909944487358056]
"""
name = "rolling_std"
input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={'time_index'}), ColumnSchema(semantic_tags={'numeric'})]
return_type = ColumnSchema(logical_type=Double, semantic_tags={'numeric'})
def __init__(self, window_length=3, gap=0, min_periods=1):
self.window_length = window_length
self.gap = gap
self.min_periods = min_periods
def get_function(self):
def rolling_std(datetime, numeric):
x = pd.Series(numeric.values, index=datetime.values)
rolled_series = _roll_series_with_gap(x,
self.window_length,
gap=self.gap,
min_periods=self.min_periods)
if isinstance(self.gap, str):
def _pandas_std(series):
return series.std()
additional_args = (self.gap, _pandas_std, self.min_periods)
return rolled_series.apply(_apply_roll_with_offset_gap, args=additional_args).values
return rolled_series.std().values
return rolling_std
class RollingCount(TransformPrimitive):
"""Determines a rolling count of events over a given window.
Description:
Given a list of datetimes, return a rolling count starting
at the row `gap` rows away from the current row and looking backward over the specified
time window (by `window_length` and `gap`).
Input datetimes should be monotonic.
Args:
window_length (int, string, optional): Specifies the amount of data included in each window.
If an integer is provided, will correspond to a number of rows. For data with a uniform sampling frequency,
for example of one day, the window_length will correspond to a period of time, in this case,
7 days for a window_length of 7.
If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
and it will indicate a length of time that each window should span.
The list of available offset aliases, can be found at
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases.
Defaults to 3.
gap (int, string, optional): Specifies a gap backwards from each instance before the
window of usable data begins. If an integer is provided, will correspond to a number of rows.
If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
and it will indicate a length of time between a target instance and the beginning of its window.
Defaults to 0, which will include the target instance in the window.
min_periods (int, optional): Minimum number of observations required for performing calculations
over the window. Can only be as large as window_length when window_length is an integer.
When window_length is an offset alias string, this limitation does not exist, but care should be taken
to not choose a min_periods that will always be larger than the number of observations in a window.
Defaults to 1.
Note:
Only offset aliases with fixed frequencies can be used when defining gap and window_length.
This means that aliases such as `M` or `W` cannot be used, as they can indicate different
numbers of days. ('M', because different months are different numbers of days;
'W' because week will indicate a certain day of the week, like W-Wed, so that will
indicate a different number of days depending on the anchoring date.)
Note:
When using an offset alias to define `gap`, an offset alias must also be used to define `window_length`.
This limitation does not exist when using an offset alias to define `window_length`. In fact,
if the data has a uniform sampling frequency, it is preferable to use a numeric `gap` as it is more
efficient.
Examples:
>>> import pandas as pd
>>> rolling_count = RollingCount(window_length=3)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_count(times).tolist()
[1.0, 2.0, 3.0, 3.0, 3.0]
We can also control the gap before the rolling calculation.
>>> import pandas as pd
>>> rolling_count = RollingCount(window_length=3, gap=1)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_count(times).tolist()
[nan, 1.0, 2.0, 3.0, 3.0]
We can also control the minimum number of periods required for the rolling calculation.
>>> import pandas as pd
>>> rolling_count = RollingCount(window_length=3, min_periods=3)
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_count(times).tolist()
[nan, nan, 3.0, 3.0, 3.0]
We can also set the window_length and gap using offset alias strings.
>>> import pandas as pd
>>> rolling_count = RollingCount(window_length='3min', gap='1min')
>>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
>>> rolling_count(times).tolist()
[nan, 1.0, 2.0, 3.0, 3.0]
"""
name = "rolling_count"
input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={'time_index'})]
return_type = ColumnSchema(logical_type=Double, semantic_tags={'numeric'})
def __init__(self, window_length=3, gap=0, min_periods=0):
self.window_length = window_length
self.gap = gap
self.min_periods = min_periods
def get_function(self):
def rolling_count(datetime):
x = | pd.Series(1, index=datetime) | pandas.Series |
#Download and clean nest label series from Zooniverse
import pandas as pd
import geopandas as gpd
from panoptes_client import Panoptes
from shapely.geometry import box, Point
import json
import numpy as np
import os
from datetime import datetime
import utils
def species_from_label(value):
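"""Map an integer Zooniverse label (0-8) to the corresponding bird species name."""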
label_dict = {}
label_dict[0] = "Great Egret"
label_dict[1] = "Snowy Egret"
label_dict[2] = "White Ibis"
label_dict[3] = "Great Blue Heron"
label_dict[4] = "Wood Stork"
label_dict[5] = "Roseate Spoonbill"
label_dict[6] = "Anhinga"
label_dict[7] = "Other"
label_dict[8] = "Unknown"
return label_dict[value]
def download_data(everglades_watch, min_version, generate=False):
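"""Download the classification export for the project and return 'Nests' workflow rows newer than min_version as a DataFrame."""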
#see https://panoptes-python-client.readthedocs.io/en/v1.1/panoptes_client.html#module-panoptes_client.classification
classification_export = everglades_watch.get_export('classifications', generate=generate)
rows = []
for row in classification_export.csv_dictreader():
rows.append(row)
df = pd.DataFrame(rows)
df = df[df.workflow_name =="Nests"]
df["workflow_version"] = df.workflow_version.astype(float)
df = df[df.workflow_version > min_version]
return df
def download_subject_data(everglades_watch, savedir, generate=False):
#see https://panoptes-python-client.readthedocs.io/en/v1.1/panoptes_client.html#module-panoptes_client.classification
classification_export = everglades_watch.get_export('subjects', generate=generate)
rows = []
for row in classification_export.csv_dictreader():
rows.append(row)
df = | pd.DataFrame(rows) | pandas.DataFrame |
#%%
from pathlib import Path
import graspy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from graspy.embed import *
from graspy.plot import gridplot, heatmap, pairplot
from graspy.utils import *
import pickle
data_dir = Path(".")
data_dir = data_dir / "Cook et al revised Supplementary Information"
matdir = data_dir / "SI 5 Connectome adjacency matrices Pedigo.xlsx"
celldir = "./nice_data/master_cells.csv"
n_verify = 1000
save_me = True
run_homologs = True
plot = True
def verify(n, cells, adj, original, error="error"):
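"""Spot-check n random (in, out) cell pairs: print a report whenever the saved adjacency weight disagrees with the original spreadsheet."""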
for i in range(n):
rand_ind_out = np.random.randint(0, len(cells))
rand_ind_in = np.random.randint(0, len(cells))
in_cell = cells[rand_ind_in]
out_cell = cells[rand_ind_out]
saved_weight = adj[rand_ind_in, rand_ind_out]
try:
original_weight = original.loc[in_cell, out_cell]
if saved_weight != original_weight:
print(error)
print("edge")
print(out_cell)
print(in_cell)
print(saved_weight)
print(original_weight)
except:
pass
def verify_undirected(n, cells, adj, original, error="error"):
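"""Like verify(), but for undirected matrices: accept the saved weight if it matches the original edge in either direction."""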
for i in range(n):
rand_ind_out = np.random.randint(0, len(cells))
rand_ind_in = np.random.randint(0, len(cells))
in_cell = cells[rand_ind_in]
out_cell = cells[rand_ind_out]
saved_weight = adj[rand_ind_in, rand_ind_out]
try:
original_weight1 = original.loc[in_cell, out_cell]
original_weight2 = original.loc[out_cell, in_cell]
if saved_weight != original_weight1 and saved_weight != original_weight2:
print(error)
print("edge")
print(out_cell)
print(in_cell)
print(saved_weight)
print(original_weight1)
print(original_weight2)
except:
pass
def load_df(matdir, sheet_name):
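"""Load one adjacency sheet from the Excel workbook, fill NaNs with 0 and upper-case the row/column cell names."""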
df = pd.read_excel(matdir, sheet_name=sheet_name).fillna(0)
outs = df.index.values
ins = df.columns.values
outs = upper_array(outs)
ins = upper_array(ins)
df.columns = ins
df.index = outs
return df
def upper_array(arr):
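"""Return the input string array with every element upper-cased."""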
upper = [u.upper() for u in arr]
return np.array(upper)
def emmons_excel_to_df(matdir, sheet_name):
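"""Load one adjacency sheet and pad it with zero rows for cells that only appear as inputs, so the matrix can be made square."""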
df = load_df(matdir, sheet_name)
# get the in / out cells
# have to append some 0s to make into a square matrix
outs = df.index.values
ins = df.columns.values
not_outs = np.setdiff1d(ins, outs)
not_outs_df = | pd.DataFrame(columns=df.columns) | pandas.DataFrame |
import numpy as nmp
import numpy.random as rnd
import pandas as pnd
import clonosGP.aux as aux
import clonosGP.stats as sts
##
def get_1sample(sampleid = 'S0', weights=(0.65, 0.25, 0.10), z=None, phi=(1.0, 0.5, 0.25), nmuts=100, rho=0.9, mean_depth=1000):
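"""Simulate read counts for one sample: assign each mutation to a clone with the given weights and prevalences (phi), draw binomial reads at the purity-adjusted VAF, and return a per-mutation DataFrame."""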
CNm = nmp.ones(nmuts, dtype='int')
CNt = nmp.repeat(2, nmuts)
CNn = nmp.repeat(2, nmuts)
R = rnd.poisson(mean_depth, size=nmuts)
z = rnd.choice(len(weights), size=nmuts, replace=True, p=weights) if z is None else z
phi = nmp.asarray(phi)[z]
weights = nmp.asarray(weights)[z]
VAF0 = aux.calculate_vaf0(rho, CNm, CNt, CNn)
theta = VAF0 * phi
r = rnd.binomial(R, theta)
mutid = [f'M{i+1}' for i in range(nmuts)]
return pnd.DataFrame({
'SAMPLEID': sampleid,
'PURITY': rho,
'MUTID': mutid,
'r': r,
'R': R,
'CNn': CNn,
'CNt': CNt,
'CNm': CNm,
'theta': theta,
'VAF0': VAF0,
'phi': phi,
'w': weights
})
#
def get_Nsamples(nclusters=3, nmuts=100, nsamples=10, h2=None, tau=None, weights=None, rho=(0.8, 0.9), mean_depth=(40, 40), depths=None):
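"""Simulate a longitudinal dataset: clone prevalences follow a logistic-transformed Gaussian process (sts.cov_mat32 covariance) over nsamples time points; returns merged read counts, depths, true prevalences (PHI) and cluster assignments."""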
weights = nmp.ones(nclusters) / nclusters if weights is None else weights
tau = rnd.gamma(1, 1) if tau is None else tau
h2 = rnd.gamma(1, 1) if h2 is None else h2
times = nmp.linspace(0, 1, nsamples) + rnd.uniform(-1/nsamples, 1/nsamples, nsamples); times[0] = 0; times[-1] = 1
sids = [f'S{_+1}' for _ in range(nsamples)]
mids = [f'M{_+1}' for _ in range(nmuts)]
cov = h2 * sts.cov_mat32(times[:, None], times, tau).eval()
y = rnd.multivariate_normal(nmp.zeros(nsamples), cov, size=nclusters)
y = 1 / (1 + nmp.exp(-y))
z = rnd.choice(nclusters, size=nmuts, p=weights)
phi = y[z, :]
lam = rnd.uniform(mean_depth[0], mean_depth[1], size=nsamples)
rho = rnd.uniform(rho[0], rho[1], size=nsamples)
R = rnd.poisson(lam, size=(nmuts, nsamples)) if depths is None else rnd.choice(depths, size=(nmuts, nsamples))
r = rnd.binomial(R, 0.5 * rho * phi)
r = pnd.DataFrame(r, index=pnd.Index(mids, name='MUTID'), columns=pnd.MultiIndex.from_arrays([sids, times], names=['SAMPLEID', 'TIME']))
R = pnd.DataFrame(R, index=pnd.Index(mids, name='MUTID'), columns=pnd.MultiIndex.from_arrays([sids, times], names=['SAMPLEID', 'TIME']))
phi = pnd.DataFrame(phi, index=pnd.Index(mids, name='MUTID'), columns=pnd.MultiIndex.from_arrays([sids, times], names=['SAMPLEID', 'TIME']))
cid = pnd.DataFrame({'CLUSTERID': z+1}, index=pnd.Index(mids, name='MUTID')).reset_index()
return pnd.merge(
| pnd.concat({'r': r, 'R': R, 'PHI': phi}, axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.metrics import r2_score
import statsmodels.api as sm
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
xtr = | pd.read_csv('xtr.csv') | pandas.read_csv |
import pandas as pd
import requests
import os
import beis_indicators
from beis_indicators.utils.dir_file_management import make_indicator, save_indicator
PROJECT_DIR = beis_indicators.project_dir
TARGET_PATH = f"{PROJECT_DIR}/data/processed/housing"
INTERIM_PATH = f"{PROJECT_DIR}/data/interim/ashe_mean_salary"
# Get the LAD to NUTS lookup
lad_nuts_lu = | pd.read_csv(
"https://opendata.arcgis.com/datasets/9b4c94e915c844adb11e15a4b1e1294d_0.csv") | pandas.read_csv |
# load dependencies
import requests
import pandas as pd
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
from datetime import date, datetime, timedelta
headers = {"User-Agent": "<NAME>. <<EMAIL>>"}
def fetch_ags() -> pd.DataFrame:
"""
Fetch Amtliche Gemeindeschlüssel for Thüringen and return the table as a pandas' DataFrame.
:return: DataFrame including the Amtliche Gemeindeschlüssel for Thüringen
:rtype: pd.DataFrame
"""
# define URL
url = "https://statistik.thueringen.de/datenbank/gemauswahl.asp"
# GET request with pre-defined URL
response = requests.get(url=url, headers=headers)
# parse HTML using BeautifulSoup
soup = BeautifulSoup(markup=response.text, features="html.parser")
# parse table headers by identifying th-tag
table_headers = [th.text for th in soup.find(name="thead").find_all("th")]
# initialize empty array
rows = []
# loop over rows and extract values
for row in soup.find(name="tbody").find_all(name="tr"):
element = {}
for i, col in enumerate(row.find_all(name="td")):
element[table_headers[i]] = col.text
rows.append(element)
# return data as data frame
return pd.DataFrame.from_records(data=rows)
def fetch_incidence(ags: str) -> pd.DataFrame:
"""
Fetch the Corona incidences for a given region identified by AGS.
:param ags: Amtlicher Gemeindeschlüssel
:type ags: str
:return: DataFrame with the historical incidences
:rtype: pd.DataFrame
"""
# build url
url = f"https://api.corona-zahlen.org/districts/{ags}/history/incidence"
# GET request
response = requests.get(url=url, headers=headers)
# parse and extract data
data = response.json()["data"]
# reformat data: parse date and rename fields
incidences = [{"date": date.fromisoformat(e["date"][:10]), "incidence": e["weekIncidence"]} for
e in data[ags]["history"]]
# transform data into a data frame
df_incidences = | pd.DataFrame.from_records(data=incidences) | pandas.DataFrame.from_records |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 9 17:02:59 2018
@author: bruce
"""
# last version = plot_corr_mx_concate_time_linux_v1.6.0.py
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import librosa
def correlation_matrix(corr_mx, cm_title):
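"""Plot the 22x22 test/retest correlation matrix as a grayscale heatmap with a colorbar and each coefficient annotated in its cell."""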
from matplotlib import pyplot as plt
from matplotlib import cm as cm
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(corr_mx, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
#plt.title('cross correlation of test and retest')
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
#fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
plt.show()
def correlation_matrix_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cs = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cs)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rank(corr_mx, cm_title):
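"""Plot the row-wise rank (1 = highest correlation) of each entry in the correlation matrix, with the integer ranks annotated."""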
temp = corr_mx
#output = (temp == temp.max(axis=1)[:,None]) # along row
output = temp.rank(axis=1, ascending=False)
fig, ax1 = plt.subplots()
im1 = ax1.matshow(output, cmap=plt.cm.Wistia)
#cs = ax1.matshow(output)
fig.colorbar(im1)
ax1.grid(False)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.title(cm_title)
# show digit in matrix
output = np.asarray(output)
for i in range(22):
for j in range(22):
c = output[j,i]
ax1.text(i, j, int(c), va='center', ha='center')
plt.show()
def correlation_matrix_comb(corr_mx, cm_title):
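"""Plot the correlation matrix (annotated with row-wise ranks) next to a binary mask that marks the maximum of each row."""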
fig, (ax2, ax3) = plt.subplots(1, 2)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
'''
# graph 1 grayscale
im1 = ax1.matshow(corr_mx, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im1, ax=ax1)
ax1.grid(False)
ax1.set_title(cm_title)
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
'''
# graph 2 yellowscale
corr_mx_rank = corr_mx.rank(axis=1, ascending=False)
cmap_grey = LinearSegmentedColormap.from_list('mycmap', ['white', 'black'])
im2 = ax2.matshow(corr_mx, cmap='viridis')
# colorbar need numpy version 1.13.1
fig.colorbar(im2, ax=ax2)
ax2.grid(False)
ax2.set_title(cm_title)
ax2.set_xticks(np.arange(len(xlabels)))
ax2.set_yticks(np.arange(len(ylabels)))
ax2.set_xticklabels(xlabels,fontsize=6)
ax2.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
# show digit in matrix
corr_mx_rank = np.asarray(corr_mx_rank)
for i in range(22):
for j in range(22):
c = corr_mx_rank[j,i]
ax2.text(i, j, int(c), va='center', ha='center')
# graph 3
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
im3 = ax3.matshow(output, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im3, ax=ax3)
ax3.grid(False)
ax3.set_title(cm_title)
ax3.set_xticks(np.arange(len(xlabels)))
ax3.set_yticks(np.arange(len(ylabels)))
ax3.set_xticklabels(xlabels,fontsize=6)
ax3.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_tt_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rr_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
# shrink value for correlation matrix
# in order to use colormap -> 10 scale
def shrink_value_03_1(corr_in1):
corr_out1 = corr_in1.copy()
# here dataframe.copy() must be used, otherwise input can also be changed when changing output
for i in range (22):
for j in range(22):
if corr_in1.iloc[i, j] < 0.3:
corr_out1.iloc[i, j] = 0.3
return corr_out1
def shrink_value_05_1(corr_in2):
corr_out2 = corr_in2.copy()
# here dataframe.copy() must be used, otherwise input can also be changed when changing output
for i2 in range (22):
for j2 in range(22):
if corr_in2.iloc[i2, j2] < 0.5:
corr_out2.iloc[i2, j2] = 0.5
return corr_out2
# not used!!!!!!!!!!!!
# normalize the complex signal series
def normalize_complex_arr(a):
a_oo = a - a.real.min() - 1j*a.imag.min() # origin offsetted
return a_oo/np.abs(a_oo).max()
#################################
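# convert a (complex) amplitude to decibels: 20*log10(|x|)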
f_dB = lambda x : 20 * np.log10(np.abs(x))
# import the pkl file
#pkl_file=pd.read_pickle('/Users/bruce/Documents/uOttawa/Project/audio_brainstem_response/Data_BruceSunMaster_Studies/study2/study2DataFrame.pkl')
df_EFR=pd.read_pickle('/home/bruce/Dropbox/Project/4.Code for Linux/df_EFR.pkl')
# remove DC offset
df_EFR_detrend = pd.DataFrame()
for i in range(1408):
# combine next two rows later
df_EFR_detrend_data = pd.DataFrame(signal.detrend(df_EFR.iloc[i: i+1, 0:1024], type='constant').reshape(1,1024))
df_EFR_label = pd.DataFrame(df_EFR.iloc[i, 1024:1031].values.reshape(1,7))
df_EFR_detrend = df_EFR_detrend.append(pd.concat([df_EFR_detrend_data, df_EFR_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_detrend.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_detrend = df_EFR_detrend.reset_index(drop=True)
df_EFR = df_EFR_detrend
# Time domain
# Define window function
win_kaiser = signal.kaiser(1024, beta=14)
win_hamming = signal.hamming(1024)
# average the df_EFR
df_EFR_win = pd.DataFrame()
# implement the window function
for i in range(1408):
temp_EFR_window = pd.DataFrame((df_EFR.iloc[i,:1024] * win_hamming).values.reshape(1,1024))
temp_EFR_label = pd.DataFrame(df_EFR.iloc[i, 1024:1031].values.reshape(1,7))
df_EFR_win = df_EFR_win.append(pd.concat([temp_EFR_window, temp_EFR_label], axis=1, ignore_index=True))
# set the title of columns
# df_EFR_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# df_EFR_avg = df_EFR_avg.sort_values(by=["Condition", "Subject"])
# df_EFR_avg = df_EFR_avg.reset_index(drop=True)
df_EFR_win.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_win = df_EFR_win.sort_values(by=["Condition", "Subject"])
df_EFR_win = df_EFR_win.reset_index(drop=True)
# average all the subjects , test and retest and keep one sound levels
# filter by 'a vowel and 85Db'
df_EFR_sorted = df_EFR.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_sorted = df_EFR_sorted.reset_index(drop=True)
df_EFR_win_sorted = df_EFR_win.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_win_sorted = df_EFR_win_sorted.reset_index(drop=True)
# filter out the 55, 65 and 75 dB sound levels and keep only 85 dB
# keep vowel condition and subject
df_EFR_85 = | pd.DataFrame(df_EFR_sorted.iloc[1056:, :]) | pandas.DataFrame |
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, Series
import pandas._testing as tm
class TestDataFrameIndexingCategorical:
def test_assignment(self):
# assignment
df = DataFrame(
{"value": np.array(np.random.randint(0, 10000, 100), dtype="int32")}
)
labels = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
df = df.sort_values(by=["value"], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df["D"] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype("int32"), CategoricalDtype(categories=labels, ordered=False)],
index=["value", "D"],
)
tm.assert_series_equal(result, expected)
df["E"] = s
str(df)
result = df.dtypes
expected = Series(
[
np.dtype("int32"),
CategoricalDtype(categories=labels, ordered=False),
CategoricalDtype(categories=labels, ordered=False),
],
index=["value", "D", "E"],
)
tm.assert_series_equal(result, expected)
result1 = df["D"]
result2 = df["E"]
tm.assert_categorical_equal(result1._mgr._block.values, d)
# sorting
s.name = "E"
tm.assert_series_equal(result2.sort_index(), s.sort_index())
cat = Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = DataFrame(Series(cat))
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
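# In the cases below, assigning a value that is not among the defined categories is expected to
# raise a ValueError; the msg1/msg2 patterns further down capture the two error messages involved.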
cats = Categorical(["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = DataFrame({"cats": cats1, "values": values1}, index=idx1)
# changed multiple rows
cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = DataFrame({"cats": cats2, "values": values2}, index=idx2)
# changed part of the cats column
cats3 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = DataFrame({"cats": cats3, "values": values3}, index=idx3)
# changed single value in cats col
cats4 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = DataFrame(
{"cats": cats4, "values": values4}, index=idx4
)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
msg1 = (
"Cannot setitem on a Categorical with a new category, "
"set the categories first"
)
msg2 = "Cannot set a Categorical with another, without identical categories"
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.iloc[2, 0] = "c"
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.iloc[2, :] = ["c", 2]
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg2):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = Categorical(list("bb"), categories=list("abc"))
with pytest.raises(ValueError, match=msg2):
# different values
df = orig.copy()
df.iloc[2:4, 0] = Categorical(list("cc"), categories=list("abc"))
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg1):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j", "cats"] = "c"
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j", :] = ["c", 2]
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg2):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(
["b", "b"], categories=["a", "b", "c"]
)
with pytest.raises(ValueError, match=msg2):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(
["c", "c"], categories=["a", "b", "c"]
)
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg1):
df.loc["j":"k", "cats"] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", df.columns[0]] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", df.columns[0]] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j", df.columns[0]] = "c"
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j", :] = ["c", 2]
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", df.columns[0]] = Categorical(["b", "b"], categories=["a", "b"])
| tm.assert_frame_equal(df, exp_parts_cats_col) | pandas._testing.assert_frame_equal |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
import datetime as dt
import re
import cupy as cp
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from pandas.util.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
import cudf
from cudf.core import DataFrame, Series
from cudf.core.index import DatetimeIndex
from cudf.tests.utils import NUMERIC_TYPES, assert_eq
def data1():
return pd.date_range("20010101", "20020215", freq="400h", name="times")
def data2():
return pd.date_range("20010101", "20020215", freq="400h", name="times")
def timeseries_us_data():
return pd.date_range(
"2019-07-16 00:00:00",
"2019-07-16 00:00:01",
freq="5555us",
name="times",
)
def timestamp_ms_data():
return pd.Series(
[
"2019-07-16 00:00:00.333",
"2019-07-16 00:00:00.666",
"2019-07-16 00:00:00.888",
]
)
def timestamp_us_data():
return pd.Series(
[
"2019-07-16 00:00:00.333333",
"2019-07-16 00:00:00.666666",
"2019-07-16 00:00:00.888888",
]
)
def timestamp_ns_data():
return pd.Series(
[
"2019-07-16 00:00:00.333333333",
"2019-07-16 00:00:00.666666666",
"2019-07-16 00:00:00.888888888",
]
)
def numerical_data():
return np.arange(1, 10)
fields = ["year", "month", "day", "hour", "minute", "second", "weekday"]
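# Each of these datetime accessor fields is exercised through the .dt accessor in test_dt_series /
# test_dt_index below and compared against the corresponding pandas result.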
@pytest.mark.parametrize("data", [data1(), data2()])
def test_series(data):
pd_data = pd.Series(data.copy())
gdf_data = Series(pd_data)
assert_eq(pd_data, gdf_data)
@pytest.mark.parametrize(
"lhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"rhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_series_binops_pandas(lhs_dtype, rhs_dtype):
pd_data_1 = pd.Series(
pd.date_range("20010101", "20020215", freq="400h", name="times")
)
pd_data_2 = pd.Series(
pd.date_range("20010101", "20020215", freq="401h", name="times")
)
gdf_data_1 = Series(pd_data_1).astype(lhs_dtype)
gdf_data_2 = Series(pd_data_2).astype(rhs_dtype)
assert_eq(pd_data_1, gdf_data_1.astype("datetime64[ns]"))
assert_eq(pd_data_2, gdf_data_2.astype("datetime64[ns]"))
assert_eq(pd_data_1 < pd_data_2, gdf_data_1 < gdf_data_2)
assert_eq(pd_data_1 > pd_data_2, gdf_data_1 > gdf_data_2)
assert_eq(pd_data_1 == pd_data_2, gdf_data_1 == gdf_data_2)
assert_eq(pd_data_1 <= pd_data_2, gdf_data_1 <= gdf_data_2)
assert_eq(pd_data_1 >= pd_data_2, gdf_data_1 >= gdf_data_2)
@pytest.mark.parametrize(
"lhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"rhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_series_binops_numpy(lhs_dtype, rhs_dtype):
pd_data_1 = pd.Series(
pd.date_range("20010101", "20020215", freq="400h", name="times")
)
pd_data_2 = pd.Series(
pd.date_range("20010101", "20020215", freq="401h", name="times")
)
gdf_data_1 = Series(pd_data_1).astype(lhs_dtype)
gdf_data_2 = Series(pd_data_2).astype(rhs_dtype)
np_data_1 = np.array(pd_data_1).astype(lhs_dtype)
np_data_2 = np.array(pd_data_2).astype(rhs_dtype)
np.testing.assert_equal(np_data_1, gdf_data_1.to_array())
np.testing.assert_equal(np_data_2, gdf_data_2.to_array())
np.testing.assert_equal(
np.less(np_data_1, np_data_2), (gdf_data_1 < gdf_data_2).to_array()
)
np.testing.assert_equal(
np.greater(np_data_1, np_data_2), (gdf_data_1 > gdf_data_2).to_array()
)
np.testing.assert_equal(
np.equal(np_data_1, np_data_2), (gdf_data_1 == gdf_data_2).to_array()
)
np.testing.assert_equal(
np.less_equal(np_data_1, np_data_2),
(gdf_data_1 <= gdf_data_2).to_array(),
)
np.testing.assert_equal(
np.greater_equal(np_data_1, np_data_2),
(gdf_data_1 >= gdf_data_2).to_array(),
)
@pytest.mark.parametrize("data", [data1(), data2()])
def test_dt_ops(data):
pd_data = pd.Series(data.copy())
gdf_data = Series(data.copy())
assert_eq(pd_data == pd_data, gdf_data == gdf_data)
assert_eq(pd_data < pd_data, gdf_data < gdf_data)
assert_eq(pd_data > pd_data, gdf_data > gdf_data)
# libgdf doesn't respect timezones
@pytest.mark.parametrize("data", [data1()])
@pytest.mark.parametrize("field", fields)
def test_dt_series(data, field):
pd_data = pd.Series(data.copy())
gdf_data = Series(pd_data)
base = getattr(pd_data.dt, field)
test = getattr(gdf_data.dt, field).to_pandas().astype("int64")
assert_series_equal(base, test)
@pytest.mark.parametrize("data", [data1()])
@pytest.mark.parametrize("field", fields)
def test_dt_index(data, field):
pd_data = data.copy()
gdf_data = DatetimeIndex(pd_data)
assert_index_equal(
getattr(gdf_data, field).to_pandas(), getattr(pd_data, field)
)
def test_setitem_datetime():
df = DataFrame()
df["date"] = pd.date_range("20010101", "20010105").values
assert np.issubdtype(df.date.dtype, np.datetime64)
def test_sort_datetime():
df = pd.DataFrame()
df["date"] = np.array(
[
np.datetime64("2016-11-20"),
np.datetime64("2020-11-20"),
np.datetime64("2019-11-20"),
np.datetime64("1918-11-20"),
np.datetime64("2118-11-20"),
]
)
df["vals"] = np.random.sample(len(df["date"]))
gdf = cudf.from_pandas(df)
s_df = df.sort_values(by="date")
s_gdf = gdf.sort_values(by="date")
assert_eq(s_df, s_gdf)
def test_issue_165():
df_pandas = pd.DataFrame()
start_date = dt.datetime.strptime("2000-10-21", "%Y-%m-%d")
data = [(start_date + dt.timedelta(days=x)) for x in range(6)]
df_pandas["dates"] = data
df_pandas["num"] = [1, 2, 3, 4, 5, 6]
df_cudf = DataFrame.from_pandas(df_pandas)
base = df_pandas.query("dates==@start_date")
test = df_cudf.query("dates==@start_date")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date
base_mask = df_pandas.dates == start_date
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
start_date_ts = pd.Timestamp(start_date)
test = df_cudf.query("dates==@start_date_ts")
base = df_pandas.query("dates==@start_date_ts")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date_ts
base_mask = df_pandas.dates == start_date_ts
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
start_date_np = np.datetime64(start_date_ts, "ns")
test = df_cudf.query("dates==@start_date_np")
base = df_pandas.query("dates==@start_date_np")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date_np
base_mask = df_pandas.dates == start_date_np
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
@pytest.mark.parametrize("data", [data1(), data2()])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
def test_typecast_from_datetime(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data)
gdf_data = Series(pd_data)
np_casted = np_data.astype(dtype)
gdf_casted = gdf_data.astype(dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [data1(), data2()])
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_from_datetime_to_int64_to_datetime(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data)
gdf_data = Series(pd_data)
np_casted = np_data.astype(np.int64).astype(dtype)
gdf_casted = gdf_data.astype(np.int64).astype(dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [timeseries_us_data()])
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_different_datetime_resolutions(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data).astype(dtype)
gdf_series = Series(pd_data).astype(dtype)
np.testing.assert_equal(np_data, gdf_series.to_array())
@pytest.mark.parametrize(
"data", [timestamp_ms_data(), timestamp_us_data(), timestamp_ns_data()]
)
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_string_timstamp_typecast_to_different_datetime_resolutions(
data, dtype
):
pd_sr = data
gdf_sr = cudf.Series.from_pandas(pd_sr)
expect = pd_sr.values.astype(dtype)
got = gdf_sr.astype(dtype).values_host
np.testing.assert_equal(expect, got)
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("from_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_data = Series(np_data)
np_casted = np_data.astype(to_dtype)
gdf_casted = gdf_data.astype(to_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("from_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_from_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_data = Series(np_data)
np_casted = np_data.astype(to_dtype).astype(from_dtype)
gdf_casted = gdf_data.astype(to_dtype).astype(from_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize(
"from_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_from_datetime_to_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_col = Series(np_data)._column
np_casted = np_data.astype(to_dtype)
gdf_casted = gdf_col.astype(to_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("nulls", ["some", "all"])
def test_to_from_pandas_nulls(data, nulls):
pd_data = pd.Series(data.copy().astype("datetime64[ns]"))
if nulls == "some":
# Fill half the values with NaT
pd_data[list(range(0, len(pd_data), 2))] = np.datetime64("nat", "ns")
elif nulls == "all":
# Fill all the values with NaT
pd_data[:] = np.datetime64("nat", "ns")
gdf_data = Series.from_pandas(pd_data)
expect = pd_data
got = gdf_data.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_to_arrow(dtype):
timestamp = (
cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={}
)
.reset_index()["timestamp"]
.reset_index(drop=True)
)
gdf = DataFrame({"timestamp": timestamp.astype(dtype)})
assert_eq(gdf, DataFrame.from_arrow(gdf.to_arrow(preserve_index=False)))
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(pd.date_range("2010-01-01", "2010-02-01")),
pd.Series([None, None], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize(
"nulls", ["none", pytest.param("some", marks=pytest.mark.xfail)]
)
def test_datetime_unique(data, nulls):
psr = pd.Series(data)
print(data)
print(nulls)
if len(data) > 0:
if nulls == "some":
p = np.random.randint(0, len(data), 2)
psr[p] = None
gsr = cudf.from_pandas(psr)
expected = psr.unique()
got = gsr.unique()
assert_eq(pd.Series(expected), got.to_pandas())
@pytest.mark.parametrize(
"data",
[
[],
pd.Series( | pd.date_range("2010-01-01", "2010-02-01") | pandas.date_range |
import h5py
import typing
import datetime
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import OneHotEncoder
from numpy.core._multiarray_umath import ndarray
from model_logging import get_logger
import glob
class DataLoader():
def __init__(
self,
dataframe: pd.DataFrame,
target_datetimes: typing.List[datetime.datetime],
stations: typing.Dict[typing.AnyStr, typing.Tuple[float, float, float]],
target_time_offsets: typing.List[datetime.timedelta],
config: typing.Dict[typing.AnyStr, typing.Any],
data_folder: typing.AnyStr
):
"""
Copy-paste from evaluator.py:
Args:
dataframe: a pandas dataframe that provides the netCDF file path (or HDF5 file path and offset) for all
relevant timestamp values over the test period.
target_datetimes: a list of timestamps that your data loader should use to provide imagery for your model.
The ordering of this list is important, as each element corresponds to a sequence of GHI values
to predict. By definition, the GHI values must be provided for the offsets given by
``target_time_offsets`` which are added to each timestamp (T=0) in this datetimes list.
stations: a map of station names of interest paired with their coordinates (latitude, longitude, elevation)
target_time_offsets: the list of time-deltas to predict GHIs for (by definition: [T=0, T+1h, T+3h, T+6h]).
config: configuration dictionary holding extra parameters
"""
self.dataframe = dataframe
self.target_datetimes = target_datetimes
self.stations = list(stations.keys())
self.config = config
self.target_time_offsets = target_time_offsets
self.data_folder = data_folder
self.initialize()
def initialize(self):
self.logger = get_logger()
self.logger.debug("Initialize start")
self.test_station = self.stations[0]
self.output_seq_len = len(self.target_time_offsets)
self.data_files_list = glob.glob(self.data_folder + "/*.hdf5")
# sort required for evaluator script
self.data_files_list.sort()
stations = np.array([b"BND", b"TBL", b"DRA", b"FPK", b"GWN", b"PSU", b"SXF"])
self.encoder = OneHotEncoder(sparse=False)
stations = stations.reshape(len(stations), 1)
self.encoder.fit(stations)
self.data_loader = tf.data.Dataset.from_generator(
self.data_generator_fn,
output_types=(tf.float32, tf.float32, tf.float32, tf.bool, tf.float32, tf.float32, tf.float32)
).prefetch(tf.data.experimental.AUTOTUNE)
def to_cyclical_secondofday(self, date):
SECONDS_PER_DAY = 24 * 60 * 60
second_of_day = (date.hour * 60 + date.minute) * 60 + date.second
day_cycle_rad = second_of_day / SECONDS_PER_DAY * 2.0 * np.pi
day_cycle_x = np.sin(day_cycle_rad)
day_cycle_y = np.cos(day_cycle_rad)
return pd.DataFrame(day_cycle_x), pd.DataFrame(day_cycle_y)
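# Worked example of the cyclical encoding above: 06:00:00 gives second_of_day = 21600, i.e. a quarter
# of the day, so day_cycle_rad = pi/2 and (x, y) = (1.0, 0.0); midnight and 23:59:59 map to almost the
# same point, which is exactly what the sin/cos pair is meant to achieve.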
def to_cyclical_dayofyear(self, date):
DAYS_PER_YEAR = 365
year_cycle_rad = date.dayofyear / DAYS_PER_YEAR * 2.0 * np.pi  # scale to a full 2*pi cycle, as in to_cyclical_secondofday
year_cycle_x = np.sin(year_cycle_rad)
year_cycle_y = np.cos(year_cycle_rad)
return pd.DataFrame(year_cycle_x), | pd.DataFrame(year_cycle_y) | pandas.DataFrame |
# Copyright 2020 trueto
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import json
import chardet
import pandas as pd
from pathlib import Path
str_type = {
"疾病和诊断": "Dis",
"解剖部位": "Body",
"实验室检验": "Test",
"影像检查": "CT",
"药物": "Drug",
"手术": "Sur"
}
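# English gloss of the label types above, for reference:
# 疾病和诊断 = disease and diagnosis, 解剖部位 = anatomical site, 实验室检验 = laboratory test,
# 影像检查 = imaging examination, 药物 = drug, 手术 = surgery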
def C_trans_to_E(string):
E_pun = u',.!?[]()<>"\'"\':;'
C_pun = u',。!?【】()《》“‘”’:;'
table= {ord(f): ord(t) for f, t in zip(C_pun, E_pun)}
string = string.translate(table)
return re.sub("[ |\t|\r|\n|\\\]", "_", string)
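# C_trans_to_E maps common full-width (Chinese) punctuation to its ASCII counterpart via str.translate
# and then replaces whitespace and backslashes with underscores.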
def strQ2B(ustr):
"全角转半角"
rstr = ""
for uchar in ustr:
inside_code = ord(uchar)
# a full-width space is converted directly
if inside_code == 12288:
inside_code = 32
# other full-width characters (except the space) are converted by a fixed offset
elif (inside_code >= 65281 and inside_code <= 65374):
inside_code -= 65248
rstr += chr(inside_code)
return rstr
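# strQ2B relies on the fact that full-width code points 65281-65374 sit at a fixed offset of 65248
# (0xFEE0) above their ASCII counterparts; the full-width space (12288) is the one exception handled separately.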
def get_X_y(in_file, out_file, max_len=500):
X = []
y = []
entity_data = []
with open(in_file, 'r', encoding='utf8') as f:
for line in f:
tempObj = json.loads(line)
originalText = tempObj['originalText']
text = C_trans_to_E(strQ2B(originalText))
entities = tempObj['entities']
print("Processing text:{}".format(text))
if len(text) <= max_len:
X_ = list(text)
y_ = ["O"] * len(X_)
for entity in entities:
start_pos = entity["start_pos"]
end_pos = entity["end_pos"]
label_type = entity["label_type"]
if "clinical" in in_file:
tag = str_type[label_type]
else:
tag = label_type
# for i in range(start_pos, end_pos):
# y_[i] = tag
entity_data.append([text[start_pos : end_pos], tag])
y_[start_pos] = 'B-'+tag
for i in range(start_pos+1, end_pos):
y_[i] = 'I-' + tag
assert len(X_) == len(y_)
X.append(X_)
y.append(y_)
else:
# split the long text into shorter segments
dot_index_list = []
text_ = text
flag = 0
while(len(text_) > max_len):
text_ = text_[:max_len]
index_list = []
for match in re.finditer(',', text_):
index = match.span()[0]
index_list.append(index)
# last_dot = index_list.pop()
if len(index_list) > 1:
last_dot = index_list.pop()
else:
index_list_ = []
for match in re.finditer('.', text_):
index = match.span()[0]
index_list_.append(index)
if len(index_list_) > 1:
last_dot = index_list_.pop()
else:
last_dot = len(text_)
dot_index_list.append(last_dot + flag)
text_ = text[last_dot+flag:]
flag += last_dot
print(dot_index_list)
flag = 0
dot_index_list.append(len(text))
for i, dot_index in enumerate(dot_index_list):
short_text = text[flag: dot_index+1]
X_ = list(short_text)
print("Short text:{}".format(short_text))
y_ = ["O"] * len(X_)
for entity in entities:
start_pos = entity["start_pos"]
end_pos = entity["end_pos"]
label_type = entity["label_type"]
if "clinical" in in_file:
tag = str_type[label_type]
else:
tag = label_type
#for j in range(start_pos, end_pos):
# j = j - flag
# if j >= 0 and j < len(y_):
# y_[j] = tag
en_list = []
k = start_pos - flag
if k >= 0 and k < len(y_):
y_[k] = 'B-' + tag
en_list.append(X_[k])
for j in range(start_pos+1, end_pos):
j = j - flag
if j >= 0 and j < len(y_):
y_[j] = 'I-' + tag
en_list.append(X_[j])
if len(en_list) > 0:
entity_data.append(["".join(en_list), tag])
# if start_pos - flag > 0:
# print(short_text[start_pos - flag : end_pos - flag])
assert len(X_) == len(y_)
X.append(X_)
y.append(y_)
flag = dot_index + 1
assert len(X) == len(y)
data_obj = (X, y, entity_data)
pd.to_pickle(data_obj, out_file)
def get_X(in_file, out_file, max_len=500):
X = []
cut_his = {}
originalTexts = []
texts = []
with open(in_file, 'rb') as f:
encoding = chardet.detect(f.read())['encoding']
with open(in_file, 'r', encoding="utf8") as f:
for text_id, line in enumerate(f):
tempObj = json.loads(line, encoding=encoding)
originalText = tempObj['originalText']
originalTexts.append(originalText)
text = C_trans_to_E(strQ2B(originalText))
texts.append(text)
print("Processing text:{}".format(text))
if len(text) <= max_len:
X_ = list(text)
X.append(X_)
cut_his[text_id] = len(X) - 1
else:
# split the long text into shorter segments
dot_index_list = []
text_ = text
flag = 0
while(len(text_) > max_len):
text_ = text_[:max_len]
index_list = []
for match in re.finditer(',', text_):
index = match.span()[0]
index_list.append(index)
# last_dot = index_list.pop()
if len(index_list) > 1:
last_dot = index_list.pop()
else:
index_list_ = []
for match in re.finditer('.', text_):
index = match.span()[0]
index_list_.append(index)
if len(index_list_) > 1:
last_dot = index_list_.pop()
else:
last_dot = len(text_)
dot_index_list.append(last_dot + flag)
text_ = text[last_dot+flag:]
flag += last_dot
print(dot_index_list)
flag = 0
dot_index_list.append(len(text))
text_id_list = []
for i, dot_index in enumerate(dot_index_list):
short_text = text[flag: dot_index+1]
X_ = list(short_text)
X.append(X_)
text_id_list.append(len(X)-1)
flag = dot_index + 1
cut_his[text_id] = text_id_list
# assert len(X) == len(ids)
data_obj = (X, cut_his, originalTexts, texts)
pd.to_pickle(data_obj, out_file)
def get_vocab_csv(input_file, name):
_, _, entity_data = pd.read_pickle(input_file)
tmp_df = | pd.DataFrame(data=entity_data, columns=['entity', 'label_type']) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# %load ./imports.py
# %load /Users/bartev/dev/github-bv/sporty/notebooks/imports.py
## Where am I
get_ipython().system('echo $VIRTUAL_ENV')
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
# magics
get_ipython().run_line_magic('load_ext', 'blackcellmagic')
# start cell with `%%black` to format using `black`
get_ipython().run_line_magic('load_ext', 'autoreload')
# start cell with `%autoreload` to reload module
# https://ipython.org/ipython-doc/stable/config/extensions/autoreload.html
# reload all modules when running
get_ipython().run_line_magic('autoreload', '2')
# In[29]:
# imports
import pandas as pd
import numpy as np
import statsmodels.formula.api as smf
import seaborn as sns
from importlib import reload
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# https://plotnine.readthedocs.io/en/stable/
import plotnine as p9
from plotnine import ggplot, aes, facet_wrap
from src.utils import lower_case_col_names, drop_suffix
import src.data.load_data as ld
from src.data.load_data import get_nba_game_team_points, load_nba, load_nba_games_dataset
# In[100]:
nba_games = load_nba_games_dataset()
nba_games.head()
# # Explore the dataset
# ## Qualitative vs Quantitative Data
# * object: qualitative variable
# * int64: quantitative and discrete (integer) (-2^63) - (2^63 - 1)
# * float64: quantitative and continuous - real numbers (64 bit)
# In[81]:
nba_games.dtypes
# ## Convert a categorical variable to a dummy variable
# `pd.get_dummies` creates new columns, and drops the original columns.
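# For example (a sketch, assuming `wl` holds 'W'/'L' values): pd.get_dummies(nba_games, columns=['wl'])
# replaces `wl` with 0/1 indicator columns `wl_W` and `wl_L`, which is what the cells below rely on.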
# In[84]:
nba_games.head()
# In[88]:
dummy = pd.get_dummies(nba_games, columns=['wl'])
dummy.columns
# ### 3 ways to merge `wl_W`
# In[91]:
pd.concat([nba_games, dummy['wl_W']], axis=1).rename(columns={'wl_W': 'win'}).head()
# In[93]:
nba_games.pipe(lambda x: pd.concat([x, dummy['wl_W']], axis=1))
# In[101]:
nba_games_w = nba_games.merge(dummy).drop(columns='wl_L').rename(columns={'wl_W':'win'})
nba_games_w
# In[102]:
nba_games['game_date_est'].dtype
# Currently, the dates are stored as objects (plain strings), so they carry no chronological ordering
# Use `pd.to_datetime()` to convert to a date variable
# In[104]:
import datetime
nba_games_d = (nba_games_w
.assign(game_date=lambda x: | pd.to_datetime(x['game_date_est']) | pandas.to_datetime |
'''
Run using python from terminal.
Doesn't read from scripts directory (L13) when run from poetry shell.
'''
import pandas as pd
import pandas.testing as pd_testing
import typing as tp
import os
import unittest
from unittest import mock
import datetime
from scripts import influx_metrics_univ3 as imetrics
class TestInfluxMetrics(unittest.TestCase):
def get_price_cumulatives_df(self, path) -> pd.DataFrame:
'''
Helper to return dataframe used to mock out `query_data_frame` in the
`get_price_cumulatives` function in `scripts/influx_metrics_univ3.py`
'''
base = os.path.dirname(os.path.abspath(__file__))
base = os.path.abspath(os.path.join(base, os.pardir))
base = os.path.join(base, 'helpers')
base = os.path.join(base, path)
base = os.path.join(base, 'get-price-cumulatives.csv')
df = pd.read_csv(base, sep=',')
df._start = pd.to_datetime(df._start)
df._stop = pd.to_datetime(df._stop)
df._time = | pd.to_datetime(df._time) | pandas.to_datetime |
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
def compute_running_avg(trace, period=100):
"""Computes the running average of a single data trace.
Attributes:
trace: The Python list data trace to compute averages for.
period: The size of the window of previous data points that are
used in the moving average computation.
Returns: Python list containing the running average at each point.
"""
n = len(trace)
if period > n:
period = n
avgs = | pd.Series(trace) | pandas.Series |
import pandas as pd
from ismore import brainamp_channel_lists
from ismore.common_state_lists import *
from utils.constants import *
#### BrainAmp-related settings ####
VERIFY_BRAINAMP_DATA_ARRIVAL = True
# print warning if EMG data doesn't arrive or stops arriving for this long
VERIFY_BRAINAMP_DATA_ARRIVAL_TIME = 1 # secs
# to verify if the names of the channel names being streamed from brainvision and the selected ones match
VERIFY_BRAINAMP_CHANNEL_LIST = False
# the channels that the Python code will receive, make available to the task,
# and save into the HDF file
# (received data on other BrainAmp channels will simply be discarded)
# !! THIS IS A DEFAULT VALUE, BUT IF THERE IS A TRAIT IN THE ISMORETASKS FUNCTION, THE CHANNEL LIST WILL BE SELECTED FROM THE TASK SERVER INTERFACE
# AND THIS VARIABLE WILL BE MODIFIED ACCORDING TO THAT SELECTION
BRAINAMP_CHANNELS = brainamp_channel_lists.eeg32_raw_filt
#BRAINAMP_CHANNELS = brainamp_channel_lists.emg14_raw_filt
#BRAINAMP_CHANNELS = ismoretasks.BRAINAMP_CHANNELS #nerea
###################################
# send SetSpeed commands to server addresses
# receive feedback data on client addresses
ARMASSIST_UDP_SERVER_ADDR = ('127.0.0.1', 5001)
ARMASSIST_UDP_CLIENT_ADDR = ('127.0.0.1', 5002)
REHAND_UDP_SERVER_ADDR = ('127.0.0.1', 5000)
REHAND_UDP_CLIENT_ADDR = ('127.0.0.1', 5003)
VERIFY_PLANT_DATA_ARRIVAL = True
# print warning if plant data doesn't arrive or stops arriving for this long
VERIFY_PLANT_DATA_ARRIVAL_TIME = 1 # secs
DONNING_SAME_AS_STARTING_POSITION = True
WATCHDOG_ENABLED = False
WATCHDOG_TIMEOUT = 1000 # ms
MAT_SIZE = [85, 95] # cm
####################################
##### Starting position of exo #####
####################################
starting_pos = | pd.Series(0.0, ismore_pos_states) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# ## Problem 2 - Plotting temperatures
#
# In this problem we will plot monthly mean temperatures from the Helsinki-Vantaa airport for the past 30 years.
#
# ## Input data
#
# File `data/helsinki-vantaa.csv` contains monthly average temperatures from the Helsinki-Vantaa airport; the code below uses its `DATE` and `TEMP_C` columns.
#
# ### Part 1
#
# Load the Helsinki temperature data (`data/helsinki-vantaa.csv`)
#
# - Read the data into variable called `data` using pandas
# - Parse dates from the column `'DATE'` and set the dates as index in the dataframe
# YOUR CODE HERE 1 to read the data into data and parse dates
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# read the data, parse dates from the column `'DATE'`, and set the dates as the index of the dataframe
data = pd.read_csv("data/helsinki-vantaa.csv",parse_dates=['DATE'],index_col = 'DATE' )
# This test print should print first five rows
print(data.head())
# Check the number of rows in the data frame
print(len(data))
# ### Part 2
#
# Select data for a 30 year period (January 1988 - December 2018)
#
# - Store the selection in a new variable `selection`
# YOUR CODE HERE 2
# convert the index of data to datetime and store it in a new column "date_time"
data["date_time"] = pd.to_datetime(data.index)
# Select data for a 30 year period (January 1988 - December 2018)
selection = data.loc[(data.index >= '1988-01-01' )& (data.index <='2018-12-31' )]
# Check that the data was read in correctly:
selection.head()
# Check how many rows of data you selected:
print("Number of rows:", len(selection))
# ### Part 3
#
# #### Part 3.1
#
# Create a line plot that displays the temperatures (`TEMP_C`) for each month in the 30 year time period:
#
# #### Part 3.2
#
# Save your figure as PNG file called `temp_line_plot.png`.
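# One possible sketch for Parts 3.1/3.2 (not the official solution; figsize and dpi are arbitrary choices):
# ax = selection["TEMP_C"].plot(figsize=(12, 6), title="Monthly mean temperature, Helsinki-Vantaa 1988-2018")
# plt.savefig("temp_line_plot.png", dpi=150)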
#
# YOUR CODE HERE 3
# Create an empty DataFrame for the aggregated values
monthly_data = | pd.DataFrame() | pandas.DataFrame |
import cv2
from datetime import datetime
import pandas
#First frame
first_frame = None
status_list = [None, None]
times=[]
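# status_list is presumably meant to hold the detection status of the two most recent frames, and
# times the timestamps at which motion starts/stops; those timestamps feed the Start/End columns below.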
df= | pandas.DataFrame(columns= ["Start","End"]) | pandas.DataFrame |
import pdb
import glob
import copy
import os
import pickle
import joblib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats
import sklearn.feature_selection
from random import choices
class FeatureColumn:
def __init__(self, category, field, preprocessors, args=None, cost=None):
self.category = category
self.field = field
self.preprocessors = preprocessors
self.args = args
self.data = None
self.cost = cost
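# FeatureColumn bundles what NHANES.process() needs for one variable: the sheet/folder (category),
# the column name in the XPT files (field), an optional list of preprocessors applied in order with
# their args, and a cost value that is simply carried along (duplicated for one-to-many expansions).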
class NHANES:
def __init__(self, db_path=None, columns=None):
self.db_path = db_path
self.columns = columns # Deprecated
self.dataset = None # Deprecated
self.column_data = None
self.column_info = None
self.df_features = None
self.df_targets = None
self.costs = None
def process(self):
df = None
cache = {}
# collect relevant data
df = []
for fe_col in self.columns:
sheet = fe_col.category
field = fe_col.field
data_files = glob.glob(self.db_path+sheet+'/*.XPT')
df_col = []
for dfile in data_files:
print(80*' ', end='\r')
print('\rProcessing: ' + dfile.split('/')[-1], end='')
# read the file
if dfile in cache:
df_tmp = cache[dfile]
else:
df_tmp = pd.read_sas(dfile)
cache[dfile] = df_tmp
# skip if there is no SEQN
if 'SEQN' not in df_tmp.columns:
continue
#df_tmp.set_index('SEQN')
# skip if there is nothing interesting there
sel_cols = set(df_tmp.columns).intersection([field])
if not sel_cols:
continue
else:
df_tmp = df_tmp[['SEQN'] + list(sel_cols)]
df_tmp.set_index('SEQN', inplace=True)
df_col.append(df_tmp)
try:
df_col = pd.concat(df_col)
except:
#raise Error('Failed to process' + field)
raise Exception('Failed to process ' + field)
df.append(df_col)
df = pd.concat(df, axis=1)
#df = pd.merge(df, df_sel, how='outer')
# do preprocessing steps
df_proc = []#[df['SEQN']]
for fe_col in self.columns:
field = fe_col.field
fe_col.data = df[field].copy()
# do preprocessing
if fe_col.preprocessors is not None:
prepr_col = df[field]
for x in range(len(fe_col.preprocessors)):
prepr_col = fe_col.preprocessors[x](prepr_col, fe_col.args[x])
else:
prepr_col = df[field]
# handle the 1 to many
if (len(prepr_col.shape) > 1):
fe_col.cost = [fe_col.cost] * prepr_col.shape[1]
else:
fe_col.cost = [fe_col.cost]
df_proc.append(prepr_col)
self.dataset = | pd.concat(df_proc, axis=1) | pandas.concat |
import os
import sys
import inspect
from copy import deepcopy
import numpy as np
import pandas as pd
from ucimlr.helpers import (download_file, download_unzip, one_hot_encode_df_, xy_split,
normalize_df_, split_normalize_sequence, split_df, get_split, split_df_on_column)
from ucimlr.dataset import Dataset
from ucimlr.constants import TRAIN
from ucimlr.constants import REGRESSION
def all_datasets():
"""
Returns a list of all RegressionDataset classes.
"""
return [cls for _, cls in inspect.getmembers(sys.modules[__name__])
if inspect.isclass(cls)
and issubclass(cls, RegressionDataset)
and cls != RegressionDataset]
class RegressionDataset(Dataset):
type_ = REGRESSION # Is this necessary?
@property
def num_targets(self):
return self.y.shape[1]
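# Typical usage (a sketch; '<data_root>' is a placeholder for a local download directory):
# ds = Abalone('<data_root>', split=TRAIN)
# x, y = ds.x, ds.y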
class Abalone(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Abalone).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path, header=None)
y_columns = df.columns[-1:]
one_hot_encode_df_(df)
df_test, df_train, df_valid = split_df(df, [0.2, 0.8 - 0.8 * validation_size, 0.8 * validation_size])
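# 20% of the rows become the test set; the remaining 80% is split into train/validation
# according to validation_size.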
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class AirFoil(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'airfoil_self_noise.dat'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\t', names=["Frequency(Hz)", "Angle of attacks(Deg)", "Chord length(m)", "Free-stream velocity(m/s)", "Suction side displacement thickness(m)", "Scaled sound pressure level(Db)"])
y_columns = ['Scaled sound pressure level(Db)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class AirQuality(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'AirQualityUCI.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00360/AirQualityUCI.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', parse_dates=[0, 1])
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
df.Date = (df.Date - df.Date.min()).astype('timedelta64[D]') # Days as int
df.Time = df.Time.apply(lambda x: int(x.split('.')[0])) # Hours as int
df['C6H6(GT)'] = df['C6H6(GT)'].apply(lambda x: float(x.replace(',', '.'))) # Target as float
# Some floats are given with ',' instead of '.'
df = df.applymap(lambda x: float(x.replace(',', '.')) if type(x) is str else x) # Target as float
df = df[df['C6H6(GT)'] != -200] # Drop all rows with missing target values
df.loc[df['CO(GT)'] == -200, 'CO(GT)'] = -10 # -200 means missing value, shifting this to be closer to
# the other values for this column
y_columns = ['C6H6(GT)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class Appliances_energy_prediction(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'energydata_complete.csv'
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00374/energydata_complete.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, parse_dates=[0, 1])
df.date = (df.date - df.date.min()).astype('timedelta64[D]')
y_columns = ['Appliances']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class AutoMPG(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'auto-mpg.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\s+', names =["mpg", "cylinders", "displacements", "horsepower", "weight", "acceleration", "model year", "origin", "car name"])
y_columns = ['mpg']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class Automobile(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'imports-85.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names = ["symboling", "normalized-losses", "make", "fuel-type", " aspiration", "num-of-doors", "body-style", "drive-wheels", "engine-location", "wheel-base", " length", "width", " height", "curb-weight", "engine-type", "num-of-cylinders", "engine-size", " fuel-system", " bore", "stroke", " compression-ratio", "horsepower", "peak-rpm", "city-mpg", "highway-mpg", "price"])
y_columns = ['']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class BeijingAirQuality(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+Multi-Site+Air-Quality+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00501/PRSA2017_Data_20130301-20170228.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if 'PRSA_Data' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class BeijingPM(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'PRSA_data_2010.1.1-2014.12.31.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00381/PRSA_data_2010.1.1-2014.12.31.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
y_columns=['pm2.5']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class BiasCorrection(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bias+correction+of+numerical+prediction+model+temperature+forecast).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Bias_correction_ucl.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00514/Bias_correction_ucl.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, index_col = 'Date', parse_dates= True)
class BikeSharing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class CarbonNanotubes(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Carbon+Nanotubes).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'carbon_nanotubes.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00448/carbon_nanotubes.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ChallengerShuttleORing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Challenger+USA+Space+Shuttle+O-Ring).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'o-ring-erosion-only.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/space-shuttle/o-ring-erosion-only.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class BlogFeedback(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/BlogFeedback).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
file_name = 'blogData_train.csv'
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00304/BlogFeedback.zip'
download_unzip(url, dataset_path)
# Iterate all test csv and concatenate to one DataFrame
test_dfs = []
for fn in os.listdir(dataset_path):
if 'blogData_test' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
test_dfs.append(pd.read_csv(file_path, header=None))
df_test = pd.concat(test_dfs)
file_path = os.path.join(dataset_path, file_name)
df_train_valid = pd.read_csv(file_path, header=None)
y_columns = [280]
df_train_valid[y_columns[0]] = np.log(df_train_valid[y_columns[0]] + 0.01)
df_test[y_columns[0]] = np.log(df_test[y_columns[0]] + 0.01)
page_columns = list(range(50))
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class CommunitiesCrime(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'communities.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,header=None)
class ConcreteSlumpTest(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Slump+Test).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'slump_test.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/slump/slump_test.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class PropulsionPlants (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Condition+Based+Maintenance+of+Naval+Propulsion+Plants).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00316/UCI CBM Dataset.zip'
download_unzip(url, dataset_path)
filename = 'data.txt'
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, index_col='dteday', parse_dates=True)
class ConcreteCompressiveStrength (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Compressive+Strength).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Concrete_Data.xls'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path)
class ComputerHardware (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Computer+Hardware).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'machine.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/cpu-performance/machine.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names=["vendor name", "Model Name", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "PRP", "ERP"])
class CommunitiesCrimeUnnormalized (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'CommViolPredUnnormalizedData.txt'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00211/CommViolPredUnnormalizedData.txt'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = | pd.read_csv(file_path, keep_default_na=False, header=None) | pandas.read_csv |
#!/usr/bin/env python3
import os
import sys
import pandas as pd
import numpy as np
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
from multiprocessing import Pool
from os.path import isfile, join
import shutil
import warnings
from pathlib import Path
import time
warnings.simplefilter(action='ignore', category=FutureWarning)
"""
Plot Rating Curves and Compare to USGS Gages
Parameters
----------
fim_dir : str
Directory containing FIM output folders.
output_dir : str
Directory containing rating curve plots and tables.
usgs_gages_filename : str
File name of USGS rating curves.
nwm_flow_dir : str
Directory containing NWM recurrence flows files.
number_of_jobs : str
Number of jobs.
stat_groups : str
string of columns to group eval metrics.
"""
def check_file_age(file):
'''
Checks if file exists, determines the file age, and recommends
updating if older than 1 month.
Returns
-------
None.
'''
file = Path(file)
if file.is_file():
modification_time = file.stat().st_mtime
current_time = time.time()
file_age_days = (current_time - modification_time)/86400
if file_age_days > 30:
check = f'{file.name} is {int(file_age_days)} days old, consider updating.\nUpdate with rating_curve_get_usgs_curves.py'
else:
check = f'{file.name} is {int(file_age_days)} days old.'
return check
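# Illustrative example (an assumption, not part of the original workflow): the
# returned message can simply be surfaced before the comparison starts, e.g.
#
#   age_message = check_file_age(usgs_gages_filename)
#   if age_message:
#       print(age_message)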
# recurr_intervals = ['recurr_1_5_cms.csv','recurr_5_0_cms.csv','recurr_10_0_cms.csv']
def generate_rating_curve_metrics(args):
elev_table_filename = args[0]
hydrotable_filename = args[1]
usgs_gages_filename = args[2]
usgs_recurr_stats_filename = args[3]
nwm_recurr_data_filename = args[4]
rc_comparison_plot_filename = args[5]
nwm_flow_dir = args[6]
catfim_flows_filename = args[7]
huc = args[8]
elev_table = pd.read_csv(elev_table_filename,dtype={'location_id': str})
hydrotable = pd.read_csv(hydrotable_filename,dtype={'HUC': str,'feature_id': str})
usgs_gages = pd.read_csv(usgs_gages_filename,dtype={'location_id': str})
# Join rating curves with elevation data
hydrotable = hydrotable.merge(elev_table, on="HydroID")
relevant_gages = list(hydrotable.location_id.unique())
usgs_gages = usgs_gages[usgs_gages['location_id'].isin(relevant_gages)]
usgs_gages = usgs_gages.reset_index(drop=True)
if len(usgs_gages) > 0:
# Adjust rating curve to elevation
hydrotable['elevation_ft'] = (hydrotable.stage + hydrotable.dem_adj_elevation) * 3.28084 # convert from m to ft
# hydrotable['raw_elevation_ft'] = (hydrotable.stage + hydrotable.dem_elevation) * 3.28084 # convert from m to ft
hydrotable['discharge_cfs'] = hydrotable.discharge_cms * 35.3147
usgs_gages = usgs_gages.rename(columns={"flow": "discharge_cfs", "elevation_navd88": "elevation_ft"})
hydrotable['source'] = "FIM"
usgs_gages['source'] = "USGS"
limited_hydrotable = hydrotable.filter(items=['location_id','elevation_ft','discharge_cfs','source'])
select_usgs_gages = usgs_gages.filter(items=['location_id', 'elevation_ft', 'discharge_cfs','source'])
rating_curves = limited_hydrotable.append(select_usgs_gages)
# Add stream order
stream_orders = hydrotable.filter(items=['location_id','str_order']).drop_duplicates()
rating_curves = rating_curves.merge(stream_orders, on='location_id')
rating_curves['str_order'] = rating_curves['str_order'].astype('int')
# plot rating curves
generate_facet_plot(rating_curves, rc_comparison_plot_filename)
# NWM recurr intervals
recurr_1_5_yr_filename = join(nwm_flow_dir,'recurr_1_5_cms.csv')
recurr_5_yr_filename = join(nwm_flow_dir,'recurr_5_0_cms.csv')
recurr_10_yr_filename = join(nwm_flow_dir,'recurr_10_0_cms.csv')
# Update column names
recurr_1_5_yr = pd.read_csv(recurr_1_5_yr_filename,dtype={'feature_id': str})
recurr_1_5_yr = recurr_1_5_yr.rename(columns={"discharge": "1.5"})
recurr_5_yr = | pd.read_csv(recurr_5_yr_filename,dtype={'feature_id': str}) | pandas.read_csv |
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
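# Note on the dummy helpers above: CustomConstraint dispatches on the signature
# of the user-supplied callables, so the tests below cover the three supported
# shapes -- table-based f(table_data), table-and-column-based f(table_data, column)
# and column-based f(column_data). A minimal sketch using the helpers defined
# above (a usage illustration, not an additional fixture):
#
#   constraint = CustomConstraint(
#       columns='a',
#       transform=dummy_transform_table_column,
#       reverse_transform=dummy_reverse_transform_table_column,
#       is_valid=dummy_is_valid_table_column)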
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
- Run transform function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
- Run reverse transform function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
- Run is valid function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``UniqueCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because UniqueCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
UniqueCombinations(columns=columns)
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
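# Usage pattern exercised by the tests in this class (summarised here for
# readability; it mirrors the fixtures rather than the full public API):
#
#   constraint = UniqueCombinations(columns=['b', 'c'])
#   constraint.fit(table_data)               # learns the valid ('b', 'c') pairs
#   valid = constraint.is_valid(table_data)  # boolean Series, one entry per row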
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
This method raises an error when the scalar column is a list.
Input:
- scalar_column = 0
- column_names = 'b'
Side effect:
- Raise error since the scalar is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns = ('a')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
assert low == ['a']
assert high == 3
assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns = ('b')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
assert low == 3
assert high == ['b']
assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == [3]
assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == ['b', 'c']
assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
- Raise error because ``drop`` equals ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
- Raise error because ``drop`` equals ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
Make sure the method succeeds when ``drop`` is not equal to ``scalar``.
Input:
- low = 'a'
- high = 0
- scalar = 'high'
- drop = 'low'
Output:
- low = ['a']
- high = 0
- constraint_columns == ('a')
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
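# Usage pattern exercised by the tests in this class: GreaterThan(low='a',
# high='b', strict=True) constrains rows so that table_data['b'] > table_data['a'];
# passing scalar='high' or scalar='low' treats the corresponding argument as a
# fixed bound instead of a column name (see the is_valid tests below).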
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'low'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with a '#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with a '#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned name should be the two column names joined by a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the two column names joined by a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the two column names joined by a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
def test__fit_only_one_datetime_arg(self):
"""Test the ``Between._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is an int column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._high`` if ``instance_drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._low`` if ``instance_drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the single column in ``instance.constraint_columns`` with a
'#' token appended when that set contains only one column.
Input:
- Table with one column.
Side Effect:
- ``_diff_columns`` is ``['a#']``
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['a#']
def test__fit__diff_columns_multiple_columns(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the two columns in ``instance.constraint_columns`` separated
by a '#' token when both columns are in that set.
Input:
- Table with two columns.
Side Effect:
- ``_diff_columns`` is ``['b#a']``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['b#a']
def test__fit_int(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'i' for dtype in instance._dtype])
def test__fit_float(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_datetime(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'M' for dtype in instance._dtype])
def test__fit_type__high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'high'``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_type__low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'low'``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` columns as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test__fit_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], scalar='low')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is multi column, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=2, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is multi column, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=2, high=['a', 'b'], strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If scalar is none, and high is multi column, then
        the values in those columns should all be higher than
in the low column.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low='b', high=['a', 'c'], strict=False)
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a datetime and low is a column,
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below `high`.
Output:
- True should be returned for the rows where the low
column is below `high`.
"""
# Setup
high_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low='a', high=high_dt, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [datetime(2020, 5, 17), datetime(2020, 2, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a datetime and high is a column,
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below `low`.
Output:
- True should be returned for the rows where the high
column is above `low`.
"""
# Setup
low_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low=low_dt, high='a', strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [datetime(2021, 9, 17), datetime(2021, 7, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_nans(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a NaN row, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, None, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_one_nan(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a row in which we compare one NaN value with one
non-NaN value, expect that `is_valid` returns True.
Input:
- Table with a row that contains only one NaN value.
Output:
- True should be returned for the row with the NaN value.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, 5, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test__transform_int_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_high(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_low(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_float_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type float.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_datetime_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``_transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
          of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test__transform_high_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'high'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
          which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, scalar='high')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_low_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'low'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, scalar='low')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
        The ``GreaterThan._transform`` method is expected to compute the distance
        between the high scalar and each of the low columns and create diff columns
        with the logarithm of the distance + 1.
        Input:
        - Table with given data.
        Output:
        - Same table with additional diff columns containing the logarithms of the distances + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(3), np.log(2), np.log(1)],
'b#': [np.log(0), np.log(-1), np.log(-2)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
        The ``GreaterThan._transform`` method is expected to compute the distance
        between each of the high columns and the low scalar and create diff columns
        with the logarithm of the distance + 1.
        Input:
        - Table with given data.
        Output:
        - Same table with additional diff columns containing the logarithms of the distances + 1.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(-1), np.log(0), np.log(1)],
'b#': [np.log(2), np.log(3), np.log(4)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
        The ``GreaterThan._transform`` method is expected to compute the distance
        between the high column and each of the low columns and create diff columns
        with the logarithm of the distance + 1.
        Input:
        - Table with given data.
        Output:
        - Same table with additional diff columns containing the logarithms of the distances + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high='b', strict=True)
instance._diff_columns = ['a#', 'c#']
instance.constraint_columns = ['a', 'c']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'c#': [np.log(-2)] * 3,
})
pd.testing.assert_frame_equal(out, expected)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('float')]
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': | pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']) | pandas.to_datetime |
from typing import List
import plotly.graph_objects as go
from fhir.resources.resource import Resource
import pandas as pd
def plot_resource_field(resources: List[Resource], field: str, title: str = None, plot_type: str = "bar",
show: bool = True) -> go.Figure:
"""
Plot a field of a resource.
:param resources: Resources for which to plot the field
:param field: Field to plot
:param title: Title of the plot
:param plot_type: Type of plot to use. Options are: bar, histogram, pie
:param show: Show the plot
:return: Plotly figure
"""
if title is None:
title = f"{field} for {resources[0].resource_type}"
figure = go.Figure()
figure.update_layout(title_text=title, title_x=0.5)
values = [resource.dict().get(field) for resource in resources]
# convert to series and get value counts
val_counts = | pd.Series(values) | pandas.Series |
from django.shortcuts import render
from django.http import HttpResponse
from django.views import View
import pytz
import numpy as np
from datetime import datetime, time
import pandas as pd
import os, subprocess, psutil
from django.conf.urls.static import static
from . forms import SubmitTickerSymbolForm
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) #points to static folder
class CommandCenterView(View):
def __init__(self):
self.the_form = SubmitTickerSymbolForm()
self.month_year = datetime.now().strftime('%d | %B | %Y')
def contextRender(self, request,*args,**kwargs):
'''Common context renderer for the CommandCenterView'''
context = {
"title": "Command center",
"form": self.the_form,
"month_year": self.month_year,
"twsRunning": kwargs['msg'],
}
return render(request, "ib/commandCenter.html", context)
def get(self, request, *args, **kwargs):
t_msg = "Keep up the good work :)"
return self.contextRender(request\
,msg=t_msg)
def post(self, request, *args, **kwargs):
form = SubmitTickerSymbolForm(request.POST)
# launch trader work station(TWS)
if request.method == 'POST' and 'launchTws' in request.POST.keys():
if "tws.exe" in (p.name() for p in psutil.process_iter()):
t_msg = "TWS is running..."
return self.contextRender(request\
,msg=t_msg)
else:
subprocess.Popen(['C:\\Jts\\tws.exe'])
t_msg = "Launching TWS..."
return self.contextRender(request\
,msg=t_msg)
#add a ticker to forex list
elif request.method == 'POST' and 'forexQuote0' in request.POST.keys():
fName = "static\\csv\\forexWatchList.csv"
csvPathForex = os.path.join(BASE_DIR, fName )
forex_ticker = form.data['tickerSymbol'].upper()
columns = ['ticker', 'pid', 'clientid']
emptydf = pd.DataFrame(columns=columns)
try:
df = pd.read_csv(csvPathForex)
except:
emptydf.to_csv(csvPathForex, sep=',', index=False)
df = pd.read_csv(csvPathForex)
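            # pick the first unused IB API client id; ids 20-24 appear reserved for forex feeds
            # (the stock branch below hands out ids 5-19)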
client_id = [i for i in range(20, 25) if i not in df['clientid'].values ][0]
if forex_ticker in df['ticker'].values:
t_msg = "FAILED! "+forex_ticker+ " is already in the STOCK list"
return self.contextRender(request\
,msg=t_msg)
else:
insertPoint = len(df['ticker'].values)
                df.loc[insertPoint, 'ticker'] = forex_ticker  # df.loc appends the new row at the end
df.loc[insertPoint, 'clientid'] = client_id
df.to_csv(csvPathForex, sep=',', index=False)
t_msg = " Added " + forex_ticker+ " to FOREX list"
return self.contextRender(request\
,msg=t_msg)
#add a ticker to stock list
elif request.method == 'POST' and 'stockQuote0' in request.POST.keys():
fName = "static\\csv\\stockWatchList.csv"
csvPathStock = os.path.join(BASE_DIR, fName )
stock_ticker = form.data['tickerSymbol'].upper()
columns = ['ticker', 'pid', 'clientid']
emptydf = pd.DataFrame(columns=columns)
try:
df = pd.read_csv(csvPathStock)
except:
emptydf.to_csv(csvPathStock, sep=',', index=False)
df = pd.read_csv(csvPathStock)
# insertPoint = len([i for i in df['ticker'].values if isinstance(i, str)])
client_id = [i for i in range(5, 20) if i not in df['clientid'].values ][0]
if stock_ticker in df['ticker'].values:
t_msg = "FAILED! "+stock_ticker+ " is already in the STOCK list"
return self.contextRender(request\
,msg=t_msg)
else:
                # create an empty csv to avoid file-not-found errors on first use
fName = "static\\csv\\realtimeData\\" + stock_ticker + "_raw_realtime_ib.csv"
csvPath = os.path.join(BASE_DIR, fName ) # original data
columns = ['Time', 'Open', 'High', 'Low', 'Close']
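                # reset the realtime CSV if it is missing or stale (last modified before today, US/Eastern)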
try:
if datetime.fromtimestamp(os.path.getmtime(csvPath)).date() < \
datetime.now(tz=pytz.timezone('US/Eastern')).date():
emptyDf = pd.DataFrame(columns=columns)
emptyDf.to_csv(csvPath, sep=',', index=False)
except:
emptyDf = pd.DataFrame(columns=columns)
emptyDf.to_csv(csvPath, sep=',', index=False)
insertPoint = len(df['ticker'].values)
                df.loc[insertPoint, 'ticker'] = stock_ticker  # df.loc appends the new row at the end
df.loc[insertPoint, 'clientid'] = client_id
df.to_csv(csvPathStock, sep=',', index=False)
t_msg = " Added " + stock_ticker+ " to STOCK list"
return self.contextRender(request\
,msg=t_msg)
#remove a ticker from the forex list
elif request.method == 'POST' and 'forexRow' in request.POST.keys():
fName = "static\\csv\\forexWatchList.csv"
csvPathForex = os.path.join(BASE_DIR, fName )
row_number = int(request.POST['forexRow'])
f_ticker = request.POST['forexTicker']
df = pd.read_csv(csvPathForex)
pid_insert_point = df['ticker'].values.tolist().index(f_ticker)
pid = df['pid'].iloc[pid_insert_point].astype(int)
try:
p = psutil.Process(pid)
p.terminate()
try:
fName_rt_raw = "static\\csv\\realtimeData\\"+f_ticker+"_raw_realtime_ib.csv"
fName_rt = "static\\csv\\realtimeData\\"+f_ticker+"_realtime_ib.csv"
csvPathForex_rt_raw = os.path.join(BASE_DIR, fName_rt)
csvPathForex_rt = os.path.join(BASE_DIR, fName_rt_raw)
os.remove(csvPathForex_rt_raw)
os.remove(csvPathForex_rt)
except:
pass
df.drop(df.index[row_number], inplace=True)
df.to_csv(csvPathForex, sep=',', index=False)
t_msg = "Process terminated! \n Successfully removed CSV and "\
+ f_ticker+" from FOREX list"
return self.contextRender(request\
,msg=t_msg)
except:
try:
fName_rt_raw = "static\\csv\\realtimeData\\"+f_ticker+"_raw_realtime_ib.csv"
fName_rt = "static\\csv\\realtimeData\\"+f_ticker+"_realtime_ib.csv"
csvPathForex_rt_raw = os.path.join(BASE_DIR, fName_rt)
csvPathForex_rt = os.path.join(BASE_DIR, fName_rt_raw)
os.remove(csvPathForex_rt_raw)
os.remove(csvPathForex_rt)
except:
pass
df.drop(df.index[row_number], inplace=True)
df.to_csv(csvPathForex, sep=',', index=False)
t_msg = "Successfully removed "\
+ f_ticker+" from FOREX list! \n No active "+ f_ticker+" downloads!"
return self.contextRender(request\
,msg=t_msg)
#remove a ticker from the stock list
elif request.method == 'POST' and 'stockRow' in request.POST.keys():
fName = "static\\csv\\stockWatchList.csv"
csvPathStock = os.path.join(BASE_DIR, fName )
row_number = int(request.POST['stockRow'])
s_ticker = request.POST['stockTicker']
df = pd.read_csv(csvPathStock)
pid_insert_point = df['ticker'].values.tolist().index(s_ticker)
pid = df['pid'].iloc[pid_insert_point].astype(int)
try:
# terminate quote downloads
p = psutil.Process(pid)
p.terminate()
#remove csv files
try:
fName_rt_raw = "static\\csv\\realtimeData\\"+s_ticker+"_raw_realtime_ib.csv"
fName_rt = "static\\csv\\realtimeData\\"+s_ticker+"_realtime_ib.csv"
csvPathForex_rt_raw = os.path.join(BASE_DIR, fName_rt)
csvPathForex_rt = os.path.join(BASE_DIR, fName_rt_raw)
os.remove(csvPathForex_rt_raw)
os.remove(csvPathForex_rt)
except:
pass
# remove from list
df.drop(df.index[row_number], inplace=True)
df.to_csv(csvPathStock, sep=',', index=False)
t_msg = "Process terminated! \n Successfully removed "\
+ s_ticker+" from STOCK list"
return self.contextRender(request\
,msg=t_msg)
except:
try:
fName_rt_raw = "static\\csv\\realtimeData\\"+s_ticker+"_raw_realtime_ib.csv"
fName_rt = "static\\csv\\realtimeData\\"+s_ticker+"_realtime_ib.csv"
csvPathForex_rt_raw = os.path.join(BASE_DIR, fName_rt)
csvPathForex_rt = os.path.join(BASE_DIR, fName_rt_raw)
os.remove(csvPathForex_rt_raw)
os.remove(csvPathForex_rt)
except:
pass
df.drop(df.index[row_number], inplace=True)
df.to_csv(csvPathStock, sep=',', index=False)
t_msg = " Successfully removed "\
+ s_ticker+" from STOCK list! \n No active "+ s_ticker+" downloads!"
return self.contextRender(request\
,msg=t_msg)
# get forex quote for a clicked ticker
elif request.method == 'POST' and 'forexQuote' in request.POST.keys():
fName = "static\\csv\\forexWatchList.csv"
csvPathForex = os.path.join(BASE_DIR, fName )
f_ticker = request.POST['forexQuote']
df = | pd.read_csv(csvPathForex) | pandas.read_csv |
# -*- coding: utf-8 -*-
import click
import json
import shutil
import logging
from pathlib import Path
from functools import partial
from dotenv import find_dotenv, load_dotenv
import pandas as pd
@click.group()
def main():
pass
@main.command()
@click.argument('input_filepath', type=click.Path(exists=True, dir_okay=False),
default='data/raw/iris.data')
@click.argument('output_filepath', type=click.Path(dir_okay=False),
default='data/processed/iris.csv')
def iris(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
columns = ['sepal_length',
'sepal_width',
'petal_length',
'petal_width',
'class']
logger = logging.getLogger(__name__)
logger.info('making final data set from raw iris data')
logger.info(f'reading source file from {input_filepath}')
df = | pd.read_csv(input_filepath, header=None, names=columns) | pandas.read_csv |
import Bio.SeqIO, os
import pandas as pd
import sys, time, regex
from tqdm import tqdm
start = time.time()
def main():
sAnalysis_Tag = '63_GS_PE off-target_283T_2_1rxn_220118'
BaseDIR = r'C:\Users\home\Desktop\220128_miniseq'
FASTQ_file = r'%s\%s\%s.fastq' % (BaseDIR, sAnalysis_Tag, sAnalysis_Tag.split('_')[0])
Barcode_file = r'%s\C_PECV6K_with_refseq_211112.csv' % BaseDIR
OutDIR = r'%s\%s\output_PECV6K' % (BaseDIR, sAnalysis_Tag)
sRE = '[T]{4}[ACGT]{16}' ## for PECV6K = 16 / for off-target = 20
sError = 'ErrorFree'
dict_brcd = make_bc_list_dictionary(Barcode_file)
os.makedirs(OutDIR, exist_ok=True)
dict_brcd_count, dict_read_type_count = find_barcode_in_NGSread(FASTQ_file, sRE, dict_brcd, sError)
dict_to_csv(dict_brcd_count, OutDIR, 'bc_count_%s' % sAnalysis_Tag, 1)
dict_to_csv(dict_read_type_count, OutDIR, 'read_count_%s' % sAnalysis_Tag)
def find_barcode_in_NGSread(FASTQ_file, sRE, dict_brcd, sError):
fastq_info = Bio.SeqIO.parse(FASTQ_file, 'fastq')
dict_sOutput = {brcd: [] for brcd in dict_brcd.keys()}
dict_sOutput2 = {brcd: 0 for brcd in dict_brcd.keys()}
dict_sOutput3 = {'conv': {'WT': 0, 'ED': 0, 'Other': 0},
'opti': {'WT': 0, 'ED': 0, 'Other': 0},
'Error_prone': {'WT': 0, 'ED': 0, 'Other': 0}}
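    # nested counters: for each reference class ('conv', 'opti', 'Error_prone'),
    # tally reads whose target region matches the WT sequence, the edited (ED) sequence, or neither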
    for sSeqData in tqdm(fastq_info, desc='Sorting from FASTQ data', ncols=100, total=sum(1 for _ in open(FASTQ_file)) // 4):  # a FASTQ record is 4 lines
sReadID = str(sSeqData.id)
sNGSSeq = str(sSeqData.seq)
for sReIndex in regex.finditer(sRE, sNGSSeq, overlapped=True):
nIndexStart = sReIndex.start()
nIndexEnd = sReIndex.end()
sBarcodeMatch = sNGSSeq[nIndexStart:nIndexEnd]
sRefSeqCheck = sNGSSeq[:nIndexStart+24]
sTargetSeq = sNGSSeq[nIndexEnd-2:-40]
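            # the slice offsets (+24, -2, -40) appear specific to this amplicon design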
### Skip Non-barcodes ###
try:
dict_refSeq = dict_brcd[sBarcodeMatch]
except KeyError:
continue
#########################
## Skip error in Refseq ##
if sError == 'ErrorFree':
if dict_refSeq['convRef'] in sRefSeqCheck: read_type = 'conv'
elif dict_refSeq['optiRef'] in sRefSeqCheck: read_type = 'opti'
else: read_type = 'Error_prone'
##########################
if dict_brcd[sBarcodeMatch]['WTSeq'] in reverse_complement(sTargetSeq): product_type = 'WT'
elif dict_brcd[sBarcodeMatch]['EDSeq'] in reverse_complement(sTargetSeq): product_type = 'ED'
else: product_type = 'Other'
dict_sOutput2[sBarcodeMatch] += 1
dict_sOutput3[read_type][product_type] += 1
# loop END: i, sReadLine
# loop END: sSeqData
return dict_sOutput2, dict_sOutput3
def reverse_complement(sSeq):
dict_sBases = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N', '.': '.', '*': '*',
'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
    list_sSeq = list(sSeq)  # turn the sequence into a list of single bases
list_sSeq = [dict_sBases[sBase] for sBase in list_sSeq]
return ''.join(list_sSeq)[::-1]
def make_bc_list_dictionary(Barcode_file):
df_bc_list = pd.read_csv(Barcode_file)
df_bc_list.columns = ['Barcode', 'convRef', 'optiRef', 'WTSeq', 'EDSeq']
dict_brcd = {}
for idx in df_bc_list.index:
data = df_bc_list.loc[idx]
barcode = data['Barcode'].upper()
convRef = data['convRef'].upper()
optiRef = data['optiRef'].upper()
WT_Seq = data['WTSeq'].upper()
ED_Seq = data['EDSeq'].upper()
dict_brcd[barcode] = {'convRef': convRef, 'optiRef': optiRef, 'WTSeq': WT_Seq, 'EDSeq': ED_Seq}
return dict_brcd
def dict_to_csv(dictionary, OutDIR, Output_Tag, T=0):
df = pd.DataFrame(dict([(key, | pd.Series(val) | pandas.Series |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
        # dtype cannot be inferred: the mapping suggests int,
        # but the result is actually float because unmapped values become NaN
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
| pd.testing.assert_series_equal(result, expected) | pandas.testing.assert_series_equal |
"""
This script is for exploring the implementation of DeepAR in GluonTS
"""
import json, itertools, os
import streamlit as st
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from gluonts.transform import FieldName
from gluonts.dataset.common import ListDataset
from gluonts.trainer import Trainer
from gluonts.model.deepar import DeepAREstimator
from sklearn import preprocessing
from pathlib import Path
@st.cache
def get_data(path='../../data/input/full_dataset_4_seasons.csv'):
data = pd.read_csv(path)
return data
@st.cache
def get_roster(path='../../data/input/full_roster_4_seasons.csv'):
data = pd.read_csv(path)
return data
@st.cache
def clean_duplicates(data, streamlit=False):
unique = data.loc[:, 'name'].unique()
clean = pd.DataFrame()
for player in unique:
player_df = data.loc[data.loc[:, 'name'] == player]
player_df = player_df.drop_duplicates(subset='date')
clean = pd.concat([clean, player_df])
return clean
@st.cache
def clean_rookies_retirees(data, split_from='2018-10-03'):
unique = data.loc[:, 'name'].unique()
clean = | pd.DataFrame() | pandas.DataFrame |
""" a light-weight aligner"""
import os
import sys
from pathlib import Path
import tkinter as tk
from tkinter import messagebox
# pylint: disable=unused-import
_ = """
from jinja2 import ( # type: ignore # noqa: F401
PackageLoader,
Environment,
ChoiceLoader,
FileSystemLoader,
)
# """ # pyinstaller packing difficulties
# pylint: enable=unused-import
# import torch
import numpy as np
import pandas as pd
from absl import app, flags # type: ignore
from prompt_toolkit import HTML, print_formatted_text, prompt
# import warnings
# warnings.filterwarnings("ignore")
# warnings.filterwarnings('ignore', category=torch.serialization.SourceChangeWarning)
import logzero
from logzero import logger
from light_aligner import __version__
from light_aligner.light_aligner import light_aligner
# from light_aligner.color_table_applymap import color_table_applymap
from light_aligner.save_xlsx import save_xlsx
from light_aligner.common_prefix import common_prefix
from light_aligner.gen_filename import gen_filename
from light_aligner.browse_filename import browse_filename
from light_aligner.single_or_dual import single_or_dual
from light_aligner.text_to_plist import text_to_plist
from light_aligner.load_paras import load_paras
from light_aligner.load_xlsx import load_xlsx
from light_aligner.check_anchors import check_anchors
# from light_aligner.align_sents import align_sents
from light_aligner.plist_to_slist import plist_to_slist
# tkroot = tk.Tk() # pylint: disable=invalid-name
FLAGS = flags.FLAGS
flags.DEFINE_string(
"src_file", "", "source file, browse to file location if left empty",
)
flags.DEFINE_string(
"tgt_file", "", "target file, browse to file location if left empty",
)
flags.DEFINE_boolean(
"version", False, f"light-aligner v.{__version__}",
)
flags.DEFINE_float(
"thr", None, "a threhold (0. to 1.0), default to auto-adjust",
)
flags.DEFINE_boolean("debug", False, "display annoying debug messages.")
# pylint: disable=too-many-branches, too-many-statements, too-many-locals
def main(argv):
""" __main__ main """
del argv
# root = tk.Tk()
# root.withdraw()
if FLAGS.debug:
logzero.loglevel(10) # logging.DEBUG
else:
logzero.loglevel(20) # logging.INFO
_ = [
"src_file",
"tgt_file",
"thr",
"version",
"debug",
]
# args = CIMultiDict((elm, getattr(FLAGS, elm)) for elm in _) # multidict._multidict.CIMultiDict # noqa=E501
args = dict((elm, getattr(FLAGS, elm)) for elm in _)
logger.debug("\n\t args: %s", args) # noqa=E501
if args.get("version"):
print(
f"\t bumblebee-aligner v.{__version__} brought to you by mu@qq41947782, join qq-group 316287378 to be kept updated."
)
sys.exit(0)
while 1:
        # prompt for src_file up to 3 times before giving up
_ = 0
while not args.get("src_file").strip() and _ < 3:
if _ > 0:
title = f" retry {_}: select a file"
else:
title = "Select a file"
logger.info("%s", "Select a file...")
args["src_file"] = browse_filename(title=title)
_ += 1
if not args.get("src_file").strip():
logger.warning(" Tried %s times, giving up", _)
break
# load src_text and detect dual-lang, if yes skip tgt_text
src_file = args["src_file"]
src_text = load_paras(src_file)
logger.info("file 1: %s, ..., %s", src_text[0][:100], src_text[-1][:100])
s_or_d = single_or_dual(src_text)
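        # if both English and Chinese are detected, offer to treat the file as a single dual-language source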
if "en" in s_or_d and "zh" in s_or_d:
root = tk.Tk()
root.withdraw()
ans = messagebox.askyesnocancel(
"Dual-lang %s detected" % s_or_d,
"Light-aligner thinks this is a dual-language %s file. Do you want to treat it as such?"
% s_or_d,
)
else: # normal single lang
ans = False
if ans: # branch to process dual-lang file
... # gen src_text and tgt_text
p_list = text_to_plist(src_text)
parent = Path(src_file).absolute().parent
# src_stem = Path(src_file).stem
stem = Path(src_file).stem
suffix = ".xlsx"
# tgt_stem = Path(tgt_file).stem
# stem = common_prefix([stem, tgt_stem])
# out_file = f'{parent / stem}-thr{thr}-tol{tol}{suffix}'
out_file = f"{parent / stem}{suffix}"
out_file = gen_filename(out_file)
logger.debug(" out_file: %s", out_file)
# color_table_applymap(p_list, file=out_file)
save_xlsx(p_list, file=out_file)
logger.info("\n\tFile written to **[%s]**", Path(out_file).absolute())
logger.info("\n\t Opening **[%s]**", Path(out_file).absolute())
# if args.get('startfile'):
if sys.platform == "win32":
os.startfile(out_file)
# end of align and save
# need to reset src_file tgt_file?
# +++++++++++++++
# align sents for dual ... get user input (out_file->out_file_s)
root = tk.Tk()
root.withdraw()
ans = messagebox.askyesno(
" Align sents ",
"We now proceed to aligning sentences. You may wish to edit %s and save before the next step. Continue?",
)
logger.info("messagebox.askyesno ans: %s", ans)
if ans:
try:
anchors = 0
res = load_xlsx(out_file, anchors=anchors)
except Exception as exc:
logger.error(" load_xlsx: %s", exc)
res = None
if not (isinstance(res, int) or res is None):
anchors = check_anchors(res)
ans = True
if anchors < 1:
root = tk.Tk()
root.withdraw()
ans = messagebox.askyesno(
" No anchors found!",
"This is likely invalid data. We can proceed. But the result may not be that good for long texts. Continue?",
)
if ans: # continue if anchors > 0 or anchors == 0 and ans == True
try:
# sents = align_sents(res)
slist = plist_to_slist(res)
except Exception as exc:
logger.error("align_sents: %s", exc)
slist = ""
if slist:
out_file_s = f"{Path(out_file).parent / stem}-s{suffix}"
out_file_s = gen_filename(out_file_s)
try:
writer = pd.ExcelWriter(out_file_s)
| pd.DataFrame(slist) | pandas.DataFrame |
import time
import random
import numpy as np
import pandas as pd
import hdbscan
import sklearn.datasets
from sklearn import metrics
from classix import CLASSIX
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn import preprocessing
from tqdm import tqdm
from sklearn.cluster import MeanShift
from quickshift.QuickshiftPP import *
import matplotlib.pyplot as plt
from tqdm import tqdm
import seaborn as sns
plt.style.use('bmh')
seed = 0
np.random.seed(seed)
random.seed(seed)
def test_kmeanspp_labels(X=None, y=None, _range=np.arange(2, 21, 1)):
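    """Sweep the number of clusters for k-means++ over `_range` and return the ARI and AMI score lists."""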
ar = list()
am = list()
for i in _range:
kmeans = KMeans(n_clusters=i, init='k-means++', random_state=1)
kmeans.fit(X)
ri = metrics.adjusted_rand_score(y, kmeans.labels_)
mi = metrics.adjusted_mutual_info_score(y, kmeans.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_meanshift_labels(X=None, y=None, _range=np.arange(2, 21, 1)):
ar = list()
am = list()
for i in _range:
meanshift = MeanShift(bandwidth=i)
meanshift.fit(X)
ri = metrics.adjusted_rand_score(y, meanshift.labels_)
mi = metrics.adjusted_mutual_info_score(y, meanshift.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_dbscan_labels(X=None, y=None, _range=np.arange(0.05, 0.505, 0.005), minPts=5):
ar = list()
am = list()
for i in _range:
dbscan = DBSCAN(eps=i, n_jobs=1, min_samples=minPts)
dbscan.fit(X)
ri = metrics.adjusted_rand_score(y, dbscan.labels_)
mi = metrics.adjusted_mutual_info_score(y, dbscan.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_hdbscan_labels(X=None, y=None, _range=np.arange(2, 21, 1)):
ar = list()
am = list()
for i in _range:
_hdbscan = hdbscan.HDBSCAN(min_cluster_size=int(i), algorithm='best')
_hdbscan.fit(X)
ri = metrics.adjusted_rand_score(y, _hdbscan.labels_)
mi = metrics.adjusted_mutual_info_score(y, _hdbscan.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_quickshiftpp_labels(X=None, y=None, _range=np.arange(2, 17, 1), beta=0.3):
ar = list()
am = list()
for i in _range:
quicks = QuickshiftPP(k=i, beta=beta)
quicks.fit(X.copy(order='C'))
ri = metrics.adjusted_rand_score(y, quicks.memberships)
mi = metrics.adjusted_mutual_info_score(y, quicks.memberships)
ar.append(ri)
am.append(mi)
return ar, am
def test_classix_radius_labels(X=None, y=None, method=None, minPts=1, sorting='pca', _range=np.arange(0.05, 0.3, 0.005)):
ar = list()
am = list()
for i in _range:
classix = CLASSIX(radius=i, minPts=minPts, post_alloc=True, sorting=sorting,
group_merging=method, verbose=0)
classix.fit(X)
ri = metrics.adjusted_rand_score(y, classix.labels_)
mi = metrics.adjusted_mutual_info_score(y, classix.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def run_sensitivity_test(datasets, _range, clustering='CLASSIX (Density)', fix_k=1, sorting='pca', label_files=None):
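    """Standardise one (X, y) dataset, sweep the chosen algorithm's parameter over `_range`,
    and write the resulting ARI/AMI curves to results/exp5/<label_files><clustering>.csv.
    """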
np.random.seed(1)
X, y = datasets[0], datasets[1]
nonans = np.isnan(X).sum(1) == 0
X = X[nonans,:]
y = y[nonans]
X = (X - X.mean(axis=0)) / X.std(axis=0)
if clustering == 'CLASSIX (density)':
ari, ami = test_classix_radius_labels(X=X, y=y, method='density', minPts=fix_k, sorting=sorting, _range=_range)
elif clustering == 'CLASSIX (distance)':
ari, ami = test_classix_radius_labels(X=X, y=y, method='distance', minPts=fix_k, sorting=sorting, _range=_range)
elif clustering == 'HDBSCAN':
ari, ami = test_hdbscan_labels(X=X, y=y, _range=_range)
elif clustering == 'DBSCAN':
ari, ami = test_dbscan_labels(X=X, y=y, _range=_range, minPts=fix_k)
elif clustering == 'Quickshift++':
ari, ami = test_quickshiftpp_labels(X=X, y=y, _range=_range, beta=fix_k)
elif clustering == 'k-means++':
ari, ami = test_kmeanspp_labels(X=X, y=y, _range=_range)
elif clustering == 'Meanshift':
ari, ami = test_meanshift_labels(X=X, y=y, _range=_range)
else:
raise ValueError('Specify a concrete clustering algorithms.')
store_df = pd.DataFrame()
store_df['Range'] = _range
store_df['ARI'] = ari
store_df['AMI'] = ami
store_df.to_csv('results/exp5/{}'.format(label_files)+clustering+'.csv', index=False)
def visualize_params_global():
plt.style.use('default')
datasets = ['Banknote', 'Dermatology', 'Ecoli', 'Glass', 'Iris', 'Phoneme', 'WheatSeeds', 'Wine']
algorithms = ['Meanshift', 'DBSCAN', 'HDBSCAN', 'Quickshift++', 'CLASSIX (distance)', 'CLASSIX (density)']
plot_num = 1
fontsize = 60
band = [0.5, 0.01, 0.5, 0.5, 0.015, 0.015]
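    # x-axis padding for each algorithm, in the same order as `algorithms`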
plt.figure(figsize=(8.5*len(datasets), 9*len(algorithms)))
for data in datasets:
i = 0
for algorithm in algorithms:
store_df = pd.read_csv('results/exp5/{}'.format(data)+algorithm+'.csv')
_range = store_df['Range'].values
ars = store_df['ARI'].values
ami = store_df['AMI'].values
plt.rcParams['axes.facecolor'] = 'white'
plt.subplot(len(datasets), len(algorithms), plot_num)
plt.plot(_range, ars, label='ARI', marker='o', markersize=20, c='red')
plt.plot(_range, ami, label='AMI', marker='*', markersize=18, c='darkorange')
plt.ylim(-.05, 1.05)
plt.xticks([min(_range), max(_range)])
plt.yticks([0.5, 1])
plt.xlim(-band[i]+min(_range), band[i]+max(_range))
if plot_num == len(algorithms):
plt.legend(fontsize=fontsize, ncol=2, bbox_to_anchor=(1, 1.5))
plt.tick_params(axis='both', labelsize=fontsize)
plt.grid(True)
plt.subplots_adjust(bottom=0.01, left=0.01, right=0.99, top=0.99, wspace=0.15, hspace=0.15)
plot_num = plot_num + 1
i = i + 1
plt.tight_layout()
plt.savefig('results/exp5/ARI_AMI_PARAMS.pdf', bbox_inches='tight')
def visualize_params(_range, clustering='CLASSIX (Density)', label_files=None, band=0.01, fig_interval=1):
# sns.set(font_scale=5)
store_df = pd.read_csv('results/exp5/{}'.format(label_files)+clustering+'.csv')
_range = store_df['Range'].values
ami = store_df['AMI'].values
ari = store_df['ARI'].values
plt.figure(figsize=(6, 3.6))
plt.rcParams['axes.facecolor'] = 'white'
# plt.rc('font', family='serif')
plt.plot(_range, ari, label='ARI',
marker='o', markersize=10, c='red')
plt.plot(_range, ami, label='AMI',
marker='*', markersize=8, c='darkorange')
plt.legend(fontsize=32, fancybox=True, loc='best')
plt.ylim(-.05, 1.05)
# plt.xticks(np.arange(min(_range), max(_range)+1, fig_interval))
plt.xticks([min(_range), max(_range)])
plt.yticks([0, 0.5, 1])
plt.xlim(-band+min(_range), band+max(_range))
plt.tick_params(axis='both', labelsize=32)
plt.savefig('results/exp5/{}'.format(label_files)+clustering+'.pdf', bbox_inches='tight')
# plt.show()
def params_search():
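    """Load the eight real datasets and run the parameter sensitivity sweep for every algorithm,
    writing one CSV per (dataset, algorithm) pair under results/exp5/.
    """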
datasets = []
data = pd.read_csv('data/Real_data/Banknote_authentication.csv')
X_banknote = data.drop(['4'],axis=1).values
y_banknote = data['4'].values
# print("Shape of banknote data: ", data.shape, ", labels: ", len(set(y_banknote)))
datasets.append((X_banknote, y_banknote))
data = pd.read_csv("data/Real_data/Dermatology.csv").values
X_dermatology = data[:, :data.shape[1]-1]
y_dermatology = data[:, data.shape[1]-1]
# print("Shape of Dermatology data: ", data.shape, ", labels: ", len(set(y_dermatology)))
datasets.append((X_dermatology, y_dermatology))
data = pd.read_csv("data/Real_data/Ecoli.csv").values
X_ecoli = data[:, range(data.shape[1] - 1)]
y_ecoli = data[:, data.shape[1] - 1]
# print("Shape of Ecoli data: ", data.shape, ", labels: ", len(set(y_ecoli)))
datasets.append((X_ecoli,y_ecoli))
data = pd.read_csv("data/Real_data/Glass.csv")
le = preprocessing.LabelEncoder()
data['Glass'] = le.fit_transform(data['Glass'])
X_glass = data.drop(['Glass', 'Id'],axis=1).values
y_glass = data['Glass'].values
# print("Shape of Glass data: ", data.shape, ", labels: ", len(set(y_glass)))
datasets.append((X_glass, y_glass))
data = pd.read_csv("data/Real_data/Iris.csv")
le = preprocessing.LabelEncoder()
data['Species'] = le.fit_transform(data['Species'])
X_irirs = data.drop(['Species','Id'],axis=1).values
y_irirs = data['Species'].values
# print("Shape of Iris data: ", data.shape, ", labels: ", len(set(y_irirs)))
datasets.append((X_irirs,y_irirs))
data = pd.read_csv("data/Real_data/Phoneme.csv")
le = preprocessing.LabelEncoder()
data['g'] = le.fit_transform(data['g'])
X_phoneme = data.drop(['speaker', 'g'],axis=1).values
y_phoneme = data['g'].values
# print("Shape of Phoneme data: ", data.shape, ", labels: ", len(set(y_phoneme)))
datasets.append((X_phoneme, y_phoneme))
data = pd.read_csv('data/Real_data/Seeds.csv')
X_seeds = data.drop(['7'],axis=1).values
y_seeds = data['7'].values
# print("Shape of seeds data: ", data.shape, ", labels: ", len(set(y_seeds)))
datasets.append((X_seeds, y_seeds))
data = pd.read_csv("data/Real_data/Wine.csv")
X_wine = data.drop(['14'],axis=1).values
y_wine = data['14'].values
# print("Shape of Wine data: ", data.shape, ", labels: ", len(set(y_wine)))
datasets.append((X_wine, y_wine))
# ==========================================================================
# ****************************************************************Mean shift
fig_interval = 2
band = 0.5
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[0], _range=_range, clustering='Meanshift',
label_files='Banknote')
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[1], _range=_range, clustering='Meanshift',
label_files='Dermatology')
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[2], _range=_range, clustering='Meanshift',
label_files='Ecoli')
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[3], _range=_range, clustering='Meanshift',
label_files='Glass')
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[4], _range=_range, clustering='Meanshift',
label_files='Iris')
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[5], _range=_range, clustering='Meanshift',
label_files='Phoneme')
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[6], _range=_range, clustering='Meanshift',
label_files='WheatSeeds')
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[7], _range=_range, clustering='Meanshift',
label_files='Wine')
# ==========================================================================
# ****************************************************************DBSCAN
fig_interval = 0.1
band = 0.01
_range = np.arange(0.15, 0.525, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[0], _range=_range, clustering='DBSCAN', fix_k=5,
label_files='Banknote')
_range = np.arange(5.15, 5.525, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[1], _range=_range, clustering='DBSCAN', fix_k=5,
label_files='Dermatology')
_range = np.arange(0.55, 0.925, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[2], _range=_range, clustering='DBSCAN', fix_k=5,
label_files='Ecoli')
_range = np.arange(1.55, 1.925, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[3], _range=_range, clustering='DBSCAN', fix_k=5,
label_files='Glass')
_range = np.arange(0.55, 0.925, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[4], _range=_range, clustering='DBSCAN', fix_k=5,
label_files='Iris')
_range = np.arange(9, 9.375, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[5], _range=_range, clustering='DBSCAN', fix_k=10,
label_files='Phoneme')
_range = np.arange(0.55, 0.925, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[6], _range=_range, clustering='DBSCAN', fix_k=5,
label_files='WheatSeeds')
_range = np.arange(2.2, 2.575, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[7], _range=_range, clustering='DBSCAN', fix_k=5,
label_files='Wine')
# ==========================================================================
# ****************************************************************HDBSCAN
fig_interval = 2
band = 0.5
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[0], _range=_range, clustering='HDBSCAN',
label_files='Banknote')
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[1], _range=_range, clustering='HDBSCAN',
label_files='Dermatology')
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[2], _range=_range, clustering='HDBSCAN',
label_files='Ecoli')
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[3], _range=_range, clustering='HDBSCAN',
label_files='Glass')
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[4], _range=_range, clustering='HDBSCAN',
label_files='Iris')
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[5], _range=_range, clustering='HDBSCAN',
label_files='Phoneme')
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[6], _range=_range, clustering='HDBSCAN',
label_files='WheatSeeds')
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[7], _range=_range, clustering='HDBSCAN',
label_files='Wine')
# ==========================================================================
# ****************************************************************Quickshift++
fig_interval = 2
band = 0.5
_range = np.arange(10, 25, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[0], _range=_range, clustering='Quickshift++', fix_k=0.7,
label_files='Banknote')
_range = np.arange(7, 22, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[1], _range=_range, clustering='Quickshift++', fix_k=0.3,
label_files='Dermatology')
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[2], _range=_range, clustering='Quickshift++', fix_k=0.3,
label_files='Ecoli')
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[3], _range=_range, clustering='Quickshift++', fix_k=0.3,
label_files='Glass')
_range = np.arange(10, 25, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[4], _range=_range, clustering='Quickshift++', fix_k=0.3,
label_files='Iris')
_range = np.arange(235, 250, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[5], _range=_range, clustering='Quickshift++', fix_k=0.3,
label_files='Phoneme')
_range = np.arange(10, 25, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[6], _range=_range, clustering='Quickshift++', fix_k=0.3,
label_files='WheatSeeds')
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[7], _range=_range, clustering='Quickshift++', fix_k=0.3,
label_files='Wine')
# ==========================================================================
# ****************************************************************CLASSIX distance
fig_interval = 0.1
band = 0.015
_range = np.arange(0.01, 0.375, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[0], _range=_range, clustering='CLASSIX (distance)', fix_k=6, sorting='pca',
label_files='Banknote')
_range = np.arange(0.325, 0.676, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[1], _range=_range, clustering='CLASSIX (distance)', fix_k=4, sorting='pca',
label_files='Dermatology')
_range = np.arange(0.1, 0.475, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[2], _range=_range, clustering='CLASSIX (distance)', fix_k=3, sorting='pca',
label_files='Ecoli')
_range = np.arange(0.375, 0.75, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[3], _range=_range, clustering='CLASSIX (distance)', fix_k=0, sorting='pca',
label_files='Glass')
_range = np.arange(0.15, 0.525, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[4], _range=_range, clustering='CLASSIX (distance)', fix_k=6, sorting='pca',
label_files='Iris')
_range = np.arange(0.27, 0.625, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[5], _range=_range, clustering='CLASSIX (distance)', fix_k=9, sorting='pca',
label_files='Phoneme')
_range = np.arange(0.05, 0.425, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[6], _range=_range, clustering='CLASSIX (distance)', fix_k=7, sorting='pca',
label_files='WheatSeeds')
_range = np.arange(0.2, 0.575, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[7], _range=_range, clustering='CLASSIX (distance)', fix_k=7, sorting='pca',
label_files='Wine')
# ==========================================================================
# ****************************************************************CLASSIX density
fig_interval = 0.1
band = 0.015
_range = np.arange(0.05, 0.425, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[0], _range=_range, clustering='CLASSIX (density)', fix_k=6, sorting='pca',
label_files='Banknote')
_range = np.arange(0.5, 0.875, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[1], _range=_range, clustering='CLASSIX (density)', fix_k=4, sorting='pca',
label_files='Dermatology')
_range = np.arange(0.1, 0.475, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[2], _range=_range, clustering='CLASSIX (density)', fix_k=3, sorting='pca',
label_files='Ecoli')
_range = np.arange(0.475, 0.85, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[3], _range=_range, clustering='CLASSIX (density)', fix_k=0, sorting='pca',
label_files='Glass')
_range = np.arange(0.15, 0.525, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[4], _range=_range, clustering='CLASSIX (density)', fix_k=6, sorting='pca',
label_files='Iris')
_range = np.arange(1.3, 1.675, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[5], _range=_range, clustering='CLASSIX (density)', fix_k=9, sorting='pca',
label_files='Phoneme')
_range = np.arange(0.1, 0.475, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[6], _range=_range, clustering='CLASSIX (density)', fix_k=7, sorting='pca',
label_files='WheatSeeds')
_range = np.arange(0.4, 0.775, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[7], _range=_range, clustering='CLASSIX (density)', fix_k=7, sorting='pca',
label_files='Wine')
def visualize_params_search():
# ==========================================================================
# ****************************************************************Mean shift
fig_interval = 2
band = 0.5
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='Banknote', band=band, fig_interval=fig_interval)
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='Dermatology', band=band, fig_interval=fig_interval)
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='Ecoli', band=band, fig_interval=fig_interval)
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='Glass', band=band, fig_interval=fig_interval)
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='Iris', band=band, fig_interval=fig_interval)
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='Phoneme', band=band, fig_interval=fig_interval)
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='WheatSeeds', band=band, fig_interval=fig_interval)
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='Wine', band=band, fig_interval=fig_interval)
# ==========================================================================
# ****************************************************************DBSCAN
fig_interval = 0.1
band = 0.01
_range = np.arange(0.15, 0.525, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='Banknote', band=band, fig_interval=fig_interval)
_range = np.arange(5.15, 5.525, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='Dermatology', band=band, fig_interval=fig_interval)
_range = np.arange(0.55, 0.925, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='Ecoli', band=band, fig_interval=fig_interval)
_range = np.arange(1.55, 1.925, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='Glass', band=band, fig_interval=fig_interval)
_range = np.arange(0.55, 0.925, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='Iris', band=band, fig_interval=fig_interval)
_range = np.arange(9, 9.375, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='Phoneme', band=band, fig_interval=fig_interval)
_range = np.arange(0.55, 0.925, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='WheatSeeds', band=band, fig_interval=fig_interval)
_range = np.arange(2.2, 2.575, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='Wine', band=band, fig_interval=fig_interval)
# ==========================================================================
# ****************************************************************HDBSCAN
fig_interval = 2
band = 0.5
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='Banknote', band=band, fig_interval=fig_interval)
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='Dermatology', band=band, fig_interval=fig_interval)
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='Ecoli', band=band, fig_interval=fig_interval)
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='Glass', band=band, fig_interval=fig_interval)
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='Iris', band=band, fig_interval=fig_interval)
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='Phoneme', band=band, fig_interval=fig_interval)
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='WheatSeeds', band=band, fig_interval=fig_interval)
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='Wine', band=band, fig_interval=fig_interval)
# ==========================================================================
# ****************************************************************Quickshift++
fig_interval = 2
band = 0.5
_range = np.arange(10, 25, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='Banknote', band=band, fig_interval=fig_interval)
_range = np.arange(7, 22, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='Dermatology', band=band, fig_interval=fig_interval)
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='Ecoli', band=band, fig_interval=fig_interval)
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='Glass', band=band, fig_interval=fig_interval)
_range = np.arange(10, 25, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='Iris', band=band, fig_interval=fig_interval)
_range = np.arange(235, 250, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='Phoneme', band=band, fig_interval=fig_interval+5)
_range = np.arange(10, 25, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='WheatSeeds', band=band, fig_interval=fig_interval)
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='Wine', band=band, fig_interval=fig_interval)
# ==========================================================================
# ****************************************************************CLASSIX distance
fig_interval = 0.1
band = 0.015
_range = np.arange(0.01, 0.375, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='Banknote', band=band, fig_interval=fig_interval)
_range = np.arange(0.325, 0.676, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='Dermatology', band=band, fig_interval=fig_interval)
_range = np.arange(0.1, 0.475, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='Ecoli', band=band, fig_interval=fig_interval)
_range = np.arange(0.375, 0.75, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='Glass', band=band, fig_interval=fig_interval)
_range = np.arange(0.15, 0.525, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='Iris', band=band, fig_interval=fig_interval)
_range = np.arange(0.27, 0.625, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='Phoneme', band=band, fig_interval=fig_interval)
_range = np.arange(0.05, 0.425, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='WheatSeeds', band=band, fig_interval=fig_interval)
_range = np.arange(0.2, 0.575, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='Wine', band=band, fig_interval=fig_interval)
# ==========================================================================
# ****************************************************************CLASSIX density
fig_interval = 0.1
band = 0.015
_range = np.arange(0.05, 0.425, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='Banknote', band=band, fig_interval=fig_interval)
_range = np.arange(0.5, 0.875, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='Dermatology', band=band, fig_interval=fig_interval)
_range = np.arange(0.1, 0.475, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='Ecoli', band=band, fig_interval=fig_interval)
_range = np.arange(0.475, 0.85, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='Glass', band=band, fig_interval=fig_interval)
_range = np.arange(0.15, 0.525, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='Iris', band=band, fig_interval=fig_interval)
_range = np.arange(1.3, 1.675, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='Phoneme', band=band, fig_interval=fig_interval)
_range = np.arange(0.1, 0.475, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='WheatSeeds', band=band, fig_interval=fig_interval)
_range = np.arange(0.4, 0.775, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='Wine', band=band, fig_interval=fig_interval)
def compare_best_params():
datasets = []
data = pd.read_csv("data/Real_data/Iris.csv")
le = preprocessing.LabelEncoder()
data['Species'] = le.fit_transform(data['Species'])
X_irirs = data.drop(['Species','Id'],axis=1).values
y_irirs = data['Species'].values
datasets.append((X_irirs, y_irirs))
data = pd.read_csv("data/Real_data/Dermatology.csv").values
X_dermatology = data[:, :data.shape[1]-1]
y_dermatology = data[:, data.shape[1]-1]
datasets.append((X_dermatology, y_dermatology))
data = pd.read_csv("data/Real_data/Ecoli.csv").values
X_ecoli = data[:, range(data.shape[1] - 1)]
y_ecoli = data[:, data.shape[1] - 1]
datasets.append((X_ecoli, y_ecoli))
data = pd.read_csv("data/Real_data/Glass.csv")
le = preprocessing.LabelEncoder()
data['Glass'] = le.fit_transform(data['Glass'])
X_glass = data.drop(['Glass', 'Id'],axis=1).values
y_glass = data['Glass'].values
datasets.append((X_glass, y_glass))
data = pd.read_csv("data/Real_data/Banknote_authentication.csv")
X_banknote = data.drop(['4'],axis=1).values
y_banknote = data['4'].values
datasets.append((X_banknote, y_banknote))
data = pd.read_csv("data/Real_data/Seeds.csv")
X_seeds = data.drop(['7'],axis=1).values
y_seeds = data['7'].values
datasets.append((X_seeds, y_seeds))
data = pd.read_csv("data/Real_data/Phoneme.csv")
le = preprocessing.LabelEncoder()
data['g'] = le.fit_transform(data['g'])
X_phoneme = data.drop(['speaker', 'g'],axis=1).values
y_phoneme = data['g'].values
datasets.append((X_phoneme, y_phoneme))
data = pd.read_csv("data/Real_data/Wine.csv")
X_wine = data.drop(['14'],axis=1).values
y_wine = data['14'].values
datasets.append((X_wine, y_wine))
clean_datasets = []
x_Banknote, y_Banknote = datasets[4][0], datasets[4][1] # Banknote
nonans = np.isnan(x_Banknote).sum(1) == 0
x_Banknote = x_Banknote[nonans,:]
y_Banknote = y_Banknote[nonans]
x_Banknote = (x_Banknote - x_Banknote.mean(axis=0)) / x_Banknote.std(axis=0)
clean_datasets.append((x_Banknote, y_Banknote))
x_Dermatology, y_Dermatology = datasets[1][0], datasets[1][1] # Dermatology
nonans = np.isnan(x_Dermatology).sum(1) == 0
x_Dermatology = x_Dermatology[nonans,:]
y_Dermatology = y_Dermatology[nonans]
x_Dermatology = (x_Dermatology - x_Dermatology.mean(axis=0)) / x_Dermatology.std(axis=0)
clean_datasets.append((x_Dermatology, y_Dermatology))
x_Ecoli, y_Ecoli = datasets[2][0], datasets[2][1] # Ecoli
nonans = np.isnan(x_Ecoli).sum(1) == 0
x_Ecoli = x_Ecoli[nonans,:]
y_Ecoli = y_Ecoli[nonans]
x_Ecoli = (x_Ecoli - x_Ecoli.mean(axis=0)) / x_Ecoli.std(axis=0)
clean_datasets.append((x_Ecoli, y_Ecoli))
x_Glass, y_Glass = datasets[3][0], datasets[3][1] # Glass
nonans = np.isnan(x_Glass).sum(1) == 0
x_Glass = x_Glass[nonans,:]
y_Glass = y_Glass[nonans]
x_Glass = (x_Glass - x_Glass.mean(axis=0)) / x_Glass.std(axis=0)
clean_datasets.append((x_Glass, y_Glass))
x_Iris, y_Iris = datasets[0][0], datasets[0][1] # Iris
nonans = np.isnan(x_Iris).sum(1) == 0
x_Iris = x_Iris[nonans,:]
y_Iris = y_Iris[nonans]
x_Iris = (x_Iris - x_Iris.mean(axis=0)) / x_Iris.std(axis=0)
clean_datasets.append((x_Iris, y_Iris))
    x_Phoneme, y_Phoneme = datasets[6][0], datasets[6][1] # Phoneme
nonans = np.isnan(x_Phoneme).sum(1) == 0
x_Phoneme = x_Phoneme[nonans,:]
y_Phoneme = y_Phoneme[nonans]
x_Phoneme = (x_Phoneme - x_Phoneme.mean(axis=0)) / x_Phoneme.std(axis=0)
clean_datasets.append((x_Phoneme, y_Phoneme))
x_Seeds, y_Seeds = datasets[5][0], datasets[5][1] # Seeds
nonans = np.isnan(x_Seeds).sum(1) == 0
x_Seeds = x_Seeds[nonans,:]
y_Seeds = y_Seeds[nonans]
x_Seeds = (x_Seeds - x_Seeds.mean(axis=0)) / x_Seeds.std(axis=0)
clean_datasets.append((x_Seeds, y_Seeds))
    x_Wine, y_Wine = datasets[7][0], datasets[7][1] # Wine
nonans = np.isnan(x_Wine).sum(1) == 0
x_Wine = x_Wine[nonans,:]
y_Wine = y_Wine[nonans]
x_Wine = (x_Wine - x_Wine.mean(axis=0)) / x_Wine.std(axis=0)
clean_datasets.append((x_Wine, y_Wine))
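    # clean_datasets holds the z-scored data in the same order as the parameter
    # dicts below: Banknote, Dermatology, Ecoli, Glass, Iris, Phoneme, Seeds, Wine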
meanshift_params = {"Banknote":{'bandwidth':1},
"Dermatology":{'bandwidth':5},
"Ecoli":{'bandwidth':2},
"Glass":{'bandwidth':2},
"Iris":{'bandwidth':2},
"Phoneme":{'bandwidth':12},
"Seeds":{'bandwidth':2},
"Wine":{'bandwidth':3}
}
dname = list(meanshift_params.keys())
meanshift_mi = list()
meanshift_ri = list()
for i, dataset in enumerate(clean_datasets):
X, y = dataset[0], dataset[1]
np.random.seed(1)
meanshift = MeanShift(**meanshift_params[dname[i]])
st = time.time()
meanshift.fit(X)
ami = metrics.adjusted_mutual_info_score(y.astype(int), meanshift.labels_.astype(int))
ari = metrics.adjusted_rand_score(y.astype(int), meanshift.labels_.astype(int))
meanshift_mi.append(ami)
meanshift_ri.append(ari)
dbscan_params = {"Banknote":{'eps':0.475, 'min_samples':5},
"Dermatology":{'eps':5.275, 'min_samples':5},
"Ecoli":{'eps':0.875, 'min_samples':5},
"Glass":{'eps':1.7, 'min_samples':5},
"Iris":{'eps':0.8, 'min_samples':5},
"Phoneme":{'eps':9.175, 'min_samples':10},
"Seeds":{'eps':0.875, 'min_samples':5},
"Wine":{'eps':2.425, 'min_samples':5},
}
dname = list(dbscan_params.keys())
dbscan_mi = list()
dbscan_ri = list()
for i, dataset in enumerate(clean_datasets):
X, y = dataset[0], dataset[1]
np.random.seed(1)
dbscan = DBSCAN(**dbscan_params[dname[i]])
st = time.time()
dbscan.fit(X)
ami = metrics.adjusted_mutual_info_score(y.astype(int), dbscan.labels_.astype(int))
ari = metrics.adjusted_rand_score(y.astype(int), dbscan.labels_.astype(int))
dbscan_mi.append(ami)
dbscan_ri.append(ari)
hdbscan_params = {"Banknote":{'min_cluster_size':16},
"Dermatology":{'min_cluster_size':5},
"Ecoli":{'min_cluster_size':9},
"Glass":{'min_cluster_size':8},
"Iris":{'min_cluster_size':9},
"Phoneme":{'min_cluster_size':3},
"Seeds":{'min_cluster_size':3},
"Wine":{'min_cluster_size':3},
}
dname = list(hdbscan_params.keys())
hdbscan_mi = list()
hdbscan_ri = list()
for i, dataset in enumerate(clean_datasets):
X, y = dataset[0], dataset[1]
np.random.seed(1)
_hdbscan = hdbscan.HDBSCAN(**hdbscan_params[dname[i]])
st = time.time()
_hdbscan.fit(X)
ami = metrics.adjusted_mutual_info_score(y.astype(int), _hdbscan.labels_.astype(int))
ari = metrics.adjusted_rand_score(y.astype(int), _hdbscan.labels_.astype(int))
hdbscan_mi.append(ami)
hdbscan_ri.append(ari)
quickshiftpp_params = {"Banknote":{'k':20, 'beta':0.7},
"Dermatology":{'k':20, 'beta':0.3},
"Ecoli":{'k':14, 'beta':0.3},
"Glass":{'k':17, 'beta':0.3},
"Iris":{'k':18, 'beta':0.3},
"Phoneme":{'k':245, 'beta':0.3},
"Seeds":{'k':21, 'beta':0.3},
"Wine":{'k':16, 'beta':0.3},
}
dname = list(quickshiftpp_params.keys())
quicks_mi = list()
quicks_ri = list()
for i, dataset in enumerate(clean_datasets):
X, y = dataset[0], dataset[1]
np.random.seed(1)
quicks = QuickshiftPP(**quickshiftpp_params[dname[i]])
st = time.time()
quicks.fit(X.copy(order='C'))
ami = metrics.adjusted_mutual_info_score(y.astype(int), quicks.memberships.astype(int))
ari = metrics.adjusted_rand_score(y.astype(int), quicks.memberships.astype(int))
quicks_mi.append(ami)
quicks_ri.append(ari)
clx_dist_params = {"Banknote":{'radius':0.21, 'group_merging':'distance', 'minPts':41, 'verbose':0},
"Dermatology":{'radius':0.4, 'group_merging':'distance','minPts':4, 'verbose':0},
"Ecoli":{'radius':0.3, 'group_merging':'distance', 'minPts':4, 'verbose':0},
"Glass":{'radius':0.725, 'group_merging':'distance', 'minPts':0, 'verbose':0},
"Iris":{'radius':0.225, 'group_merging':'distance', 'minPts':4, 'verbose':0},
"Phoneme":{'radius':0.445, 'group_merging':'distance', 'minPts':8, 'verbose':0},
"Seeds":{'radius':0.15, 'group_merging':'distance', 'minPts':9, 'verbose':0},
"Wine":{'radius':0.425, 'group_merging':'distance', 'minPts':4, 'verbose':0}
}
dname = list(clx_dist_params.keys())
clx_dis_mi = list()
clx_dis_ri = list()
for i, dataset in enumerate(clean_datasets):
X, y = dataset[0], dataset[1]
np.random.seed(1)
clx = CLASSIX(**clx_dist_params[dname[i]])
st = time.time()
clx.fit(X)
ari = metrics.adjusted_rand_score(y.astype(int), clx.labels_.astype(int))
ami = metrics.adjusted_mutual_info_score(y.astype(int), clx.labels_.astype(int))
clx_dis_ri.append(ari)
clx_dis_mi.append(ami)
clx_den_params = {"Banknote":{'radius':0.28, 'group_merging':'density', 'minPts':40, 'verbose':0},
"Dermatology":{'radius':0.6, 'group_merging':'density','minPts':4, 'verbose':0},
"Ecoli":{'radius':0.375, 'group_merging':'density', 'minPts':4, 'verbose':0},
"Glass":{'radius':0.62, 'group_merging':'density', 'minPts':0, 'verbose':0},
"Iris":{'radius':0.275, 'group_merging':'density', 'minPts':7, 'verbose':0},
"Phoneme":{'radius':1.425, 'group_merging':'density', 'minPts':460, 'verbose':0},
"Seeds":{'radius':0.25, 'group_merging':'density', 'minPts':7, 'verbose':0},
"Wine":{'radius':0.58, 'group_merging':'density', 'minPts':7, 'verbose':0}
}
dname = list(clx_den_params.keys())
clx_den_mi = list()
clx_den_ri = list()
for i, dataset in enumerate(clean_datasets):
X, y = dataset[0], dataset[1]
np.random.seed(1)
clx = CLASSIX(**clx_den_params[dname[i]])
st = time.time()
clx.fit(X)
ari = metrics.adjusted_rand_score(y.astype(int), clx.labels_.astype(int))
ami = metrics.adjusted_mutual_info_score(y.astype(int), clx.labels_.astype(int))
clx_den_ri.append(ari)
clx_den_mi.append(ami)
ri_df = pd.DataFrame()
mi_df = pd.DataFrame()
ri_df['dataset'] = dname
ri_df['meanshift'] = meanshift_ri
ri_df['dbscan'] = dbscan_ri
ri_df['hdbscan'] = hdbscan_ri
ri_df['quickshift++'] = quicks_ri
ri_df['classix (dis)'] = clx_dis_ri
ri_df['classix (den)'] = clx_den_ri
mi_df['dataset'] = dname
mi_df['meanshift'] = meanshift_mi
mi_df['dbscan'] = dbscan_mi
mi_df['hdbscan'] = hdbscan_mi
mi_df['quickshift++'] = quicks_mi
mi_df['classix (dis)'] = clx_dis_mi
mi_df['classix (den)'] = clx_den_mi
mi_df.to_csv("results/exp5/mi_real_cluster_quality.csv", index=False)
ri_df.to_csv("results/exp5/ri_real_cluster_quality.csv", index=False)
def kamil_industry_test():
X_pca = pd.read_csv("data/Kamil/PCA_clustering.csv").values
# preprocess data
final_len = 100000
outliers_position = np.where(X_pca[:,0] > 7.5)[0]
no_outliers_position = np.delete(np.arange(0, len(X_pca[:,0])), outliers_position, axis=0)
outlier_len = len(outliers_position)
data_no_outliers_length = int(final_len - outlier_len)
data_outliers = X_pca[outliers_position, :]
data_no_outliers = np.delete(X_pca, outliers_position, axis=0)
random_integers = np.arange(0, len(no_outliers_position))
np.random.shuffle(random_integers)
data_no_outliers_out = data_no_outliers[random_integers[data_no_outliers_length:],:]
data_no_outliers = data_no_outliers[random_integers[:data_no_outliers_length],:]
X = np.concatenate((data_no_outliers, data_outliers))
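    # X now contains final_len (100000) points in total: a random subsample of the
    # inliers plus every outlier (points whose first PCA coordinate exceeds 7.5)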
sample_size = 10
kamil_timing = []
sum_time = 0
for i in range(sample_size):
st = time.time()
dbscan = DBSCAN(eps=0.7, min_samples=6)
dbscan.fit(X)
et = time.time()
sum_time = sum_time + et - st
kamil_timing.append(sum_time/sample_size)
# print("Average consume time: ", sum_time/sample_size)
plt.figure(figsize=(24,10))
plt.scatter(X[:,0], X[:,1], c=dbscan.labels_, cmap='jet')
# cbar = plt.colorbar()
# cbar.ax.tick_params(labelsize=15)
plt.tick_params(axis='both', labelsize=28)
plt.savefig('results/exp5/DBSCAN_kamil.png', bbox_inches='tight')
# plt.show()
sum_time = 0
for i in range(sample_size):
st = time.time()
_hdbscan = hdbscan.HDBSCAN(min_cluster_size=1000, core_dist_n_jobs=1)
hdbscan_labels = _hdbscan.fit_predict(X)
et = time.time()
sum_time = sum_time + et - st
kamil_timing.append(sum_time/sample_size)
# print("Average consume time: ", sum_time/sample_size)
plt.figure(figsize=(24,10))
plt.scatter(X[:,0], X[:,1], c=hdbscan_labels, cmap='jet')
# cbar = plt.colorbar()
# cbar.ax.tick_params(labelsize=15)
plt.tick_params(axis='both', labelsize=28)
plt.savefig('results/exp5/HDBSCAN_kamil.png', bbox_inches='tight')
# plt.show()
sum_time = 0
for i in range(sample_size):
st = time.time()
quicks = QuickshiftPP(k=800, beta=0.7)
quicks.fit(X.copy(order='C'))
quicks_labels = quicks.memberships
et = time.time()
sum_time = sum_time + et - st
kamil_timing.append(sum_time/sample_size)
# print("Average consume time: ", sum_time/sample_size)
plt.figure(figsize=(24,10))
plt.scatter(X[:,0], X[:,1], c=quicks_labels, cmap='jet')
# cbar = plt.colorbar()
# cbar.ax.tick_params(labelsize=15)
plt.tick_params(axis='both', labelsize=28)
plt.savefig('results/exp5/Quickshiftpp_kamil.png', bbox_inches='tight')
# plt.show()
sum_time = 0
for i in range(sample_size):
st = time.time()
clx = CLASSIX(sorting='pca', radius=0.3, verbose=0, group_merging='distance')
clx.fit_transform(X)
et = time.time()
sum_time = sum_time + et - st
kamil_timing.append(sum_time/sample_size)
# print("Average consume time: ", sum_time/sample_size)
plt.figure(figsize=(24,10))
plt.scatter(X[:,0], X[:,1], c=clx.labels_, cmap='jet')
# cbar = plt.colorbar()
# cbar.ax.tick_params(labelsize=15)
plt.tick_params(axis='both', labelsize=28)
plt.savefig('results/exp5/CLASSIX_kamil.png', bbox_inches='tight')
# plt.show()
kamil_timing = | pd.DataFrame(kamil_timing) | pandas.DataFrame |
# Robust Bayesian Binary logistic regression in 1d for iris flowers
# Code is based on
# https://github.com/aloctavodia/BAP/blob/master/code/Chp4/04_Generalizing_linear_models.ipynb
import superimport
import pymc3 as pm
import numpy as np
import pandas as pd
import theano.tensor as tt
#import seaborn as sns
import scipy.stats as stats
from scipy.special import expit as logistic
import matplotlib.pyplot as plt
import arviz as az
from sklearn.datasets import load_iris
import pyprobml_utils as pml
iris = load_iris()
X = iris.data
y = iris.target
# Convert to pandas dataframe
df_iris = pd.DataFrame(data=iris.data,
columns=['sepal_length', 'sepal_width',
'petal_length', 'petal_width'])
df_iris['species'] = pd.Series(iris.target_names[y], dtype='category')
df = df_iris.query("species == ('setosa', 'versicolor')")
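# keep only two species (setosa, versicolor) so the logistic regression is binary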
y_0 = | pd.Categorical(df['species']) | pandas.Categorical |
import pandas as pd
import numpy as np
import requests
import random
import urllib
import json
import time
import sys
import datetime
from datetime import date
from bs4 import BeautifulSoup
from selenium import webdriver
from discourse_ordering import DiscourseOrderingClass
from twitter_api import TwitterClass
import os
class HelperClassTempo:
"""
    Helper class with auxiliary methods
"""
def __init__(self):
# mapeamento de meses
self.dict_map_mes = {1: 'janeiro',
2: 'fevereiro',
3: 'março',
4: 'abril',
5: 'maio',
6: 'junho',
7: 'julho',
8: 'agosto',
9: 'setembro',
10: 'outubro',
11: 'novembro',
12: 'dezembro'
}
# dia atual
print (self.get_dia_atual())
# path atual
self.current_path = str(os.getcwd())
# path do chromedriver
self.path_to_chromedriver = os.path.join(self.current_path, 'chromedriver')
# API do Twitter
self.twitter_api = TwitterClass()
# arquivos auxiliares
self.path_infos_cidades = os.path.join(self.current_path, "cidades.csv")
self.path_bd = os.path.join(self.current_path, "cidades_bd.csv")
path_credenciais_user_agent = os.path.join(self.current_path, "credenciais_user_agent.json")
path_intents = os.path.join(self.current_path, "intents.json")
path_analisador_lexico = os.path.join(self.current_path, "analisador_lexico.json")
self.discourse_ordering_object = DiscourseOrderingClass()
# leitura do arquivo json com as credenciais
try:
f = open(path_credenciais_user_agent, mode="r")
infos_login = json.load(f)
self.user_agent = infos_login['user_agent']
f.close()
except:
self.user_agent = "temporary_agent"
# leitura do arquivo json com os intents
f = open(path_intents, encoding='utf-8', mode="r")
self.dict_intents = json.load(f)
f.close()
# leitura do arquivo json com o analisador léxico
f = open(path_analisador_lexico, mode="r")
dict_json_lexico = json.load(f)
f.close()
self.dict_analisador_lexico = {}
self.lista_palavras_analisador_lexico = set()
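        # lexicon keys are expected to look like "[tag] word" with "numero=..." and
        # "genero=..." metadata; they are re-keyed below as "word|number|gender"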
for chave, valor in dict_json_lexico.items():
palavra = chave.split(']')[1].strip()
numero = chave.split('numero=')[1].split(',')[0].strip()
genero = chave.split('genero=')[1].split(']')[0].strip()
chave = f"{palavra}|{numero}|{genero}"
self.dict_analisador_lexico[chave] = valor.strip()
self.lista_palavras_analisador_lexico.add(palavra)
self.lista_palavras_analisador_lexico = list(self.lista_palavras_analisador_lexico)
# parametros do webdriver
self.chromeOptions = webdriver.ChromeOptions()
self.chromeOptions.add_argument('--no-sandbox')
self.chromeOptions.add_argument("--headless")
self.chromeOptions.add_argument(f"user-agent={self.user_agent}")
# parâmetros
self.url_tabua_mares = "https://www.tideschart.com"
self.tempo_espera_tweet_segundos = 60
self.qtd_cidades_selecionadas = 15
self.qtd_min_dias_consecutivos = 10
self.multiplicador_std = 1.3
self.altura_mare_ruim = 1.6
self.filler_tempo = 'céu nublado'
self.modulo = 'tempo'
# temperaturas max e min possíveis (validação conceitual)
self.maior_temperatura_possivel = 55
self.menor_temperatura_possivel = -10
# icones
self.icone_up = '▲'
self.icone_down = '▼'
# df cidades
self.df_cidades = pd.read_csv(self.path_infos_cidades, encoding='latin-1', sep=';')
# colunas para atribuir valor
self.lista_colunas_tempo = ['cidade',
'uf',
'tempo',
'temperatura',
'temperatura_max',
'temperatura_min',
'nebulosidade',
'umidade',
'vento',
'horario_por_sol',
'pesca',
'melhor_horario_pesca',
'altura_maior_onda',
'texto_onda',
'url_imagem']
# colunas para atribuir valor
self.lista_colunas_salvar = ['cidade',
'uf',
'tempo',
'temperatura',
'temperatura_max',
'temperatura_min',
'nebulosidade',
'umidade',
'vento',
'horario_por_sol',
'pesca',
'melhor_horario_pesca',
'altura_maior_onda',
'data']
# se não existe arquivo de bd, cria
if not os.path.exists(self.path_bd):
pd.DataFrame(columns=self.lista_colunas_salvar).to_csv(self.path_bd, sep=';', index=False)
# colunas de clima
f = open("mapeamento_climas.json", mode="r", encoding="utf-8")
self.dict_map_clima = json.load(f)
f.close()
# colunas de pesca
self.dict_map_pesca = {'Today is an excellent fishing day': 'Excelente',
'Today is a good fishing day': 'Bom',
'Today is an average fishing day': 'Mediano'}
# cidades
self.lista_cidades_em = ['Rio de Janeiro']
# path dos conteúdos do site
self.path_url_imagem = '//*[@id="map"]'
self.path_tempo1 = '//*[@id="main-content"]/div/div/div/div[1]/div[7]/div[1]/div/div[1]'
self.path_temperatura1 = '//*[@id="main-content"]/div/div/div/div[1]/div[7]/div[2]/div/div[1]'
self.path_temperatura_max_min1 = '//*[@id="main-content"]/div/div/div/div[1]/div[7]/div[2]/div/div[2]'
self.path_nebulosidade1 = '//*[@id="main-content"]/div/div/div/div[1]/div[7]/div[1]/div/div[2]'
self.path_umidade1 = '//*[@id="main-content"]/div/div/div/div[1]/div[7]/div[4]/div/div[1]'
self.path_vento1 = '//*[@id="main-content"]/div/div/div/div[1]/div[7]/div[3]/div/div[1]'
self.path_tempo2 = '//*[@id="main-content"]/div/div/div/div[1]/div[4]/div[1]/div/div[1]'
self.path_temperatura2 = '//*[@id="main-content"]/div/div/div/div[1]/div[4]/div[2]/div/div[1]'
self.path_temperatura_max_min2 = '//*[@id="main-content"]/div/div/div/div[1]/div[4]/div[2]/div/div[2]'
self.path_nebulosidade2 = '//*[@id="main-content"]/div/div/div/div[1]/div[4]/div[1]/div/div[2]'
self.path_umidade2 = '//*[@id="main-content"]/div/div/div/div[1]/div[4]/div[4]/div/div[1]'
self.path_vento2 = '//*[@id="main-content"]/div/div/div/div[1]/div[4]/div[3]/div/div[1]'
self.path_situacao_pesca1 = '//*[@id="main-content"]/div/div/div/div[1]/div[5]/div/h2/span'
self.path_situacao_pesca2 = '//*[@id="main-content"]/div/div/div/div[1]/div[3]/div/h2/span'
self.path_horario_por_sol = '//*[@id="main-content"]/div/div/div/div[1]/div[3]/div/div/table/tbody/tr[1]/td[7]'
self.path_melhor_horario_pesca1 = '//*[@id="main-content"]/div/div/div/div[1]/div[5]/div/div/div[1]/div/h4'
self.path_melhor_horario_pesca2 = '//*[@id="main-content"]/div/div/div/div[1]/div[5]/div/div/div[1]/div/h4[2]'
self.path_por_sol = '//*[@id="main-content"]/div/div/div/div[1]/div[3]/div/div/table/tbody/tr[1]/td[7]'
self.path_mare1 = '//*[@id="main-content"]/div/div/div/div[1]/div[3]/div/div/table/tbody/tr[1]/td[2]'
self.path_mare2 = '//*[@id="main-content"]/div/div/div/div[1]/div[3]/div/div/table/tbody/tr[2]/td[3]'
self.path_mare3 = '//*[@id="main-content"]/div/div/div/div[1]/div[3]/div/div/table/tbody/tr[1]/td[4]'
self.path_mare4 = '//*[@id="main-content"]/div/div/div/div[1]/div[3]/div/div/table/tbody/tr[1]/td[5]'
def get_dia_atual(self):
'''
        Return today's date as a formatted string (day, Portuguese month name, year)
'''
# data de hoje
dia = date.today().strftime("%d")
mes = self.dict_map_mes[int(date.today().strftime("%m"))]
ano = date.today().strftime("%Y")
return f"{dia} de {mes} de {ano}"
def trata_mare(self, elemento):
'''
        Parse a scraped tide cell into (time, height, direction)
'''
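        # illustrative, assuming the scraped cell reads e.g. "4:12am▲ 1.3 m":
        # -> horario "4:12am", altura 1.3, simbolo "up" (▲ rising, ▼ falling)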
horario = (elemento.split('m')[0] + 'm').strip()
altura = float(elemento.split(' ')[1].strip())
simbolo = elemento.split(' ')[0][-1].strip()
if simbolo == self.icone_up:
simbolo = 'up'
else:
simbolo = 'down'
return (horario, altura, simbolo)
def get_analisador_lexico(self, palavra, numero, genero):
'''
        Return the lexicon entry for the given word, grammatical number and gender
'''
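        # e.g. (hypothetical key) get_analisador_lexico('previsto', 'singular', 'masculino')
        # returns the inflected form stored under 'previsto|singular|masculino'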
return (self.dict_analisador_lexico[f"{palavra}|{numero}|{genero}"])
def count_dias(self, data_hoje, lista_datas):
'''
        Return the number of consecutive days before today that appear in lista_datas
'''
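        # walk backwards one day at a time from today and stop at the first
        # date missing from lista_datas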
qtd_dias = 0
data = data_hoje
while (True):
data = data - datetime.timedelta(1)
if data.strftime("%d/%m/%Y") in lista_datas:
qtd_dias+=1
else:
break
return qtd_dias
def count_valor(self, valor_hoje, lista_valores, tipo_operacao):
'''
        Return the number of consecutive days for which today's value satisfies the given comparison ('maior'/'menor')
'''
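        # lista_valores is assumed to be ordered from the most recent day backwards;
        # streaks shorter than qtd_min_dias_consecutivos are reported as 0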
qtd_dias = 0
if tipo_operacao == 'maior':
for valor in lista_valores:
if (valor_hoje > valor):
qtd_dias +=1
else:
break
elif tipo_operacao == 'menor':
for valor in lista_valores:
if (valor_hoje < valor):
qtd_dias +=1
else:
break
else:
return 0
# compara com mínimo
if (qtd_dias < self.qtd_min_dias_consecutivos):
return 0
# retorna qtd de dias
return qtd_dias
def gera_df_tabua_mares(self):
'''
        Scrape weather and tide data for every city and return the results as a DataFrame
'''
lista_infos = []
# itera cidades
for index, row in self.df_cidades.iterrows():
try:
cidade = row['Cidade']
uf = row['UF']
valor = row['Tabua_mares']
# cria urls
url_dia = f"{self.url_tabua_mares}/{valor}"
# entra na url
driver = webdriver.Chrome(self.path_to_chromedriver, options=self.chromeOptions)
driver.get(url_dia)
time.sleep(2)
# leitura do conteúdo
try:
tempo = driver.find_element_by_xpath(self.path_tempo1).text
except:
try:
tempo = driver.find_element_by_xpath(self.path_tempo2).text
except Exception as e:
print (f'Erro na cidade {cidade}! {e}')
continue
try:
temperatura = int((driver.find_element_by_xpath(self.path_temperatura1).text).split('°C')[0])
except:
try:
temperatura = int((driver.find_element_by_xpath(self.path_temperatura2).text).split('°C')[0])
except Exception as e:
print (f'Erro na cidade {cidade}! {e}')
continue
try:
temperatura_max_min = driver.find_element_by_xpath(self.path_temperatura_max_min1).text
except:
try:
temperatura_max_min = driver.find_element_by_xpath(self.path_temperatura_max_min2).text
except Exception as e:
print (f'Erro na cidade {cidade}! {e}')
continue
try:
nebulosidade = int(driver.find_element_by_xpath(self.path_nebulosidade1).text.split('Cloud cover ')[1].split('%')[0])
except:
try:
nebulosidade = int(driver.find_element_by_xpath(self.path_nebulosidade2).text.split('Cloud cover ')[1].split('%')[0])
except Exception as e:
print (f'Erro na cidade {cidade}! {e}')
continue
try:
umidade = int(driver.find_element_by_xpath(self.path_umidade1).text.split('%')[0])
except:
try:
umidade = int(driver.find_element_by_xpath(self.path_umidade2).text.split('%')[0])
except Exception as e:
print (f'Erro na cidade {cidade}! {e}')
continue
try:
vento = int(driver.find_element_by_xpath(self.path_vento1).text.split(' ')[0])
except:
try:
vento = int(driver.find_element_by_xpath(self.path_vento2).text.split(' ')[0])
except Exception as e:
print (f'Erro na cidade {cidade}! {e}')
continue
try:
pesca = driver.find_element_by_xpath(self.path_situacao_pesca1).text
except:
try:
pesca = driver.find_element_by_xpath(self.path_situacao_pesca2).text
except Exception as e:
print (f'Erro na cidade {cidade}! {e}')
continue
# melhor horário de pesca
try:
melhor_horario_pesca = driver.find_element_by_xpath(self.path_melhor_horario_pesca2).text.split('From ')[1].replace(' to ', ' - ')
except:
try:
melhor_horario_pesca = driver.find_element_by_xpath(self.path_melhor_horario_pesca1).text.split('From ')[1].replace(' to ', ' - ')
except Exception as e:
melhor_horario_pesca = ''
# trata melhor horário para pescar
if melhor_horario_pesca != '':
try:
horario1, horario2 = melhor_horario_pesca.split("-")
horario1 = horario1.strip()
horario2 = horario2.strip()
# horario1
if "am" in horario1:
horario1 = horario1.split("am")[0]
elif "pm" in horario1:
horario1 = horario1.split("pm")[0]
hora, minuto = horario1.split(":")
hora = str(int(hora) + 12)
if int(hora) == 12:
hora = "0"
horario1 = f"{hora}:{minuto}"
# horario2
if "am" in horario2:
horario2 = horario2.split("am")[0]
elif "pm" in horario2:
horario2 = horario2.split("pm")[0]
hora, minuto = horario2.split(":")
hora = str(int(hora) + 12)
if int(hora) == 12:
hora = "0"
horario2 = f"{hora}:{minuto}"
melhor_horario_pesca = f"{horario1} - {horario2}"
except:
pass
# por do sol
horario_por_sol = str(driver.find_element_by_xpath(self.path_horario_por_sol).text).split(' ')[1].strip()
if "am" in horario_por_sol:
horario_por_sol = horario_por_sol.split("am")[0]
elif "pm" in horario_por_sol:
horario_por_sol = horario_por_sol.split("pm")[0]
hora, minuto = horario_por_sol.split(":")
hora = str(int(hora) + 12)
horario_por_sol = f"{hora}:{minuto}"
# marés
try:
horario_mare_1, altura_mare_1, simbolo_mare_1 = self.trata_mare(driver.find_element_by_xpath(self.path_mare1).text)
except:
horario_mare_1 = ""
altura_mare_1 = -1
simbolo_mare_1 = ""
try:
horario_mare_2, altura_mare_2, simbolo_mare_2 = self.trata_mare(driver.find_element_by_xpath(self.path_mare2).text)
except:
horario_mare_2 = ""
altura_mare_2 = -1
simbolo_mare_2 = ""
try:
horario_mare_3, altura_mare_3, simbolo_mare_3 = self.trata_mare(driver.find_element_by_xpath(self.path_mare3).text)
except:
horario_mare_3 = ""
altura_mare_3 = -1
simbolo_mare_3 = ""
try:
horario_mare_4, altura_mare_4, simbolo_mare_4 = self.trata_mare(driver.find_element_by_xpath(self.path_mare4).text)
except:
horario_mare_4 = ""
altura_mare_4 = -1
simbolo_mare_4 = ""
# altura da maior onda
altura_maior_onda = str(max(altura_mare_1, altura_mare_2, altura_mare_3, altura_mare_4))
# tratamento dos campos
altura_maior_onda = str(altura_maior_onda).replace('.',',')
altura_mare_1 = str(altura_mare_1).replace('.',',')
altura_mare_2 = str(altura_mare_2).replace('.',',')
altura_mare_3 = str(altura_mare_3).replace('.',',')
altura_mare_4 = str(altura_mare_4).replace('.',',')
# texto onda
if (simbolo_mare_1 == ""):
continue
elif (simbolo_mare_1 == "up" and simbolo_mare_2 == ""):
texto_onda = f"Horário de maré alta: {horario_mare_1} ({altura_mare_1}m)"
elif (simbolo_mare_1 == "down" and simbolo_mare_2 == ""):
texto_onda = f"Horário de maré baixa: {horario_mare_1} ({altura_mare_1}m)"
elif (simbolo_mare_1 == 'up' and simbolo_mare_4 != ""):
texto_onda = f"Horários de maré alta: {horario_mare_1} ({altura_mare_1}m) e {horario_mare_3} ({altura_mare_3}m).\nHorários de maré baixa: {horario_mare_2} ({altura_mare_2}m) e {horario_mare_4} ({altura_mare_4}m)"
elif (simbolo_mare_1 == 'up' and simbolo_mare_4 == ""):
texto_onda = f"Horários de maré alta: {horario_mare_1} ({altura_mare_1}m) e {horario_mare_3} ({altura_mare_3}m).\nHorário de maré baixa: {horario_mare_2} ({altura_mare_2}m)"
elif (simbolo_mare_1 == 'down' and simbolo_mare_4 != ""):
texto_onda = f"Horário de maré alta: {horario_mare_2} ({altura_mare_2}m) e {horario_mare_4} ({altura_mare_4}m).\nHorários de maré baixa: {horario_mare_1} ({altura_mare_1}m) e {horario_mare_3} ({altura_mare_3}m)"
elif (simbolo_mare_1 == 'down' and simbolo_mare_4 == ""):
texto_onda = f"Horário de maré alta: {horario_mare_2} ({altura_mare_2}m).\nHorários de maré baixa: {horario_mare_1} ({altura_mare_1}m) e {horario_mare_3} ({altura_mare_3}m)"
else:
continue
# temperatura max
temperatura_max = int(temperatura_max_min.split('Min ')[1].split('°')[0])
# temperatura min
temperatura_min = int(temperatura_max_min.split('Max ')[1].split('°')[0])
# validação conceitual de temperaturas
maior_valor_temperatura = max([temperatura, temperatura_max, temperatura_min])
menor_valor_temperatura = min([temperatura, temperatura_max, temperatura_min])
# se estiver fora da range, ignora a cidade e continua o processo
if (maior_valor_temperatura > self.maior_temperatura_possivel or menor_valor_temperatura < self.menor_temperatura_possivel):
continue
# mapeia tempo
if tempo in self.dict_map_clima.keys():
try:
tempo = self.dict_map_clima[tempo]
except:
tempo = self.filler_tempo
else:
tempo = self.filler_tempo
# mapeia pesca
if pesca in self.dict_map_pesca.keys():
try:
pesca = self.dict_map_pesca[pesca]
except:
pesca = ''
else:
pesca = ''
# imagem adicional do litoral da cidade
url_imagem = driver.find_element_by_xpath(self.path_url_imagem).get_attribute("src")
# salva lista
lista_infos.append([cidade,
uf,
tempo,
temperatura,
temperatura_max,
temperatura_min,
nebulosidade,
umidade,
vento,
horario_por_sol,
pesca,
melhor_horario_pesca,
altura_maior_onda,
texto_onda,
url_imagem])
# erro de execução
except Exception as e:
print (f'Erro na cidade {cidade}! {e}')
continue
# fecha o driver
driver.close()
# cria o dataframe
try:
df_infos = pd.DataFrame(lista_infos,
columns=self.lista_colunas_tempo)
# tratamentos adicionais
df_infos['temperatura_max'] = df_infos[["temperatura", "temperatura_max", "temperatura_min"]].max(axis=1)
df_infos['temperatura_min'] = df_infos[["temperatura", "temperatura_max", "temperatura_min"]].min(axis=1)
# transforma tudo em string
for column in df_infos.columns.tolist():
df_infos[column] = df_infos[column].astype(str)
# inclui data de hoje
df_infos['data'] = date.today().strftime("%d/%m/%Y")
# retorna resultados
return df_infos
# erro de execução
except Exception as e:
print (f'Erro no fim do processo! {e}')
sys.exit(0)
def seleciona_conteudo_publicar(self, df_resultados):
'''
        Select the content to publish, according to the implemented selection strategy
'''
try:
# estratéga de seleção de conteúdo
max_observacoes = min(len(df_resultados), self.qtd_cidades_selecionadas)
df_selecionados = df_resultados.sample(max_observacoes)
# retorna resultados selecionados
return df_selecionados
except:
sys.exit(0)
def atribui_intent(self, df_linha):
'''
        Return the intent that matches the content of the given row
'''
try:
# campos principais
cidade = df_linha['cidade']
uf = df_linha['uf']
tempo = df_linha['tempo']
temperatura = int(df_linha['temperatura'])
temperatura_max = int(df_linha['temperatura_max'])
temperatura_min = int(df_linha['temperatura_min'])
nebulosidade = int(df_linha['nebulosidade'])
umidade = int(df_linha['umidade'])
vento = int(df_linha['vento'])
            pesca = df_linha['pesca']
            melhor_horario_pesca = df_linha['melhor_horario_pesca']
altura_maior_onda = df_linha['altura_maior_onda']
# flag
qtd_dias_temperatura_max = int(df_linha["qtd_dias_temperatura_max"])
qtd_dias_temperatura_min = int(df_linha["qtd_dias_temperatura_min"])
qtd_dias_nebulosidade_max = int(df_linha["qtd_dias_nebulosidade_max"])
qtd_dias_umidade_max = int(df_linha["qtd_dias_umidade_max"])
qtd_dias_vento_max = int(df_linha["qtd_dias_vento_max"])
qtd_dias_onda_max = int(df_linha["qtd_dias_onda_max"])
flag_outlier_temperatura = int(df_linha["flag_outlier_temperatura"])
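            # the checks below run in priority order; the first matching condition
            # determines the returned intent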
##################
# Valores outlier
##################
# temperatura maxima
if qtd_dias_temperatura_max > 0:
return "TEMPERATURA_MAXIMA"
# temperatura minima
if qtd_dias_temperatura_min > 0:
return "TEMPERATURA_MINIMA"
# nebulosidade maxima
if qtd_dias_nebulosidade_max > 0:
return "NEBULOSIDADE_MAXIMA"
# umidade maxima
if qtd_dias_umidade_max > 0:
return "UMIDADE_MAXIMA"
# vento maxima
if qtd_dias_vento_max > 0:
return "VENTO_MAXIMA"
if qtd_dias_onda_max > 0:
return "ONDA_MAXIMA"
# muito bom para pescar
if pesca == "Excelente" and melhor_horario_pesca != '':
return "BOM_PESCAR"
# mar não está para peixes
if (pesca not in ["Excelente", "Bom"] and float(altura_maior_onda.replace(",",".")) > self.altura_mare_ruim):
return "MAR_NAO_ESTA_PARA_PEIXES"
# sol ultravioleta
if temperatura > 32:
return "SOL_ULTRAVIOLETA"
if flag_outlier_temperatura == 1:
return "TEMPERATURA_ALTA_OUTLIER"
if flag_outlier_temperatura == -1:
return "TEMPERATURA_BAIXA_OUTLIER"
##################
# Valores altos
##################
# umidade alta
if umidade >= 95:
return "UMIDADE_ALTA"
# nebulosidade alta
if nebulosidade >= 90:
return "NEBULOSIDADE_ALTA"
# vento alto
if vento >= 32:
return "VENTO_ALTO"
if (temperatura == temperatura_max and temperatura_max == temperatura_min):
return "TEMP_IGUAIS"
# pesca incorreta
if melhor_horario_pesca == '':
return 'NORMAL_PESCA_INCORRETA'
# se não caiu em nenhum caso, retorna comportamento usual
return 'NORMAL'
except Exception as e:
print (f'Erro! {e}')
return ""
def atribui_template(self, df_linha, intent):
'''
        Return the filled-in text template for the given intent
'''
print (f'Intent: {intent}')
try:
# campos principais
cidade = df_linha['cidade']
uf = df_linha['uf']
tempo = df_linha['tempo']
temperatura = df_linha['temperatura']
temperatura_max = df_linha['temperatura_max']
temperatura_min = df_linha['temperatura_min']
nebulosidade = df_linha['nebulosidade']
umidade = df_linha['umidade']
vento = df_linha['vento']
pesca = df_linha['pesca']
melhor_horario_pesca = df_linha['melhor_horario_pesca']
horario_por_sol = df_linha['horario_por_sol']
altura_maior_onda = df_linha['altura_maior_onda']
texto_onda = df_linha['texto_onda']
# flags
qtd_dias_consecutivos = df_linha['qtd_dias_consecutivos']
qtd_dias_temperatura_max = df_linha['qtd_dias_temperatura_max']
qtd_dias_temperatura_min = df_linha['qtd_dias_temperatura_min']
qtd_dias_nebulosidade_max = df_linha['qtd_dias_nebulosidade_max']
qtd_dias_umidade_max = df_linha['qtd_dias_umidade_max']
qtd_dias_vento_max = df_linha['qtd_dias_vento_max']
qtd_dias_onda_max = df_linha['qtd_dias_onda_max']
# início do texto
if cidade in self.lista_cidades_em:
numero = "singular"
genero = "masculino"
else:
numero = "singular"
genero = "neutro"
# lista de textos possíveis para cada intent
lista_possibilidades = self.dict_intents[intent]
# seleciona texto
texto_selecionado = random.choice(lista_possibilidades).strip()
# dicionário para substituição de campos
dicionario_map = {
"[cidade]":cidade,
"[uf]":uf,
"[tempo]":tempo,
"[temperatura]":temperatura,
"[temperatura_max]":temperatura_max,
"[temperatura_min]":temperatura_min,
"[nebulosidade]":nebulosidade,
"[umidade]":umidade,
"[vento]":vento,
"[pesca]":pesca,
"[melhor_horario_pesca]":melhor_horario_pesca,
"[horario_por_sol]":horario_por_sol,
"[texto_onda]":texto_onda,
"[altura_maior_onda]":altura_maior_onda,
"[qtd_dias_temperatura_max]":qtd_dias_temperatura_max,
"[qtd_dias_temperatura_min]":qtd_dias_temperatura_min,
"[qtd_dias_nebulosidade_max]":qtd_dias_nebulosidade_max,
"[qtd_dias_umidade_max]":qtd_dias_umidade_max,
"[qtd_dias_vento_max]":qtd_dias_vento_max,
"[qtd_dias_onda_max]":qtd_dias_onda_max
}
# aplica substituições no template
for key, value in dicionario_map.items():
texto_selecionado = texto_selecionado.replace(key, value)
# aplica analisador léxico
for palavra in self.lista_palavras_analisador_lexico:
valor = self.get_analisador_lexico(palavra, numero, genero)
texto_selecionado = texto_selecionado.replace(f"[{palavra}]", valor)
# atribui ordenação do discurso (discourse ordering)
texto_selecionado = self.discourse_ordering_object.discourse_ordering(intent, texto_selecionado)
# adiciona pós-processamentos ao tweet
tweet = f"{self.twitter_api.get_inicio_post()}{texto_selecionado.strip()}{self.twitter_api.get_fim_post()}"
return tweet
except Exception as e:
print (f'Erro! {e}')
return ""
def popula_estatisticas(self, df_selecionados, df_atual):
'''
        Populate the selected rows with statistics computed from the historical DataFrame
'''
data_hoje = date.today()
lista_cidades = list(set(df_selecionados['cidade']))
lista_retorno = []
# se df estiver vazio...
if (len(df_atual) == 0):
# itera cidades
for cidade in lista_cidades:
lista_valores = [cidade, 0, 0, 0, 0, 0, 0, 0, 0]
lista_retorno.append(lista_valores)
# se não estiver vazio...
else:
            df_atual['data_formatada'] = pd.to_datetime(df_atual['data'], format="%d/%m/%Y")
df_passado = df_atual.loc[(df_atual['data_formatada'] < pd.to_datetime(data_hoje))]
# itera cidades
for cidade in lista_cidades:
df_cidade = df_passado.loc[(df_passado['cidade'] == cidade)]
lista_datas = set(df_cidade['data'])
qtd_dias = self.count_dias(data_hoje, lista_datas)
df_valores_cidade = df_selecionados.loc[(df_selecionados['cidade'] == cidade)]
# se poucos dias, retorna 0
if (qtd_dias < self.qtd_min_dias_consecutivos):
lista_valores = [cidade, 0, 0, 0, 0, 0, 0, 0, 0]
lista_retorno.append(lista_valores)
continue
# filtra datas consecutivas
                data_ultima = pd.Timestamp(data_hoje - datetime.timedelta(qtd_dias))
df_cidade = df_cidade.loc[df_cidade['data_formatada'] >= data_ultima].sort_values(by=['data_formatada'], ascending=False)
# valores computados
qtd_dias_temperatura_max = self.count_valor(float(df_valores_cidade['temperatura']), df_cidade['temperatura'].values.tolist(), 'maior')
qtd_dias_temperatura_min = self.count_valor(float(df_valores_cidade['temperatura']), df_cidade['temperatura'].values.tolist(), 'menor')
qtd_dias_nebulosidade_max = self.count_valor(float(df_valores_cidade['nebulosidade']), df_cidade['nebulosidade'].values.tolist(), 'maior')
qtd_dias_umidade_max = self.count_valor(float(df_valores_cidade['umidade']), df_cidade['umidade'].values.tolist(), 'maior')
qtd_dias_vento_max = self.count_valor(float(df_valores_cidade['vento']), df_cidade['vento'].values.tolist(), 'maior')
qtd_dias_onda_max = self.count_valor(float(df_valores_cidade['altura_maior_onda']), df_cidade['altura_maior_onda'].values.tolist(), 'maior')
# outliers
temperatura_media = df_cidade['temperatura'].mean()
temperatura_std = df_cidade['temperatura'].std()
temp_max = temperatura_media + (temperatura_std * self.multiplicador_std)
temp_min = temperatura_media - (temperatura_std * self.multiplicador_std)
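# flag the current temperature as an outlier when it falls outside
# mean +/- (std * multiplier) computed over the recent consecutive days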
if (df_valores_cidade['temperatura'] > temp_max):
flag_outlier_temperatura = 1
elif (df_valores_cidade['temperatura'] < temp_min):
flag_outlier_temperatura = -1
else:
flag_outlier_temperatura = 0
lista_valores = [cidade,
qtd_dias,
qtd_dias_temperatura_max,
qtd_dias_temperatura_min,
qtd_dias_nebulosidade_max,
qtd_dias_umidade_max,
qtd_dias_vento_max,
qtd_dias_onda_max,
flag_outlier_temperatura]
lista_retorno.append(lista_valores)
# statistics
df_estatisticas = pd.DataFrame(lista_retorno, columns=['cidade',
'qtd_dias_consecutivos',
'qtd_dias_temperatura_max',
'qtd_dias_temperatura_min',
'qtd_dias_nebulosidade_max',
'qtd_dias_umidade_max',
'qtd_dias_vento_max',
'qtd_dias_onda_max',
'flag_outlier_temperatura'])
# convert data to strings
df_estatisticas['qtd_dias_consecutivos'] = df_estatisticas['qtd_dias_consecutivos'].astype(str)
df_estatisticas['qtd_dias_temperatura_max'] = df_estatisticas['qtd_dias_temperatura_max'].astype(str)
df_estatisticas['qtd_dias_temperatura_min'] = df_estatisticas['qtd_dias_temperatura_min'].astype(str)
df_estatisticas['qtd_dias_nebulosidade_max'] = df_estatisticas['qtd_dias_nebulosidade_max'].astype(str)
df_estatisticas['qtd_dias_umidade_max'] = df_estatisticas['qtd_dias_umidade_max'].astype(str)
df_estatisticas['qtd_dias_vento_max'] = df_estatisticas['qtd_dias_vento_max'].astype(str)
df_estatisticas['qtd_dias_onda_max'] = df_estatisticas['qtd_dias_onda_max'].astype(str)
df_estatisticas['flag_outlier_temperatura'] = df_estatisticas['flag_outlier_temperatura'].astype(str)
return df_estatisticas
def publica_conteudo(self):
'''
Publishes the weather forecast (tide table)
'''
# today's date
data_hoje = date.today().strftime("%d_%m_%Y")
try:
# generate results
df_resultados = self.gera_df_tabua_mares()
# save results to the folder
df_resultados[self.lista_colunas_salvar].to_csv(f"resultados_cidades/{data_hoje}.csv", index=False, sep=';')
# add this run's data to the database
df_atual = pd.read_csv(self.path_bd, sep=';')
df_atual = df_atual.loc[(df_atual['data'] != date.today().strftime("%d/%m/%Y")) & (~df_atual['cidade'].isnull())]
df_novo = df_atual.append(df_resultados[self.lista_colunas_salvar])
df_novo.to_csv(self.path_bd, index=False, sep=';')
# filter data for publication
df_selecionados = self.seleciona_conteudo_publicar(df_resultados)
except:
sys.exit(0)
# if there is nothing to publish, stop execution
try:
if (len(df_selecionados) == 0):
sys.exit(0)
except:
sys.exit(0)
# populate results with statistics
df_estatisticas = self.popula_estatisticas(df_selecionados, df_atual)
# join tables
df_selecionados = | pd.merge(df_selecionados, df_estatisticas, how='left', on='cidade') | pandas.merge |
import glob
import os
import re
import pandas as pd
import numpy as np
from collections import Counter
path = "files/"
files = glob.glob(path+"*.txt")
# first we need a list of all words in all files.
finalDataframe = pd.DataFrame()
for file in files:
with open(file, mode="r") as f:
data = f.read()
# Split data into an array of words, case-insensitive
word = re.split(r"\W+", data, flags=re.IGNORECASE)
# Remove white spaces and empty strings
cleanWords = [line for line in [l.strip() for l in word] if line]
# Remove duplicates, we don't want them
words = list(set(cleanWords))
# Add data into dictionary
dictionary = {"filename":file, "values":pd.Series(words)}
finalDataframe = finalDataframe.append(pd.DataFrame(dictionary))
# list of words in total
wordstmp = finalDataframe['values']
dic2 = { "words" : wordstmp }
df1 = pd.DataFrame(dic2)
df1 = df1.set_index("words")
sss2 = [finalDataframe['filename']=='files/file2.txt']
sss1 = [finalDataframe['filename']=='files/file1.txt']
sss1[0]
dd1=list(sss1[0])
df1['file1']= dd1
dd2=list(sss2[0])
df1['file2']=dd2
finalDataframe['file1']=dd1
finalDataframe['file2']=dd2
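# word-by-file membership tables: pivot_table averages the per-file boolean
# flags for each word, while crosstab counts word occurrences per source file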
test1=pd.pivot_table(finalDataframe, values=['file1','file2'],index=['values'])
test3 = | pd.crosstab(finalDataframe['values'], finalDataframe['filename'], margins=True) | pandas.crosstab |
# ---------------------------------------------------------------------------------------------
# MIT License
# Copyright (c) 2020, Solace Corporation, <NAME> (<EMAIL>)
# ---------------------------------------------------------------------------------------------
import array as arr
import json
from .broker_series import BrokerSeries
from .common_base import CommonBase
from .constants import *
from ._constants import *
from .latency_broker_latency_series import LatencyBrokerLatencySeries
from .latency_node_latency_series import LatencyNodeLatencySeries
from .ping_series import PingSeries
from .run_meta import RunMeta
from .run import Run
import numpy as np
import pandas as pd
CHECK_PASSING_MD="**<span style='color:green'>passing</span>**"
CHECK_FAILING_MD="**<span style='color:red'>failing</span>**"
d_latency_percentile = {
# k_latency_00_05th : 0.005,
# k_latency_01_th : 0.01,
# k_latency_00_5th : 0.05,
# k_latency_10th : 0.10,
# k_latency_25th : 0.25,
# k_latency_50th : 0.5,
# k_latency_75th : 0.75,
k_latency_90th : 0.90,
k_latency_95th : 0.95,
k_latency_99th : 0.99,
k_latency_99_5th : 0.995,
k_latency_99_9th : 0.999,
# k_latency_99_95th : 0.9995,
# k_latency_99_99th : 0.9999,
# k_latency_99_995th : 0.99995,
# k_latency_99_999th : 0.99999,
}
class RunAnalytics():
def __init__(self, run):
self.run = run
def _export_broker_metric_by_consumer_as_dataframe(self, broker_metric, conversion_function=None):
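# Walks every broker sample, matches client connection details against the
# run's consumer list and collects the requested metric (optionally converted)
# into one DataFrame column per consumer.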
client_connection_details_series = self.run.broker_series.getSeriesOfListOfClientConnectionDetails()
client_list=self.run.run_meta.getConsumerNamesAsDict()
for client_connection_details_sample in client_connection_details_series:
for client_connection_detail in client_connection_details_sample["client_connection_details"]:
client_name = self.run.run_meta.composeDisplayClientName(client_connection_detail['clientName'])
if client_name in client_list:
if conversion_function:
value = conversion_function(client_connection_detail[broker_metric])
else:
value = client_connection_detail[broker_metric]
client_list[client_name].append(value)
return pd.DataFrame(
data=client_list
)
def export_broker_txQueueByteCount_by_consumer_as_dataframe(self):
return self._export_broker_metric_by_consumer_as_dataframe('txQueueByteCount')
def export_broker_smoothedRoundTripTime_by_consumer_as_dataframe(self):
def convert2Micros(value):
return value / 1000
return self._export_broker_metric_by_consumer_as_dataframe('smoothedRoundTripTime', convert2Micros)
def export_broker_timedRetransmitCount_by_consumer_as_dataframe(self):
return self._export_broker_metric_by_consumer_as_dataframe('timedRetransmitCount')
def export_broker_uptime_by_consumer_as_dataframe(self):
return self._export_broker_metric_by_consumer_as_dataframe('uptime')
def export_broker_node_distinct_latencies_as_dataframe(self, col_name:str ="run"):
return pd.DataFrame(data={col_name: self.run.export_broker_node_distinct_latencies()})
def export_latency_node_distinct_latencies_as_dataframe(self, col_name:str ="run"):
return pd.DataFrame(data={col_name: self.run.export_latency_node_distinct_latencies()})
def export_latency_node_series_latencies_metrics_as_dataframe(self):
return pd.DataFrame(data=self.export_latency_node_series_latencies_metrics())
def export_broker_node_series_latencies_metrics_as_dataframe(self):
return pd.DataFrame(data=self.export_broker_node_series_latencies_metrics())
def export_broker_node_series_latencies_metrics(self):
result = dict()
#quantiles
percentiles = list(d_latency_percentile.values())
lat_dict =self.run.export_broker_node_distinct_latencies_per_sample()
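# for every sample, compute the mean and the configured percentiles of the
# distinct latencies and append them to the per-metric result lists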
for key, value in lat_dict.items():
tmp_df = pd.DataFrame(data={"sample":value})
tmp_quantiles = tmp_df['sample'].quantile(q=percentiles)
#self.add_to_dict(result,k_latency_minimum, tmp_df['sample'].min())
#self.add_to_dict(result,k_latency_maximum, tmp_df['sample'].max())
self.add_to_dict(result,k_latency_average, tmp_df['sample'].mean())
for map_key,map_percentile in d_latency_percentile.items():
self.add_to_dict(result,map_key, tmp_quantiles[map_percentile])
return result
def export_latency_node_series_latencies_metrics(self):
result = dict()
#quantiles
percentiles = list(d_latency_percentile.values())
lat_dict =self.run.export_latency_node_distinct_latencies_per_sample()
for key, value in lat_dict.items():
tmp_df = | pd.DataFrame(data={"sample":value}) | pandas.DataFrame |
import json
import numpy as np
import os
import pandas as pd
import urllib2
def collectData():
# connect to poloniex's API
url = 'https://poloniex.com/public?command=returnChartData&currencyPair=USDT_BTC&start=1518393227&end=9999999999&resolution=auto'
# parse json returned from the API to Pandas DF
openUrl = urllib2.urlopen(url)
r = openUrl.read()
openUrl.close()
d = json.loads(r.decode())
df = | pd.DataFrame(d) | pandas.DataFrame |
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right'),
IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]),
IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed)
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples(
[(0, 1), ('a', 'b'), (0, 1)], closed=closed)
assert not idx.is_unique
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
i = IntervalIndex.from_tuples([(Timestamp('20130101'),
Timestamp('20130102')),
(Timestamp('20130102'),
Timestamp('20130103'))],
closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks([0, 1, 2], closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
def test_slice_locs_decreasing_float64(self):
self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='p')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlap completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index.union(other)
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.union(index), index)
tm.assert_index_equal(index.union(index[:1]), index)
def test_intersection(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
result = other.intersection(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.intersection(index), index)
def test_difference(self, closed):
index = self.create_index(closed=closed)
tm.assert_index_equal(index.difference(index[:1]), index[1:])
def test_symmetric_difference(self, closed):
idx = self.create_index(closed=closed)
result = idx[1:].symmetric_difference(idx[:-1])
expected = IntervalIndex([idx[0], idx[-1]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
def test_set_operation_errors(self, closed, op_name):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# test errors
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
with tm.assert_raises_regex(ValueError, msg):
set_op(Index([1, 2, 3]))
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
set_op(other)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
def test_datetime(self):
dates = date_range('2000', periods=3)
idx = IntervalIndex.from_breaks(dates)
tm.assert_index_equal(idx.left, dates[:2])
tm.assert_index_equal(idx.right, dates[-2:])
expected = date_range('2000-01-01T12:00', periods=2)
tm.assert_index_equal(idx.mid, expected)
assert Timestamp('2000-01-01T12') not in idx
assert Timestamp('2000-01-01T12') not in idx
target = date_range('1999-12-31T12:00', periods=7, freq='12H')
actual = idx.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False for closed='both', otherwise True (GH16560)
if closed == 'both':
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
class TestIntervalRange(object):
def test_construction_from_numeric(self, closed, name):
# combinations of start/end/periods without freq
expected = IntervalIndex.from_breaks(
np.arange(0, 6), name=name, closed=closed)
result = interval_range(start=0, end=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=5, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with freq
expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)],
name=name, closed=closed)
result = interval_range(start=0, end=6, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=6, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)],
name=name, closed=closed)
result = interval_range(start=0, end=4, freq=1.5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timestamp(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
breaks = date_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2017-01-08')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with non-fixed freq
freq = 'M'
start, end = Timestamp('2017-01-01'), | Timestamp('2017-12-31') | pandas.Timestamp |
"""
Code for the optimization and gaming component of the Baselining work.
@author: <NAME>, <NAME>
@date Mar 2, 2016
"""
import numpy as np
import pandas as pd
import logging
from gurobipy import GRB, Model, quicksum, LinExpr
from pandas.tseries.holiday import USFederalHolidayCalendar
from datetime import datetime
from .utils import (get_energy_charges, get_demand_charge, dem_charges, dem_charges_yearly,
get_pdp_demand_credit, get_DR_rewards, powerset, E19,
carbon_costs)
# define some string formatters
psform = '%Y-%m-%d %H:%M'
dsform = '%Y-%m-%d'
class BLModel(object):
"""
Abstract base class for Baselining models.
"""
def __init__(self, name):
"""
Construct an abstract dynamical system object based on the
gurobipy Model object 'model'.
"""
self._name = name
self._model = Model()
def get_model(self):
"""
Returns the underlying gurobiy Model object.
"""
return self._model
def set_dynsys(self, dynsys):
"""
Initialize dynamical system for underlying dynamics.
"""
self._dynsys = dynsys
def set_window(self, index):
"""
Set the window for the optimization. Here index is a pandas
DatetimeIndex.
"""
self._index = index
self._dynsys.set_window(index)
def energy_charges(self, tariff, isRT=False, LMP=None, isPDP=False,
twindow=None, carbon=False):
"""
Return total enery consumption charges (as determined by the
tariff's energy charge) as a gurobipy LinExpr.
"""
locidx = self._index.tz_convert('US/Pacific')
year = locidx[0].year
if isRT and isPDP:
raise Exception('Cannot combine RTP and PDP.')
nrg_charges = get_energy_charges(
self._index, tariff, isRT=isRT, LMP=LMP,
isPDP=isPDP, carbon=carbon, year=year)['EnergyCharge']
cons = self._dynsys.get_consumption()['energy']
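# element-wise product of per-interval energy charges and consumption terms,
# returned as a Series of gurobipy linear expressions indexed by timestamp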
if twindow is None:
# echrg_= quicksum([ec * con for ec, con in
# zip(nrg_charges.values, cons.values)])
echrg_ = [ec * con for ec, con in
zip(nrg_charges.values, cons.values)]
echrg = pd.Series(echrg_, index=locidx)
else:
nrg_charges_ = nrg_charges.loc[twindow[0]:twindow[1]]
cons_ = cons.loc[twindow[0]:twindow[1]]
# echrg = quicksum([ec * con for ec, con in
# zip(nrg_charges_.values, cons_.values)])
echrg_ = [ec * con for ec, con in
zip(nrg_charges_.values, cons_.values)]
indx = locidx[locidx.get_loc(twindow[0]):
locidx.get_loc(twindow[1])+1]
echrg = pd.Series(echrg_, index=indx)
return echrg
def demand_charges(self, tariff, isPDP=False):
"""
Return the total demand charges under the tariff as a
gurobipy LinExpr.
"""
# determine which year/month combinations there is a demand charge,
# and create a variable for each of them
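# first clear any max-consumption variables and constraints left over from a
# previous call so they are not duplicated when the model is rebuilt below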
if hasattr(self, '_maxcon'):
for maxcon in self._maxcon.values():
self._model.remove(maxcon)
del self._maxcon
if hasattr(self, '_maxconbnd'):
for maxconbnd in self._maxconbnd.values():
self._model.remove(maxconbnd)
del self._maxconbnd
if hasattr(self, '_maxconppk'):
for maxconppk in self._maxconppk.values():
self._model.remove(maxconppk)
del self._maxconppk
if hasattr(self, '_maxconppkbnd'):
for maxconppkbnd in self._maxconppkbnd.values():
self._model.remove(maxconppkbnd)
del self._maxconppkbnd
if hasattr(self, '_maxconpk'):
for maxconpk in self._maxconpk.values():
self._model.remove(maxconpk)
del self._maxconpk
if hasattr(self, '_maxconpkbnd'):
for maxconpkbnd in self._maxconpkbnd.values():
self._model.remove(maxconpkbnd)
del self._maxconpkbnd
if hasattr(self, '_maxconpks'):
for maxconpks in self._maxconpks.values():
self._model.remove(maxconpks)
del self._maxconpks
if hasattr(self, '_maxconppkw'):
for maxconppkw in self._maxconppkw.values():
self._model.remove(maxconppkw)
del self._maxconppkw
if hasattr(self, '_maxconppkbndw'):
for maxconppkbndw in self._maxconppkbndw.values():
self._model.remove(maxconppkbndw)
del self._maxconppkbndw
if hasattr(self, '_maxconppks'):
for maxconppks in self._maxconppks.values():
self._model.remove(maxconppks)
del self._maxconppks
if hasattr(self, '_maxconppkbnds'):
for maxconppkbnds in self._maxconppkbnds.values():
self._model.remove(maxconppkbnds)
del self._maxconppkbnds
self._model.update()
locidx = self._index.tz_convert('US/Pacific')
ym_dict = {year: np.unique(locidx[locidx.year == year].month)
for year in np.unique(locidx.year)}
indx = []
for year, months in ym_dict.items():
for month in months:
indx.append(pd.Timestamp(datetime(year, month, 1),
tz='US/Pacific'))
if tariff in dem_charges:
if not(tariff in E19):
self._maxcon, self._maxconbnd = {}, {}
# locidx = self._index.tz_convert('US/Pacific')
# print locidx
# the following creates a dictionary with all years in the data
# as keys, and for each year the value is an array of (unique)
# months that appear during that year. This is used for keeping
# track of the peak consumpiton for the demand charge
# ym_dict = {year: np.unique(locidx[locidx.year == year].month)
# for year in np.unique(locidx.year)}
# indx=[]
for year, months in ym_dict.items():
for month in months:
# declare variable for max consumption
self._maxcon[year, month] = self._model.addVar(
vtype=GRB.CONTINUOUS,
name='maxcon[{},{}]'.format(year, month))
# indx.append(pd.Timestamp(datetime(year,month,1),tz='US/Pacific'))
self._model.update()
# now add in the necessary constraints and update objective
dcharges = []
cons = self._dynsys.get_consumption()['power']
for year, months in ym_dict.items():
for month in months:
relcons = cons[(locidx.year == year) &
(locidx.month == month)].values
for i, con in enumerate(relcons):
self._maxconbnd[year, month, i] = self._model.addConstr(
lhs=self._maxcon[year, month],
sense=GRB.GREATER_EQUAL,
rhs=con, name='maxconbnd[{},{},{}]'.format(
year, month, i))
# dcharges += (get_demand_charge(tariff, month, isPDP)*
# self._maxcon[year, month])
dcharges.append(
(get_demand_charge(tariff, month, isPDP, year=year) *
self._maxcon[year, month]))
dcharges = | pd.Series(dcharges, index=indx) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 15:36:17 2019
@author: fgw
"""
import pandas as pd
from collections import deque
from sklearn.preprocessing import MinMaxScaler
class Data_Process(object):
def __init__(self, data_path):
data = | pd.read_csv(data_path, sep=',') | pandas.read_csv |
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from .naive_bayes import NaiveBayes
import pandas as pd
import numpy as np
from scipy.stats import mode
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
import random
np.seterr(divide='ignore', invalid='ignore')
classifier = 'our'
use_all = True
class Model:
def __init__(self):
#Create a Gaussian Classifier
self.our = NaiveBayes()
self.gnb = GaussianNB()
self.knc = KNeighborsClassifier(n_neighbors=3)
# self.clf = svm.SVC(gamma='scale') # NOT WORKING
print("model initiallized")
self.gen = 0
def train(self, data):
df = | pd.DataFrame(data) | pandas.DataFrame |
from collections import OrderedDict
import datetime as dt
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sys
import xarray as xr
def _raiseException(prefix, msg):
sys.tracebacklimit = None
raise(Exception('[OSMPythonTools.' + prefix + '] ' + msg))
def dictRange(start, end, step=1):
return OrderedDict([(x, x) for x in range(start, end, step)])
def dictRangeYears(start, end, step=1):
def yearToString(year):
y = int(year)
f = year - y
timespanYear = (dt.datetime(y + 1, 1, 1) - dt.datetime(y, 1, 1))
return (dt.datetime(y, 1, 1) + f * timespanYear).isoformat() + 'Z'
return OrderedDict([(year, yearToString(year)) for year in np.arange(start, end, step)])
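# Example (sketch): dictRangeYears(2013, 2015, 0.5) yields keys
# 2013, 2013.5, 2014, 2014.5 mapped to ISO timestamps, the fractional part
# being placed proportionally within the year (2013.5 -> roughly mid-2013).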
class All:
def __repr__(self):
return 'ALL'
ALL = All()
class Data:
def __init__(self, arg1, arg2):
self._dataset = None
self._dataFrame = None
if callable(arg1) and isinstance(arg2, OrderedDict):
fetch = arg1
dimensions = arg2
self._dimensions = dimensions
data = list(map(lambda params: fetch(*params), itertools.product(*[[v for v in dimension.values()] for dimension in dimensions.values()])))
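# reshape the flat list of fetch results into nested lists whose lengths
# follow the dimension sizes (innermost dimension varies fastest), then wrap
# it in an xarray DataArray indexed by the dimension keys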
for dim, v in reversed(dimensions.items()):
data = [data[i:i+len(v)] for i in range(0, len(data), len(v))]
self._dataset = xr.DataArray(data=data[0], dims=list(dimensions.keys()), coords=[list(v.keys()) for v in dimensions.values()]).to_dataset(name='value')
elif isinstance(arg1, Data) and isinstance(arg2, xr.Dataset):
self._dimensions = arg1._dimensions
self._dataset = arg2
elif isinstance(arg1, Data) and isinstance(arg2, list) and all(isinstance(df, xr.Dataset) for df in arg2):
self._dimensions = arg1._dimensions
self._dataset = xr.merge(map(lambda d: d.getDataset(), arg2))
elif isinstance(arg1, Data) and isinstance(arg2, pd.DataFrame):
self._dimensions = arg1._dimensions
self._dataFrame = arg2
elif isinstance(arg1, Data) and isinstance(arg2, list) and all(isinstance(df, pd.DataFrame) for df in arg2):
self._dimensions = arg1._dimensions
self._dataFrame = | pd.concat(arg2, axis=1) | pandas.concat |
from __future__ import annotations
import copy
import itertools
from typing import (
TYPE_CHECKING,
Sequence,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
internals as libinternals,
)
from pandas._libs.missing import NA
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
Shape,
)
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
ensure_dtype_can_hold_na,
find_common_type,
)
from pandas.core.dtypes.common import (
is_1d_only_ea_dtype,
is_1d_only_ea_obj,
is_datetime64tz_dtype,
is_dtype_equal,
needs_i8_conversion,
)
from pandas.core.dtypes.concat import (
cast_to_common_type,
concat_compat,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna_all,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
)
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.internals.array_manager import (
ArrayManager,
NullArrayProxy,
)
from pandas.core.internals.blocks import (
ensure_block_shape,
new_block,
)
from pandas.core.internals.managers import BlockManager
if TYPE_CHECKING:
from pandas import Index
def _concatenate_array_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate array managers into one.
Parameters
----------
mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
ArrayManager
"""
# reindex all arrays
mgrs = []
for mgr, indexers in mgrs_indexers:
for ax, indexer in indexers.items():
mgr = mgr.reindex_indexer(
axes[ax], indexer, axis=ax, allow_dups=True, use_na_proxy=True
)
mgrs.append(mgr)
if concat_axis == 1:
# concatting along the rows -> concat the reindexed arrays
# TODO(ArrayManager) doesn't yet preserve the correct dtype
arrays = [
concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))])
for j in range(len(mgrs[0].arrays))
]
else:
# concatting along the columns -> combine reindexed arrays in a single manager
assert concat_axis == 0
arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))
if copy:
arrays = [x.copy() for x in arrays]
new_mgr = ArrayManager(arrays, [axes[1], axes[0]], verify_integrity=False)
return new_mgr
def concat_arrays(to_concat: list) -> ArrayLike:
"""
Alternative for concat_compat but specialized for use in the ArrayManager.
Differences: only deals with 1D arrays (no axis keyword), assumes
ensure_wrapped_if_datetimelike and does not skip empty arrays to determine
the dtype.
In addition ensures that all NullArrayProxies get replaced with actual
arrays.
Parameters
----------
to_concat : list of arrays
Returns
-------
np.ndarray or ExtensionArray
"""
# ignore the all-NA proxies to determine the resulting dtype
to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)]
dtypes = {x.dtype for x in to_concat_no_proxy}
single_dtype = len(dtypes) == 1
if single_dtype:
target_dtype = to_concat_no_proxy[0].dtype
elif all(x.kind in ["i", "u", "b"] and isinstance(x, np.dtype) for x in dtypes):
# GH#42092
target_dtype = np.find_common_type(list(dtypes), [])
else:
target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy])
if target_dtype.kind in ["m", "M"]:
# for datetimelike use DatetimeArray/TimedeltaArray concatenation
# don't use arr.astype(target_dtype, copy=False), because that doesn't
# work for DatetimeArray/TimedeltaArray (returns ndarray)
to_concat = [
arr.to_array(target_dtype) if isinstance(arr, NullArrayProxy) else arr
for arr in to_concat
]
return type(to_concat_no_proxy[0])._concat_same_type(to_concat, axis=0)
to_concat = [
arr.to_array(target_dtype)
if isinstance(arr, NullArrayProxy)
else cast_to_common_type(arr, target_dtype)
for arr in to_concat
]
if isinstance(to_concat[0], ExtensionArray):
cls = type(to_concat[0])
return cls._concat_same_type(to_concat)
result = np.concatenate(to_concat)
# TODO decide on exact behaviour (we shouldn't do this only for empty result)
# see https://github.com/pandas-dev/pandas/issues/39817
if len(result) == 0:
# all empties -> check for bool to not coerce to float
kinds = {obj.dtype.kind for obj in to_concat_no_proxy}
if len(kinds) != 1:
if "b" in kinds:
result = result.astype(object)
return result
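# Illustrative sketch, not part of the original pandas source: the common-dtype
# resolution used in concat_arrays above (the GH#42092 branch) can be reproduced
# with plain numpy. The dtype choices below are arbitrary examples.
def _demo_concat_arrays_dtype_resolution():
    # int64, uint8 and bool are all plain numpy "i"/"u"/"b" dtypes, so the branch
    # above falls back to np.find_common_type, which promotes them to int64 here.
    dtypes = [np.dtype("int64"), np.dtype("uint8"), np.dtype("bool")]
    return np.find_common_type(dtypes, [])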
def concatenate_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
BlockManager
"""
# TODO(ArrayManager) this assumes that all managers are of the same type
if isinstance(mgrs_indexers[0][0], ArrayManager):
return _concatenate_array_managers(mgrs_indexers, axes, concat_axis, copy)
concat_plans = [
_get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers
]
concat_plan = _combine_concat_plans(concat_plans, concat_axis)
blocks = []
for placement, join_units in concat_plan:
unit = join_units[0]
blk = unit.block
if len(join_units) == 1 and not join_units[0].indexers:
values = blk.values
if copy:
values = values.copy()
else:
values = values.view()
fastpath = True
elif _is_uniform_join_units(join_units):
vals = [ju.block.values for ju in join_units]
if not blk.is_extension:
# _is_uniform_join_units ensures a single dtype, so
# we can use np.concatenate, which is more performant
# than concat_compat
values = np.concatenate(vals, axis=blk.ndim - 1)
else:
# TODO(EA2D): special-casing not needed with 2D EAs
values = concat_compat(vals, axis=1)
values = ensure_block_shape(values, blk.ndim)
values = ensure_wrapped_if_datetimelike(values)
fastpath = blk.values.dtype == values.dtype
else:
values = _concatenate_join_units(join_units, concat_axis, copy=copy)
fastpath = False
if fastpath:
b = blk.make_block_same_class(values, placement=placement)
else:
b = new_block(values, placement=placement, ndim=len(axes))
blocks.append(b)
return BlockManager(tuple(blocks), axes)
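# Illustrative sketch, not part of the original pandas source: concatenate_managers
# is the low-level engine behind the public pd.concat for block-backed frames. A
# minimal public-API call that typically exercises this path (column name arbitrary):
def _demo_concatenate_managers_via_public_concat():
    import pandas as pd

    left = pd.DataFrame({"a": [1, 2]})
    right = pd.DataFrame({"a": [3, 4]})
    return pd.concat([left, right], ignore_index=True)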
def _get_mgr_concatenation_plan(mgr: BlockManager, indexers: dict[int, np.ndarray]):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
# Calculate post-reindex shape, save for item axis which will be separate
# for each block anyway.
mgr_shape_list = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape_list[ax] = len(indexer)
mgr_shape = tuple(mgr_shape_list)
has_column_indexer = False
if 0 in indexers:
has_column_indexer = True
ax0_indexer = indexers.pop(0)
blknos = algos.take_nd(mgr.blknos, ax0_indexer, fill_value=-1)
blklocs = algos.take_nd(mgr.blklocs, ax0_indexer, fill_value=-1)
else:
if mgr.is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
blknos = mgr.blknos
blklocs = mgr.blklocs
plan = []
for blkno, placements in libinternals.get_blkno_placements(blknos, group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape_list = list(mgr_shape)
shape_list[0] = len(placements)
shape = tuple(shape_list)
if blkno == -1:
# only reachable in the `0 in indexers` case
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (
len(placements) == len(blk.mgr_locs)
and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
(
(
not has_column_indexer
and blk.mgr_locs.is_slice_like
and blk.mgr_locs.as_slice.step == 1
)
or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()
)
)
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
class JoinUnit:
def __init__(self, block, shape: Shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
# Note: block is None implies indexers is None, but not vice-versa
if indexers is None:
indexers = {}
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self) -> str:
return f"{type(self).__name__}({repr(self.block)}, {self.indexers})"
@cache_readonly
def needs_filling(self) -> bool:
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
blk = self.block
if blk is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return blk.dtype
return ensure_dtype_can_hold_na(blk.dtype)
def _is_valid_na_for(self, dtype: DtypeObj) -> bool:
"""
Check that we are all-NA of a type/dtype that is compatible with this dtype.
Augments `self.is_na` with an additional check of the type of NA values.
"""
if not self.is_na:
return False
if self.block is None:
return True
if self.dtype == object:
values = self.block.values
return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K"))
na_value = self.block.fill_value
if na_value is NaT and not is_dtype_equal(self.dtype, dtype):
# e.g. we are dt64 and other is td64
# fill_values match but we should not cast self.block.values to dtype
# TODO: this will need updating if we ever have non-nano dt64/td64
return False
if na_value is NA and needs_i8_conversion(dtype):
# FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat
# e.g. self.dtype == "Int64" and dtype is td64, we don't want
# to consider these as matching
return False
# TODO: better to use can_hold_element?
return is_valid_na_for_dtype(na_value, dtype)
@cache_readonly
def is_na(self) -> bool:
if self.block is None:
return True
if not self.block._can_hold_na:
return False
values = self.block.values
if isinstance(self.block.values.dtype, SparseDtype):
return False
elif self.block.is_extension:
# TODO(EA2D): no need for special case with 2D EAs
values_flat = values
else:
values_flat = values.ravel(order="K")
return isna_all(values_flat)
def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self._is_valid_na_for(empty_dtype):
# note: always holds when self.block is None
blk_dtype = getattr(self.block, "dtype", None)
if blk_dtype == np.dtype("object"):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order="K")
if len(values) and values[0] is None:
fill_value = None
if is_datetime64tz_dtype(empty_dtype):
i8values = np.full(self.shape, fill_value.value)
return DatetimeArray(i8values, dtype=empty_dtype)
elif | is_1d_only_ea_dtype(empty_dtype) | pandas.core.dtypes.common.is_1d_only_ea_dtype |
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.signals import nb
seed = 42
day_dt = np.timedelta64(86400000000000)
index = pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
])
columns = ['a', 'b', 'c']
sig = pd.DataFrame([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
], index=index, columns=columns)
# ############# accessors.py ############# #
class TestAccessors:
def test_freq(self):
assert sig.vbt.signals.freq == day_dt
assert sig['a'].vbt.signals.freq == day_dt
assert sig.vbt.signals(freq='2D').freq == day_dt * 2
assert sig['a'].vbt.signals(freq='2D').freq == day_dt * 2
assert pd.Series([False, True]).vbt.signals.freq is None
assert pd.Series([False, True]).vbt.signals(freq='3D').freq == day_dt * 3
assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).freq == day_dt * 4
def test_shuffle(self):
pd.testing.assert_series_equal(
sig['a'].vbt.signals.shuffle(seed=seed),
pd.Series(
np.array([False, False, False, True, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
np.testing.assert_array_equal(
sig['a'].vbt.signals.shuffle(seed=seed).values,
nb.shuffle_1d_nb(sig['a'].values, seed=seed)
)
pd.testing.assert_frame_equal(
sig.vbt.signals.shuffle(seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, True, False],
[False, False, False],
[True, False, False],
[True, True, True]
]),
index=sig.index,
columns=sig.columns
)
)
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(sig['a'].vbt.signals.fshift(test_n), sig['a'].shift(test_n, fill_value=False))
np.testing.assert_array_equal(
sig['a'].vbt.signals.fshift(test_n).values,
nb.fshift_1d_nb(sig['a'].values, test_n)
)
pd.testing.assert_frame_equal(sig.vbt.signals.fshift(test_n), sig.shift(test_n, fill_value=False))
def test_empty(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'),
pd.Series(np.full(5, False), index=np.arange(10, 15), name='a')
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']),
pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c'])
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty_like(sig['a']),
pd.Series(np.full(sig['a'].shape, False), index=sig['a'].index, name=sig['a'].name)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty_like(sig),
pd.DataFrame(np.full(sig.shape, False), index=sig.index, columns=sig.columns)
)
def test_generate(self):
@njit
def choice_func_nb(col, from_i, to_i, n):
if col == 0:
return np.arange(from_i, to_i)
elif col == 1:
return np.full(1, from_i)
else:
return np.full(1, to_i-n)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate(5, choice_func_nb, 1, index=sig['a'].index, name=sig['a'].name),
pd.Series(
np.array([True, True, True, True, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate((5,), choice_func_nb, 1, index=sig['a'].index, name=sig['a'].name),
pd.Series(
np.array([True, True, True, True, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
with pytest.raises(Exception) as e_info:
_ = | pd.Series.vbt.signals.generate((5, 2), choice_func_nb, 1) | pandas.Series.vbt.signals.generate |
from Bio import Entrez
import numpy
import pandas
from urllib.error import HTTPError
import os
import re
import lxml.etree
import datetime
import time
def check_config_dir(args):
files = os.listdir(args.config_dir)
asserted_files = [
'group_attribute.config',
'group_tissue.config',
'give_value.config',
'replace_value.config',
'exclude_keyword.config',
'control_term.config',
'rescue_id.config',
'search_term_exclusion.config',
'search_term_other.config',
'search_term_species.config',
'search_term_keyword.config',
'orthographical_variant.config',
]
for af in asserted_files:
assert (af in files), 'config file not found: '+af
def get_search_term(species_name="", bioprojects=[], biosamples=[], keywords=[], publication_date='',
other_conditions=pandas.DataFrame(), excluded_conditions=pandas.DataFrame()):
assert ((len(bioprojects)>0)+(len(biosamples)>0))!=2, "bioprojects and biosamples cannot be specified simultaneously."
if species_name == '':
species_term = ''
else:
species_term = '"'+species_name+'"'+"[Organism]"
keyword_term = "(" + " OR ".join(keywords) + ")"
other_terms = list()
for i in numpy.arange(other_conditions.shape[0]):
other_terms.append('\"'+other_conditions.loc[i,1]+'\"['+other_conditions.loc[i,0]+']')
other_term = " AND ".join(other_terms)
excluded_terms = list()
for i in numpy.arange(excluded_conditions.shape[0]):
excluded_terms.append('\"'+excluded_conditions.loc[i,1]+'\"['+excluded_conditions.loc[i,0]+']')
excluded_term = '('+" OR ".join(excluded_terms)+')'
date_term = publication_date+'[Publication Date]'
if len(bioprojects):
bioproject_term = "(" + " OR ".join(bioprojects) + ")"
search_term = species_term + " AND " + bioproject_term + " AND " + other_term + " NOT " + excluded_term
elif len(biosamples):
biosample_term = "(" + " OR ".join(biosamples) + ")"
search_term = biosample_term
else:
search_term = species_term + " AND " + keyword_term + " AND " + other_term + " AND " + date_term + " NOT " + excluded_term
# search_term = species_term + " AND " + other_term + " AND " + date_term + " NOT " + excluded_term
return search_term
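# Illustrative sketch only, not part of the original pipeline: the helper below shows
# the kind of Entrez query string that get_search_term() assembles. The species name,
# keyword, field names ('Properties', 'Filter') and date range are hypothetical.
def _example_get_search_term():
    sample_other = pandas.DataFrame([['Properties', 'RNA-Seq']])
    sample_excluded = pandas.DataFrame([['Filter', 'single cell']])
    term = get_search_term(species_name='Arabidopsis thaliana',
                           keywords=['"RNA-seq"'],
                           publication_date='2010/01/01:2020/12/31',
                           other_conditions=sample_other,
                           excluded_conditions=sample_excluded)
    # roughly: "Arabidopsis thaliana"[Organism] AND ("RNA-seq") AND "RNA-Seq"[Properties]
    #          AND 2010/01/01:2020/12/31[Publication Date] NOT ("single cell"[Filter])
    return term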
def fetch_sra_xml(species_name, search_term, save_xml=True, read_from_existing_file=False, retmax=100):
file_xml = "SRA_"+species_name.replace(" ", "_")+".xml"
flag = True
if (read_from_existing_file)&(os.path.exists(file_xml)):
with open(file_xml) as f:
if '<Error>' in f.read():
print(species_name, ': <Error> found in the saved file. Deleting...')
os.remove(file_xml)
else:
print(species_name, ': reading xml from file')
root = lxml.etree.parse(file_xml, parser=lxml.etree.XMLParser())
flag = False
if flag:
try:
sra_handle = Entrez.esearch(db="sra", term=search_term, retmax=10000000)
except HTTPError as e:
print(e, '- Trying Entrez.esearch() again...')
sra_handle = Entrez.esearch(db="sra", term=search_term, retmax=10000000)
sra_record = Entrez.read(sra_handle)
record_ids = sra_record["IdList"]
num_record = len(record_ids)
print('Number of SRA records:', num_record)
start_time = time.time()
query_search_time = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
root = None
for i in numpy.arange(numpy.ceil(num_record//retmax)+1):
start = int(i*retmax)
end = int(((i+1)*retmax)-1) if num_record >= int(((i+1)*retmax)-1) else num_record
print('processing SRA records:', start, '-', end, flush=True)
max_retry = 10
for i in range(max_retry):
try:
handle = Entrez.efetch(db="sra", id=record_ids[start:end], rettype="full", retmode="xml", retmax=retmax)
except HTTPError as e:
sleep_second = 60
print('{} - Trying Entrez.efetch() again after {} seconds...'.format(e, sleep_second), flush=True)
time.sleep(sleep_second)
continue
try:
chunk = lxml.etree.parse(handle).getroot()
except:
print('XML may be truncated. Retrying...', flush=True)
continue
break
if root is None:
root = chunk
else:
root.append(chunk)
elapsed_time = int(time.time() - start_time)
xml_string = lxml.etree.tostring(root, pretty_print=True)
for line in str(xml_string).split('\n'):
if '<Error>' in line:
print(line)
if os.path.exists(file_xml):
os.remove(file_xml)
raise Exception(species_name, ': <Error> found in the xml.')
if save_xml:
with open(file_xml, 'wb') as f:
f.write(xml_string)
return root
def create_run_dir(run_path, run_no=1):
if not os.path.exists(os.path.join(run_path + '_' + str(run_no))):
os.makedirs(os.path.join(run_path + '_' + str(run_no)))
return(os.path.join(run_path + '_' + str(run_no)))
else:
run_no = run_no + 1
return create_run_dir(run_path, run_no)
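# Illustrative sketch only, not part of the original module: create_run_dir returns the
# first numbered directory that did not exist yet. The base path below is hypothetical.
def _demo_create_run_dir(base_path='metadata_run'):
    first = create_run_dir(base_path)   # e.g. 'metadata_run_1' in a clean working directory
    second = create_run_dir(base_path)  # the next call then yields 'metadata_run_2'
    return first, second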
class Metadata:
column_names = ['scientific_name','tissue','curate_group','tissue_original','genotype','sex','age','treatment','source_name',
'is_sampled','is_qualified','exclusion','protocol','bioproject','biosample',
'experiment','run','sra_primary','sra_sample','sra_study','study_title','exp_title','design',
'sample_title','sample_description','lib_name','lib_layout','lib_strategy','lib_source',
'lib_selection','instrument','total_spots','total_bases','size','nominal_length','nominal_sdev',
'spot_length','read_index','read_class','read_type','base_coord','lab','center','submitter_id',
'pubmed_id','taxid','published_date','biomaterial_provider','cell','location','antibody','batch',
'misc','NCBI_Link','AWS_Link','GCP_Link',]
id_cols = ['bioproject','biosample','experiment','run','sra_primary','sra_sample','sra_study']
def __init__(self, column_names=column_names):
self.config_dir = ''
self.df = pandas.DataFrame(index=[], columns=column_names)
def reorder(self, omit_misc=False, column_names=column_names):
if (self.df.shape[0]==0):
return None
for col in column_names:
if not col in self.df.columns:
self.df.loc[:,col] = ''
if omit_misc:
self.df = self.df.loc[:,column_names]
else:
misc_columns = [ col for col in self.df.columns if col not in column_names ]
self.df = self.df.loc[:,column_names+misc_columns]
self.df.loc[:,'exclusion'] = self.df.loc[:,'exclusion'].replace('', 'no')
# reorder curate_group to the front
if 'curate_group' in self.df.columns:
cols = list(self.df)
cols.insert(1, cols.pop(cols.index('curate_group')))
self.df = self.df.loc[:, cols]
def from_DataFrame(df):
metadata = Metadata()
metadata.df = df
metadata.reorder(omit_misc=False)
return metadata
def from_xml(xml_root):
if isinstance(xml_root, lxml.etree._Element):
xml_root = lxml.etree.ElementTree(xml_root)
root = xml_root
assert isinstance(root, lxml.etree._ElementTree), "Unknown input type."
df = pandas.DataFrame()
for entry in root.iter(tag="EXPERIMENT_PACKAGE"):
items = []
bioproject = entry.findall('.//EXTERNAL_ID[@namespace="BioProject"]')
if not len(bioproject):
labels = entry.findall('.//LABEL')
for label in labels:
text = label.text
if text.startswith("PRJ"):
bioproject = [label]
break
is_single = len(entry.findall('.//LIBRARY_LAYOUT/SINGLE'))
is_paired = len(entry.findall('.//LIBRARY_LAYOUT/PAIRED'))
if is_single:
library_layout = ["single"]
elif is_paired:
library_layout = ["paired"]
else:
library_layout = [""]
values = entry.findall('.//VALUE')
is_protected = ["No"]
if len(values):
for value in values:
text = value.text
if not text is None:
if text.endswith("PROTECTED"):
is_protected = ["Yes"]
break
items.append(["bioproject", bioproject])
items.append(["scientific_name", entry.xpath('./SAMPLE/SAMPLE_NAME/SCIENTIFIC_NAME')])
items.append(["biosample", entry.findall('.//EXTERNAL_ID[@namespace="BioSample"]')])
items.append(["experiment", entry.xpath('./EXPERIMENT/IDENTIFIERS/PRIMARY_ID')])
items.append(["run", entry.xpath('./RUN_SET/RUN/IDENTIFIERS/PRIMARY_ID')])
items.append(["sra_primary", entry.xpath('./SUBMISSION/IDENTIFIERS/PRIMARY_ID')])
items.append(["sra_sample", entry.xpath('./SAMPLE/IDENTIFIERS/PRIMARY_ID')])
items.append(["sra_study", entry.xpath('./EXPERIMENT/STUDY_REF/IDENTIFIERS/PRIMARY_ID')])
items.append(["published_date", entry.xpath('./RUN_SET/RUN/@published')])
items.append(["exp_title", entry.xpath('./EXPERIMENT/TITLE')])
items.append(["design", entry.xpath('./EXPERIMENT/DESIGN/DESIGN_DESCRIPTION')])
items.append(["lib_name", entry.xpath('./EXPERIMENT/DESIGN/LIBRARY_DESCRIPTOR/LIBRARY_NAME')])
items.append(["lib_strategy", entry.xpath('./EXPERIMENT/DESIGN/LIBRARY_DESCRIPTOR/LIBRARY_STRATEGY')])
items.append(["lib_source", entry.xpath('./EXPERIMENT/DESIGN/LIBRARY_DESCRIPTOR/LIBRARY_SOURCE')])
items.append(["lib_selection", entry.xpath('./EXPERIMENT/DESIGN/LIBRARY_DESCRIPTOR/LIBRARY_SELECTION')])
items.append(["lib_layout", library_layout])
items.append(["nominal_length", entry.xpath('./EXPERIMENT/DESIGN/LIBRARY_DESCRIPTOR/LIBRARY_LAYOUT/PAIRED/@NOMINAL_LENGTH')])
items.append(["nominal_sdev", entry.xpath('./EXPERIMENT/DESIGN/LIBRARY_DESCRIPTOR/LIBRARY_LAYOUT/PAIRED/@NOMINAL_SDEV')])
items.append(["spot_length", entry.xpath('./EXPERIMENT/DESIGN/SPOT_DESCRIPTOR/SPOT_DECODE_SPEC/SPOT_LENGTH')])
items.append(["read_index", entry.xpath('./EXPERIMENT/DESIGN/SPOT_DESCRIPTOR/SPOT_DECODE_SPEC/READ_SPEC/READ_INDEX')])
items.append(["read_class", entry.xpath('./EXPERIMENT/DESIGN/SPOT_DESCRIPTOR/SPOT_DECODE_SPEC/READ_SPEC/READ_CLASS')])
items.append(["read_type", entry.xpath('./EXPERIMENT/DESIGN/SPOT_DESCRIPTOR/SPOT_DECODE_SPEC/READ_SPEC/READ_TYPE')])
items.append(["base_coord", entry.xpath('./EXPERIMENT/DESIGN/SPOT_DESCRIPTOR/SPOT_DECODE_SPEC/READ_SPEC/BASE_COORD')])
items.append(["instrument", entry.xpath('./EXPERIMENT/PLATFORM/ILLUMINA/INSTRUMENT_MODEL')])
items.append(["lab", entry.xpath('./SUBMISSION/@lab_name')])
items.append(["center", entry.xpath('./SUBMISSION/@center_name')])
items.append(["submitter_id", entry.xpath('./SUBMISSION/IDENTIFIERS/SUBMITTER_ID')])
items.append(["study_title", entry.xpath('./STUDY/DESCRIPTOR/STUDY_TITLE')])
items.append(["pubmed_id", entry.xpath('./STUDY/STUDY_LINKS/STUDY_LINK/XREF_LINK/ID')])
items.append(["sample_title", entry.xpath('./SAMPLE/TITLE')])
items.append(["taxid", entry.xpath('./SAMPLE/SAMPLE_NAME/TAXON_ID')])
items.append(["sample_description", entry.xpath('./SAMPLE/DESCRIPTION')])
items.append(["total_spots", entry.xpath('./RUN_SET/RUN/@total_spots')])
items.append(["total_bases", entry.xpath('./RUN_SET/RUN/@total_bases')])
items.append(["size", entry.xpath('./RUN_SET/RUN/@size')])
items.append(["NCBI_Link", entry.xpath('./RUN_SET/RUN/SRAFiles/SRAFile[@supertype="Primary ETL"]/Alternatives[@org="NCBI"]/@url')])
items.append(["AWS_Link", entry.xpath('./RUN_SET/RUN/SRAFiles/SRAFile[@supertype="Primary ETL"]/Alternatives[@org="AWS"]/@url')])
items.append(["GCP_Link", entry.xpath('./RUN_SET/RUN/SRAFiles/SRAFile[@supertype="Primary ETL"]/Alternatives[@org="GCP"]/@url')])
row = []
for item in items:
try:
if isinstance(item[1][0], (lxml.etree._ElementUnicodeResult, int, str)):
row.append(str(item[1][0]))
else:
row.append(item[1][0].text)
except:
row.append("")
colnames = []
for item in items:
colnames.append(item[0])
row_df = pandas.DataFrame(row).T
row_df.columns = colnames
sas = entry.xpath('./SAMPLE/SAMPLE_ATTRIBUTES/SAMPLE_ATTRIBUTE')
for sa in sas:
tag = sa.xpath('./TAG')
if not tag[0].text == None:
tag = tag[0].text.lower()
tag = re.sub(" \(.*", "", tag)
tag = re.sub(" ", "_", tag)
if not tag in row_df.columns:
value = sa.xpath('./VALUE')
if len(value):
value = value[0].text
if tag in colnames:
tag = tag+"_2"
sa_df = pandas.DataFrame([value])
sa_df.columns = [tag]
row_df = pandas.concat([row_df,sa_df], axis=1)
df = pandas.concat([df, row_df], ignore_index=True, sort=False)
if "scientific_name" in df.columns and len(df.loc[(df.loc[:,"scientific_name"]==""), "scientific_name"]):
species_name = df.loc[~(df.loc[:,"scientific_name"]==""), "scientific_name"].iloc[0]
df.loc[(df.loc[:,"scientific_name"]==""), "scientific_name"] = species_name
metadata = Metadata()
metadata.df = df
metadata.reorder(omit_misc=False)
return metadata
def mark_exclude_ids(self, id_cols=id_cols):
config = pandas.read_csv(os.path.join(self.config_dir, 'exclude_id.config'),
parse_dates=False, infer_datetime_format=False, quotechar='"', sep='\t',
header=None, index_col=None, skip_blank_lines=True, comment='#')
config = config.replace(numpy.nan, '')
for i in numpy.arange(config.shape[0]):
reason = config.iloc[i,0]
exclude_id = config.iloc[i,1]
for col in id_cols:
is_exclude_id = (self.df.loc[:,col]==exclude_id).fillna(False)
if any(is_exclude_id):
self.df.loc[is_exclude_id,'exclusion'] = reason
def unmark_rescue_ids(self, id_cols=id_cols):
try:
config = pandas.read_csv(os.path.join(self.config_dir, 'rescue_id.config'),
parse_dates=False, infer_datetime_format=False, quotechar='"', sep='\t',
header=None, index_col=None, skip_blank_lines=True, comment='#')
except:
config = | pandas.DataFrame() | pandas.DataFrame |
import igraph as Graph
import pandas as pd
import os
import numpy as np
import spacy
from sklearn.cluster import KMeans
from pylab import *
import re
import time
import src.pickle_handler as ph
import src.relation_creator as rc
# the dataframe has been preprocessed by many other functions. However, we only need a subset of this information to
# create a graph representation of the relations.
# distillDataframe() takes the dataframe given to it. It selects
# a) character_A
# b) character_B
# formate_bible should transform all rows that contain >= 2 characters into multiple rows between all character pairs,
# from [lukas, mark, maria] to [[lukas, mark],[lukas, maria], [maria, mark]]
def formate_bible(df_bible):
# Parameter
# df_bible : expects a pandas dataframe that consists of "characters" and "emotion" columns
# Return
# df_bible_formate : pandas dataframe that consists of the 3 columns "character_A", "character_B", "emotion"
df_bible_formate = pd.DataFrame()
for i, row in df_bible.iterrows():
names = row["characters"]
emotion = row["emotion"]
names = names.replace("[", "")
names = names.replace("]", "")
names = names.replace(",", "|")
names = names.replace("'", "")
names = names.strip()
names = names.replace("\n", "")
names = names.split("|")
names_remove = names.copy()
if len(names) >= 2:
for name in names:
for r_name in names_remove:
if name != r_name:
new_row = {
"character_A": name.strip(),
"character_B": r_name.strip(),
"emotion": emotion,
}
df_bible_formate = df_bible_formate.append(
new_row, ignore_index=True
)
names_remove.remove(name)
print(" - Pre-processed the dataframe to run the graph generation")
return df_bible_formate
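# Illustrative sketch only, not part of the original script: formate_bible expands a verse
# that mentions several characters into one row per unordered character pair. A single row
# with characters "[lukas, mark, maria]" and emotion 1.0 therefore becomes the three rows
# (lukas, mark), (lukas, maria) and (mark, maria), each carrying emotion 1.0.
def _demo_formate_bible():
    demo = pd.DataFrame({"characters": ["[lukas, mark, maria]"], "emotion": [1.0]})
    return formate_bible(demo)  # -> 3 rows with columns character_A, character_B, emotion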
# distillDataframe should reduce the dataframe to distinct rows which have been aggregated in terms of
# their emotion. The dataframe needs to be aggregated because character pairs may occur in multiple verses;
# to avoid listing those multiple times within a graph, and to give a more "global" representation of their
# emotional state, the emotion is aggregated. If the emotion_mean > 0.75, the relation is considered to be positive;
# if the emotion_mean < -0.75, the relation is considered to be negative;
# otherwise it is neutral. The relation will later be used to assign a color to the graph.
def distillDataframe(df_bible, load, threshold, save):
# Parameter
# df_bible : pandas dataframe of the bible
# load : determines if a csv should be loaded or if one has to be produced by the function, bool
# threshold : counts how often relations should occur before being considered reasonable,
#             i.e. one-time mentions may not be displayed, integer
# save : if file should be saved at the end, bool
# Return
# df_distilled : pandas dataframe consisting of distinct relations
# label : unique list of all characters
# create a list of labels (names) which have been detected in both rows character_A and #character_B
file = os.path.join("src", "csv", "bibleTA_distilled" + "_" + str(threshold) + ".csv")
if load == True:
try:
df_distilled = | pd.read_csv(file) | pandas.read_csv |
# License: BSD_3_clause
#
# Copyright (c) 2015, <NAME>, <NAME>, <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of the Technical University of Denmark (DTU)
# nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import pandas as pd
import numpy as np
from datetime import datetime, time
import gc
import math
import random
class expando:
pass
#Function needed to define distances between nodes from longitudes and latitudes
def distance_from_long_lat(lat1, long1, lat2, long2):
# Convert latitude and longitude to spherical coordinates in radians.
degrees_to_radians = math.pi/180.0
# phi = 90 - latitude
phi1 = (90.0 - lat1)*degrees_to_radians
phi2 = (90.0 - lat2)*degrees_to_radians
# theta = longitude
theta1 = long1*degrees_to_radians
theta2 = long2*degrees_to_radians
# Compute spherical distance from spherical coordinates.
# For two locations in spherical coordinates (1, theta, phi) and (1, theta', phi')
# cosine( arc length ) = sin phi sin phi' cos(theta-theta') + cos phi cos phi'
# distance = rho * arc length
cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +
math.cos(phi1)*math.cos(phi2))
if cos>1:#numerical approximations can bring to a number slightly >1
cos=1
arc = math.acos( cos )
# Remember to multiply arc by the radius of the earth
# in your favorite set of units to get length.
R_earth = 6371 #km
arc = arc * R_earth
return arc
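# Illustrative sketch only, not part of the original module: a quick sanity check for the
# great-circle formula above. Identical coordinates give 0 km, and one degree of latitude
# along the same meridian is roughly 111 km (6371 km * pi / 180).
def _demo_distance_from_long_lat():
    same_point = distance_from_long_lat(55.0, 12.0, 55.0, 12.0)  # ~0 km
    one_degree = distance_from_long_lat(55.0, 12.0, 56.0, 12.0)  # ~111 km
    return same_point, one_degree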
class dataReader:
def __init__(self, countries,max_number_loc,renewable_type,data_type,start_time,
end_time,fore_start_time,fore_end_time,nbr_leadTimes,folder_location):
self._set_attributes(countries,max_number_loc,renewable_type,data_type,start_time,
end_time,fore_start_time,fore_end_time,nbr_leadTimes,folder_location)
self._check_countries()
self._load_observations()
self._tod_observations()
self._load_forecasts()
self._get_distances()
print('Data has been imported!')
pass
#Function that stores all inputs as attributes of the output
def _set_attributes(self, countries,max_number_loc,renewable_type,data_type,start_time,
end_time,fore_start_time,fore_end_time,nbr_leadTimes,folder_location):
self.attributes = expando()
self.attributes.renew_type = renewable_type
self.attributes.data_type = data_type
self.attributes.folder_loc = folder_location
self.attributes.start_time = start_time
self.attributes.end_time = end_time
self.attributes.fore_start_time = fore_start_time
self.attributes.fore_end_time = fore_end_time
self.attributes.nbr_leadT = nbr_leadTimes
self.attributes.countries = countries
self.attributes.max_number_loc = max_number_loc
self.metadata = expando()
pass
#Function that check input countries and display an error message if they
#don't correspond. Returns the available countries and indices from nodes
def _check_countries(self):
self.metadata.network_nodes = pd.read_csv(self.attributes.folder_loc+'/Metadata/network_nodes.csv',
sep=',')
available_countries = set(self.metadata.network_nodes.country)
countries = self.attributes.countries
if bool(countries-available_countries.intersection(countries)):
print(', '.join(countries-available_countries.intersection(countries)) + \
' are not in the country list. ' + 'See in:' + ', '.join(available_countries))
self.attributes.countries = list(available_countries.intersection(countries))
ix_net_nodes_bool = np.in1d(self.metadata.network_nodes.country, self.attributes.countries)
self.metadata.ix_nodes = np.where(ix_net_nodes_bool)[0]+1
if self.attributes.max_number_loc != None and len(self.metadata.ix_nodes)>self.attributes.max_number_loc:
self.metadata.ix_nodes = np.sort(random.sample(list(self.metadata.ix_nodes),
self.attributes.max_number_loc))
print('The number of nodes selected was higher than the maximum number of locations (' +\
str(self.attributes.max_number_loc) + ') and therefore reduced.')
pass
#Function that loads observations and stores them in the 'obs' attribute of output
def _load_observations(self):
filename = self.attributes.folder_loc + '/Nodal_TS/' + self.attributes.renew_type + \
'_signal_' + self.attributes.data_type + '.csv'
data_observations_aux = pd.read_csv(filename, sep=',')
#Getting observations of training period
ix_time_bool = np.in1d(data_observations_aux.Time,
[self.attributes.start_time,self.attributes.end_time])
ix_time = np.where(ix_time_bool)[0]
if len(ix_time) == 1:
sys.exit('Training period contains only one element.'+ \
'There must be an error in the definition of starting/ending dates.'+\
'Check day, month and year selected. Remember that data are available hourly only.')
ix_net_nodes = np.append(0, self.metadata.ix_nodes)
data_observations = data_observations_aux.ix[ix_time[0]:ix_time[len(ix_time)-1],
ix_net_nodes]
data_observations.Time = pd.to_datetime(data_observations.Time)
del ix_time_bool, ix_time
#Getting observations of testing period
ix_time_bool = np.in1d(data_observations_aux.Time,
[self.attributes.fore_start_time,self.attributes.fore_end_time])
ix_time = np.where(ix_time_bool)[0]
data_observations_cf = data_observations_aux.ix[ix_time[0]:ix_time[len(ix_time)-1],
ix_net_nodes]
data_observations_cf.Time = | pd.to_datetime(data_observations_cf.Time) | pandas.to_datetime |
# This code extracts the features from the raw joined dataset (data.csv)
# and saves them in the LibSVM format.
# Usage: python construct_features.py
import pandas as pd
import numpy as np
from sklearn.datasets import dump_svmlight_file
df = pd.read_csv("data.csv", low_memory=False)
# NPU
NPU = df.NPU.copy()
NPU[NPU == ' '] = np.nan
NPU = pd.get_dummies(NPU, prefix="NPU")
# SiteZip
SiteZip = df.SiteZip.copy()
SiteZip = SiteZip.str.replace(',','')
SiteZip = SiteZip.str.replace('\.00','')
SiteZip = SiteZip.replace('0',np.nan)
SiteZip = pd.get_dummies(SiteZip, prefix="SiteZip")
# Submarket1
Submarket1 = df.Submarket1.copy()
Submarket1 = pd.get_dummies(Submarket1, prefix="Submarket1")
# TAX_DISTR
TAX_DISTR = df.TAX_DISTR.copy()
TAX_DISTR[TAX_DISTR == ' '] = np.nan
TAX_DISTR = pd.get_dummies(TAX_DISTR, prefix="TAX_DISTR")
# NBHD
NBHD = df.NBHD.copy()
NBHD[NBHD == ' '] = np.nan
NBHD = pd.get_dummies(NBHD, prefix="NBHD")
# ZONING_NUM
ZONING_NUM = df.ZONING_NUM.copy()
ZONING_NUM[ZONING_NUM == ' '] = np.nan
ZONING_NUM = pd.get_dummies(ZONING_NUM, prefix="ZONING_NUM")
# building_c
building_c = df.building_c.copy()
building_c[building_c == ' '] = np.nan
building_c = pd.get_dummies(building_c, prefix="building_c")
# PROP_CLASS
PROP_CLASS = df.PROP_CLASS.copy()
PROP_CLASS[PROP_CLASS == ' '] = np.nan
PROP_CLASS = pd.get_dummies(PROP_CLASS, prefix="PROP_CLASS")
# Existing_p
Existing_p = df.Existing_p.copy()
Existing_p[Existing_p == ' '] = np.nan
Existing_p = pd.get_dummies(Existing_p, prefix="Existing_p")
# PropertyTy
PropertyTy = df.PropertyTy.copy()
PropertyTy = pd.get_dummies(PropertyTy, prefix="PropertyTy")
# secondaryT
secondaryT = df.secondaryT.copy()
secondaryT[secondaryT == ' '] = np.nan
secondaryT = pd.get_dummies(secondaryT, prefix="secondaryT")
# LUC
LUC = df.LUC.copy()
LUC[LUC == ' '] = np.nan
LUC = pd.get_dummies(LUC, prefix="LUC")
# Taxes_Per_
Taxes_Per_ = df.Taxes_Per_.copy()
Taxes_Per_zero = (Taxes_Per_ == "0").apply(int)
Taxes_Per_zero.name = 'Taxes_Per_zero'
Taxes_Per_ = Taxes_Per_.str.replace(',','').astype(float)
Taxes_Per_ = np.log1p(Taxes_Per_)
Taxes_Per_ = Taxes_Per_ / Taxes_Per_.max()
Taxes_Per_ = pd.concat([Taxes_Per_, Taxes_Per_zero], axis=1)
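# Illustrative note, not part of the original script: most numeric columns below repeat the
# same recipe as Taxes_Per_ above -- keep a 0/1 flag for the zero sentinel, strip thousands
# separators, apply log1p, scale to [0, 1], then concat the flag back on. A hedged helper
# capturing that pattern could look like this (the helper name is ours, not the author's):
def _log_scale_with_zero_flag(raw, flag_name):
    flag = (raw == "0").apply(int)
    flag.name = flag_name
    values = np.log1p(raw.str.replace(",", "").astype(float))
    values = values / values.max()
    return pd.concat([values, flag], axis=1)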
# Taxes_Tota
Taxes_Tota = df.Taxes_Tota.copy()
Taxes_Tota_zero = (Taxes_Tota == "0").apply(int)
Taxes_Tota_zero.name = 'Taxes_Tota_zero'
Taxes_Tota = Taxes_Tota.str.replace(',','').astype(float)
Taxes_Tota = np.log1p(Taxes_Tota)
Taxes_Tota = Taxes_Tota / Taxes_Tota.max()
Taxes_Tota = pd.concat([Taxes_Tota, Taxes_Tota_zero], axis=1)
# TOT_APPR
TOT_APPR = df.TOT_APPR.copy()
TOT_APPR_zero = (TOT_APPR == "0").apply(int)
TOT_APPR_zero.name = 'TOT_APPR_zero'
TOT_APPR = TOT_APPR.str.replace(',','').astype(float)
TOT_APPR = np.log1p(TOT_APPR)
TOT_APPR = TOT_APPR / TOT_APPR.max()
TOT_APPR = pd.concat([TOT_APPR, TOT_APPR_zero], axis=1)
# VAL_ACRES
VAL_ACRES = df.VAL_ACRES.copy()
VAL_ACRES_zero = (VAL_ACRES == 0).apply(int)
VAL_ACRES_zero.name = 'VAL_ACRES_zero'
VAL_ACRES = np.log1p(VAL_ACRES)
VAL_ACRES = VAL_ACRES / VAL_ACRES.max()
VAL_ACRES = pd.concat([VAL_ACRES, VAL_ACRES_zero], axis=1)
# For_Sale_P
For_Sale_P = df.For_Sale_P.copy()
For_Sale_P_notNA = (For_Sale_P != " ").apply(int)
For_Sale_P_notNA.name = 'For_Sale_P_notNA'
For_Sale_P[For_Sale_P == ' '] = 0
For_Sale_P = For_Sale_P.astype(float)
For_Sale_P = np.log1p(For_Sale_P)
For_Sale_P = For_Sale_P / For_Sale_P.max()
For_Sale_P = pd.concat([For_Sale_P, For_Sale_P_notNA], axis=1)
# Last_Sale1
Last_Sale1 = df.Last_Sale1.copy()
Last_Sale1_zero = (Last_Sale1 == "0").apply(int)
Last_Sale1_zero.name = "Last_Sale1_zero"
Last_Sale1 = Last_Sale1.str.replace(',','').astype(float)
Last_Sale1 = np.log1p(Last_Sale1)
Last_Sale1 = (Last_Sale1 - Last_Sale1.min()) / (Last_Sale1.max() - Last_Sale1.min())
Last_Sale1 = pd.concat([Last_Sale1, Last_Sale1_zero], axis=1)
# yearbuilt
yearbuilt = df.yearbuilt.copy()
yearbuilt_zero = (yearbuilt == "0").apply(int)
yearbuilt_zero.name = "yearbuilt_zero"
yearbuilt[yearbuilt == "0"] = np.nan
yearbuilt = yearbuilt.str.replace(',','').astype(float)
yearbuilt = (yearbuilt - yearbuilt.min()) / (yearbuilt.max() - yearbuilt.min())
yearbuilt = yearbuilt.fillna(0)
yearbuilt = pd.concat([yearbuilt, yearbuilt_zero], axis=1)
# year_reno
year_reno = df.year_reno.copy()
reno = (year_reno != "0").apply(int)
reno.name = "reno"
year_reno[year_reno == "0"] = np.nan
year_reno = year_reno.str.replace(',','').astype(float)
year_reno = (year_reno - year_reno.min()) / (year_reno.max() - year_reno.min())
year_reno = year_reno.fillna(0)
year_reno = pd.concat([year_reno, reno], axis=1)
# Lot_Condition
Lot_Condition = df.Lot_Condition.copy()
Lot_Condition[Lot_Condition == ' '] = np.nan
Lot_Condition = pd.get_dummies(Lot_Condition, prefix="Lot_Condition")
# Structure_Condition
Structure_Condition = df.Structure_Condition.copy()
Structure_Condition[Structure_Condition == ' '] = np.nan
Structure_Condition = pd.get_dummies(Structure_Condition, prefix="Structure_Condition")
# Sidewalks
Sidewalks = df.Sidewalks.copy()
Sidewalks[Sidewalks == "YES"] = "Yes"
Sidewalks[Sidewalks == " "] = np.nan
Sidewalks = pd.get_dummies(Sidewalks, prefix="Sidewalks")
# Multiple_Violations
Multiple_Violations = df.Multiple_Violations.copy()
Multiple_Violations[Multiple_Violations == ' '] = np.nan
Multiple_Violations = pd.get_dummies(Multiple_Violations, prefix="Multiple_Violations")
# Vacancy_pc
Vacancy_pc = df.Vacancy_pc.copy()
Vacancy_pc_nonzero = (Vacancy_pc != 0).apply(int)
Vacancy_pc_nonzero.name = "Vacancy_pc_nonzero"
Vacancy_pc = Vacancy_pc / Vacancy_pc.max()
Vacancy_pc = pd.concat([Vacancy_pc, Vacancy_pc_nonzero], axis=1)
# Total_Avai
Total_Avai = df.Total_Avai.copy()
Total_Avai_nonzero = (Total_Avai != "0").apply(int)
Total_Avai_nonzero.name = "Total_Avai_nonzero"
Total_Avai = Total_Avai.str.replace(',','').astype(float)
Total_Avai = np.log1p(Total_Avai)
Total_Avai = (Total_Avai - Total_Avai.min()) / (Total_Avai.max() - Total_Avai.min())
Total_Avai = pd.concat([Total_Avai, Total_Avai_nonzero], axis=1)
# Percent_Le
Percent_Le = df.Percent_Le.copy()
Percent_Le_bin = (Percent_Le == 100).apply(int)
Percent_Le_bin[Percent_Le == 0] = -1
Percent_Le_bin.name = "Percent_Le_bin"
Percent_Le = Percent_Le / Percent_Le.max()
Percent_Le = pd.concat([Percent_Le, Percent_Le_bin], axis=1)
# LandArea_a
LandArea_a = df.LandArea_a.copy()
LandArea_a_zero = (LandArea_a == "0").apply(int)
LandArea_a_zero.name = "LandArea_a_zero"
LandArea_a = LandArea_a.str.replace(',','').astype(float)
LandArea_a = np.log1p(LandArea_a)
LandArea_a = LandArea_a / LandArea_a.max()
LandArea_a = pd.concat([LandArea_a, LandArea_a_zero], axis=1)
# totalbuild
totalbuild = df.totalbuild.copy()
totalbuild_nonzero = (totalbuild != 0).apply(int)
totalbuild_nonzero.name = "totalbuild_nonzero"
totalbuild = np.log1p(totalbuild)
totalbuild = totalbuild / totalbuild.max()
totalbuild = pd.concat([totalbuild, totalbuild_nonzero], axis=1)
# avg_sf
avg_sf = df.avg_sf.copy()
avg_sf_nonzero = (avg_sf != "0").apply(int)
avg_sf_nonzero.name = "avg_sf_nonzero"
avg_sf[avg_sf == "0"] = np.nan
avg_sf = avg_sf.str.replace(',','').astype(float)
avg_sf = (avg_sf - avg_sf.min()) / (avg_sf.max() - avg_sf.min())
avg_sf = avg_sf.fillna(0)
avg_sf = pd.concat([avg_sf, avg_sf_nonzero], axis=1)
# Floorsize
Floorsize = df.Floorsize.copy()
Floorsize[Floorsize == "0"] = np.nan
Floorsize = Floorsize.str.replace(',','').astype(float)
Floorsize = np.log1p(Floorsize)
Floorsize = (Floorsize - Floorsize.min()) / (Floorsize.max() - Floorsize.min())
Floorsize = Floorsize.fillna(Floorsize.median())
# BldgSF
BldgSF = df.BldgSF.copy()
BldgSF_zero = (BldgSF == "0").apply(int)
BldgSF_zero.name = "BldgSF_zero"
BldgSF = BldgSF.str.replace(',','').astype(float)
BldgSF = np.log1p(BldgSF)
BldgSF = BldgSF / BldgSF.max()
BldgSF = pd.concat([BldgSF, BldgSF_zero], axis=1)
# LotSize
LotSize = df.LotSize.copy()
LotSize_zero = (LotSize == "0").apply(int)
LotSize_zero.name = "LotSize_zero"
LotSize = LotSize.str.replace(',','').astype(float)
LotSize = np.log1p(LotSize)
LotSize = LotSize / LotSize.max()
LotSize = | pd.concat([LotSize, LotSize_zero], axis=1) | pandas.concat |
import pandas as pd
import numpy as np
from pandas._testing import assert_frame_equal
from NEMPRO import planner, units
def test_start_off_with_initial_down_time_of_zero():
forward_data = pd.DataFrame({
'interval': [0, 1, 2],
'nsw-energy': [200, 200, 200]})
p = planner.DispatchPlanner(dispatch_interval=60, planning_horizon=3)
u = units.GenericUnit(p, initial_dispatch=0.0)
u.set_service_region('energy', 'nsw')
u.add_to_market_energy_flow(capacity=100.0)
u.add_primary_energy_source(capacity=100.0)
u.add_unit_minimum_operating_level(min_loading=50.0, shutdown_ramp_rate=100.0, start_up_ramp_rate=100.0,
min_up_time=60, min_down_time=120, time_in_initial_state=0)
p.add_regional_market('nsw', 'energy', forward_data)
p.optimise()
dispatch = u.get_dispatch()
expect_dispatch = pd.DataFrame({
'interval': [0, 1, 2],
'net_dispatch': [0.0, 0.0, 100.0]
})
assert_frame_equal(expect_dispatch, dispatch)
def test_start_off_with_initial_down_time_less_than_min_down_time():
forward_data = pd.DataFrame({
'interval': [0, 1, 2],
'nsw-energy': [200, 200, 200]})
p = planner.DispatchPlanner(dispatch_interval=60, planning_horizon=3)
u = units.GenericUnit(p, initial_dispatch=0.0)
u.set_service_region('energy', 'nsw')
u.add_to_market_energy_flow(100.0)
u.add_primary_energy_source(100.0)
u.add_unit_minimum_operating_level(min_loading=50.0, shutdown_ramp_rate=100.0, start_up_ramp_rate=100.0,
min_up_time=60, min_down_time=120, time_in_initial_state=60)
p.add_regional_market('nsw', 'energy', forward_data)
p.optimise()
dispatch = u.get_dispatch()
expect_dispatch = pd.DataFrame({
'interval': [0, 1, 2],
'net_dispatch': [0.0, 100.0, 100.0]
})
| assert_frame_equal(expect_dispatch, dispatch) | pandas._testing.assert_frame_equal |
import warnings
import numpy as np
import pytest
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Series,
TimedeltaIndex,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import PeriodArray
from pandas.core.arrays.categorical import CategoricalAccessor
from pandas.core.indexes.accessors import Properties
class TestCatAccessor:
@pytest.mark.parametrize(
"method",
[
lambda x: x.cat.set_categories([1, 2, 3]),
lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),
lambda x: x.cat.rename_categories([1, 2, 3]),
lambda x: x.cat.remove_unused_categories(),
lambda x: x.cat.remove_categories([2]),
lambda x: x.cat.add_categories([4]),
lambda x: x.cat.as_ordered(),
lambda x: x.cat.as_unordered(),
],
)
def test_getname_categorical_accessor(self, method):
# GH#17509
ser = Series([1, 2, 3], name="A").astype("category")
expected = "A"
result = method(ser).name
assert result == expected
def test_cat_accessor(self):
ser = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(ser.cat.categories, Index(["a", "b"]))
assert not ser.cat.ordered, False
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
return_value = ser.cat.set_categories(["b", "a"], inplace=True)
assert return_value is None
tm.assert_categorical_equal(ser.values, exp)
res = ser.cat.set_categories(["b", "a"])
tm.assert_categorical_equal(res.values, exp)
ser[:] = "a"
ser = ser.cat.remove_unused_categories()
tm.assert_index_equal(ser.cat.categories, Index(["a"]))
def test_cat_accessor_api(self):
# GH#9322
assert Series.cat is CategoricalAccessor
ser = Series(list("aabbcde")).astype("category")
assert isinstance(ser.cat, CategoricalAccessor)
invalid = Series([1])
with pytest.raises(AttributeError, match="only use .cat accessor"):
invalid.cat
assert not hasattr(invalid, "cat")
def test_cat_accessor_no_new_attributes(self):
# https://github.com/pandas-dev/pandas/issues/10673
cat = Series(list("aabbcde")).astype("category")
with pytest.raises(AttributeError, match="You cannot add any new attribute"):
cat.cat.xlabel = "a"
def test_cat_accessor_updates_on_inplace(self):
ser = Series(list("abc")).astype("category")
return_value = ser.drop(0, inplace=True)
assert return_value is None
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
return_value = ser.cat.remove_unused_categories(inplace=True)
assert return_value is None
assert len(ser.cat.categories) == 2
def test_categorical_delegations(self):
# invalid accessor
msg = r"Can only use \.cat accessor with a 'category' dtype"
with pytest.raises(AttributeError, match=msg):
Series([1, 2, 3]).cat
with pytest.raises(AttributeError, match=msg):
Series([1, 2, 3]).cat()
with pytest.raises(AttributeError, match=msg):
Series(["a", "b", "c"]).cat
with pytest.raises(AttributeError, match=msg):
Series(np.arange(5.0)).cat
with pytest.raises(AttributeError, match=msg):
Series([Timestamp("20130101")]).cat
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = Index(["a", "b", "c"])
tm.assert_index_equal(ser.cat.categories, exp_categories)
ser.cat.categories = [1, 2, 3]
exp_categories = Index([1, 2, 3])
tm.assert_index_equal(ser.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype="int8")
tm.assert_series_equal(ser.cat.codes, exp_codes)
assert ser.cat.ordered
ser = ser.cat.as_unordered()
assert not ser.cat.ordered
return_value = ser.cat.as_ordered(inplace=True)
assert return_value is None
assert ser.cat.ordered
# reorder
ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = Index(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
ser = ser.cat.set_categories(["c", "b", "a"])
tm.assert_index_equal(ser.cat.categories, exp_categories)
tm.assert_numpy_array_equal(ser.values.__array__(), exp_values)
tm.assert_numpy_array_equal(ser.__array__(), exp_values)
# remove unused categories
ser = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"]))
exp_categories = Index(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_)
ser = ser.cat.remove_unused_categories()
tm.assert_index_equal(ser.cat.categories, exp_categories)
tm.assert_numpy_array_equal(ser.values.__array__(), exp_values)
tm.assert_numpy_array_equal(ser.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
msg = "'Series' object has no attribute 'set_categories'"
with pytest.raises(AttributeError, match=msg):
ser.set_categories([4, 3, 2, 1])
# right: ser.cat.set_categories([4,3,2,1])
# GH#18862 (let Series.cat.rename_categories take callables)
ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
result = ser.cat.rename_categories(lambda x: x.upper())
expected = Series(
Categorical(["A", "B", "C", "A"], categories=["A", "B", "C"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_dt_accessor_api_for_categorical(self):
# https://github.com/pandas-dev/pandas/issues/10661
s_dr = Series(date_range("1/1/2015", periods=5, tz="MET"))
c_dr = s_dr.astype("category")
s_pr = Series(period_range("1/1/2015", freq="D", periods=5))
c_pr = s_pr.astype("category")
s_tdr = Series( | timedelta_range("1 days", "10 days") | pandas.timedelta_range |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
        # GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
        # strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
        # on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
        # raise exception when both major and major_axis are passed
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
        # neg indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assertIs(result.items, self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel.minor_axis)
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assertIs(result.items, self.panel.major_axis)
# this works, but return a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
        # test mixture of args
result = self.panel.transpose('minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assertRaisesRegexp(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
| assert_panel_equal(result, expected) | pandas.util.testing.assert_panel_equal |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
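# Illustrative sketch (an editorial addition, not part of the original test
# module): shows the (sid, estimate, knowledge_date) tuple layout consumed by
# create_expected_df_for_factor_compute. The sids, values, and dates below are
# hypothetical; sid 2 has no tuples, so it comes back as an all-NaN column.
def _example_expected_df_for_factor_compute():
    tuples = [
        (0, 10.0, pd.Timestamp("2015-01-05")),
        (1, 20.0, pd.Timestamp("2015-01-07")),
    ]
    return create_expected_df_for_factor_compute(
        start_date=pd.Timestamp("2015-01-05"),
        sids=[0, 1, 2],
        tuples=tuples,
        end_date=pd.Timestamp("2015-01-09"),
    )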
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
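# Minimal sketch (an editorial addition, not from the zipline test suite): the
# smallest concrete subclass satisfying the WithEstimates contract documented
# above. The single-sid frame is hypothetical; real test cases also mix in
# ZiplineTestCase so that init_class_fixtures actually runs.
class _MinimalEstimatesFixture(WithEstimates):
    @classmethod
    def make_loader(cls, events, columns):
        # Any PipelineLoader over the Estimates columns would do here.
        return PreviousEarningsEstimatesLoader(events, columns)
    @classmethod
    def make_events(cls):
        return pd.DataFrame(
            {
                SID_FIELD_NAME: [0],
                TS_FIELD_NAME: [pd.Timestamp("2015-01-05")],
                EVENT_DATE_FIELD_NAME: [pd.Timestamp("2015-01-12")],
                "estimate": [1.5],
                FISCAL_QUARTER_FIELD_NAME: [1.0],
                FISCAL_YEAR_FIELD_NAME: [2015.0],
            }
        )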
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that loading a single day of estimates data returns the
        expected values for every requested column.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous-quarter loader returns the expected single day
    of estimates data.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next-quarter loader returns the expected single day
    of estimates data.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
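# Illustrative sketch (an editorial addition): counts, outside the fixture
# machinery, how many 4-date permutations survive the constraints described in
# WithEstimatesTimeZero.make_events -- ordered estimates within each quarter
# and all Q1 estimates strictly before the earliest Q1 release date. The
# helper name is hypothetical.
def _count_valid_estimate_interleavings():
    dates = (
        WithEstimatesTimeZero.q1_knowledge_dates
        + WithEstimatesTimeZero.q2_knowledge_dates
    )
    q1_release = WithEstimatesTimeZero.q1_release_dates[0]
    return sum(
        1
        for q1e1, q1e2, q2e1, q2e2 in itertools.permutations(dates, 4)
        if q1e1 < q1e2 < q1_release and q2e1 < q2e2
    )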
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
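# Illustrative sketch (an editorial addition): applies the 'next' vs
# 'previous' selection rules documented in the two classes above to a toy
# pair of single-row knowledge frames. The dates and estimate values are
# hypothetical.
def _sketch_next_vs_previous_selection():
    q1 = pd.DataFrame(
        {EVENT_DATE_FIELD_NAME: [pd.Timestamp("2015-01-13")], "estimate": [0.2]}
    )
    q2 = pd.DataFrame(
        {EVENT_DATE_FIELD_NAME: [pd.Timestamp("2015-01-26")], "estimate": [0.4]}
    )
    asof = pd.Timestamp("2015-01-20")
    # 'next': the earliest quarter whose event date is still >= asof -> q2.
    next_pick = q1 if q1[EVENT_DATE_FIELD_NAME].iloc[-1] >= asof else q2
    # 'previous': the latest quarter whose event date is already <= asof -> q1.
    prev_pick = q2 if q2[EVENT_DATE_FIELD_NAME].iloc[-1] <= asof else q1
    return next_pick, prev_pick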
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that calculates estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
| pd.Timestamp("2015-01-01", tz="UTC") | pandas.Timestamp |