| prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90) |
---|---|---|
from datetime import datetime
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
date_range,
)
import pandas._testing as tm
class TestDataFrameReshape:
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but the labels used in the level are of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self, using_array_manager):
# GH#34708
df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
df2 = df[["x"]]
df2["y"] = df["y"]
if not using_array_manager:
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
key = ("<KEY>")
expected = unstacked[key]
result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = Series(["a", "b", "c", "a"], dtype="category")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{
"a": pd.Categorical(list("axa"), categories=list("abc")),
"b": pd.Categorical(list("bcx"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
# Filling with a value that is not a category raises a TypeError
msg = r"'fill_value=d' is not present in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value="c")
expected = DataFrame(
{
"a": pd.Categorical(list("aca"), categories=list("abc")),
"b": pd.Categorical(list("bcc"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"unstack_idx, expected_values, expected_index, expected_columns",
[
(
("A", "a"),
[[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
MultiIndex.from_tuples(
[(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
),
MultiIndex.from_tuples(
[("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
names=[None, ("A", "a")],
),
),
(
(("A", "a"), "B"),
[[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
Index([3, 4], name="C"),
MultiIndex.from_tuples(
[
("d", "a", 1),
("d", "a", 2),
("d", "b", 1),
("d", "b", 2),
("e", "a", 1),
("e", "a", 2),
("e", "b", 1),
("e", "b", 2),
],
names=[None, ("A", "a"), "B"],
),
),
],
)
def test_unstack_mixed_type_name_in_multiindex(
self, unstack_idx, expected_values, expected_index, expected_columns
):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
result = df.unstack(unstack_idx)
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
{
"state": ["IL", "MI", "NC"],
"index": ["a", "b", "c"],
"some_categories": Series(["a", "b", "c"]).astype("category"),
"A": np.random.rand(3),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": Series([1.0, 50.0, 100.0]).astype("float32"),
"F": Series([3.0, 4.0, 5.0]).astype("float64"),
"G": False,
"H": Series([1, 200, 923442], dtype="int8"),
}
)
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
tm.assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(["state", "index"])
unstack_and_compare(df1, "index")
df1 = df.set_index(["state", "some_categories"])
unstack_and_compare(df1, "some_categories")
df1 = df.set_index(["F", "C"])
unstack_and_compare(df1, "F")
df1 = df.set_index(["G", "B", "state"])
unstack_and_compare(df1, "B")
df1 = df.set_index(["E", "A"])
unstack_and_compare(df1, "E")
df1 = df.set_index(["state", "index"])
s = df1["A"]
unstack_and_compare(s, "index")
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
tm.assert_frame_equal(df.stack(level=[1, 2]), df.stack(level=1).stack(level=1))
tm.assert_frame_equal(
df.stack(level=[-2, -1]), df.stack(level=1).stack(level=1)
)
df_named = df.copy()
return_value = df_named.columns.set_names(range(3), inplace=True)
assert return_value is None
tm.assert_frame_equal(
df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ["exp", "animal", 1]
tm.assert_frame_equal(
df2.stack(level=["animal", 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=["exp", 1]), exp_hair_stacked, check_names=False
)
# When mixed types are passed and the ints are not level
# names, raise
msg = (
"level should contain all level names or all level numbers, not "
"a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ["exp", "animal", 0]
tm.assert_frame_equal(
df3.stack(level=["animal", 0]), animal_hair_stacked, check_names=False
)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=["exp", "animal"])
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
tm.assert_frame_equal(
df2.stack(level=[1, 2]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 1]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 2]), exp_hair_stacked, check_names=False
)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
tm.assert_frame_equal(
df3.stack(level=[0, 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 0]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 1]), exp_hair_stacked, check_names=False
)
def test_unstack_bool(self):
df = DataFrame(
[False, False],
index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),
columns=["col"],
)
rs = df.unstack()
xp = DataFrame(
np.array([[False, np.nan], [np.nan, False]], dtype=object),
index=["a", "b"],
columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),
)
tm.assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=["first", "second", "third"],
)
s = Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["first", "second"],
)
expected = DataFrame(
np.array(
[[np.nan, 0], [0, np.nan], [np.nan, 0], [0, np.nan]], dtype=np.float64
),
index=expected_mi,
columns= | Index(["a", "b"], name="third") | pandas.Index |
#dependencies
from sklearn.cross_decomposition import PLSRegression
from sklearn.model_selection import cross_validate
import pandas as pd
import numpy as np
from scipy.signal import savgol_filter
from sklearn.base import TransformerMixin, RegressorMixin, BaseEstimator
from scipy import sparse, signal
from BaselineRemoval import BaselineRemoval
from sklearn.model_selection import ShuffleSplit
from scipy.sparse.linalg import spsolve
#Import prep methods
import sklearn
from sklearn.preprocessing import StandardScaler, MinMaxScaler,MaxAbsScaler, RobustScaler
from sklearn.preprocessing import FunctionTransformer, PowerTransformer, QuantileTransformer
from sklearn.decomposition import PCA, KernelPCA
class SavgolFilter(BaseEstimator,TransformerMixin):
def __init__(self,window_length=5,polyorder=2,axis=1):
self.__name__='SavgolFilter'
self.window_length=window_length
self.polyorder=polyorder
self.axis=axis
self.output=None
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
self.output=savgol_filter(X,window_length=self.window_length,polyorder=self.polyorder,axis=self.axis)
return self.output
def fit_transform(self,X,y=None):
self.output=savgol_filter(X,window_length=self.window_length,polyorder=self.polyorder,axis=self.axis)
return self.output
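# Hedged usage sketch (added for illustration, not part of the original module): these
# transformers are written to sit ahead of PLSRegression, which is imported at the top of
# this file. sklearn.pipeline.Pipeline is an extra import assumed here, and `X_spectra` /
# `y_ref` are hypothetical inputs (samples x wavelengths, reference values).
def _example_savgol_pls_cv(X_spectra, y_ref):
    from sklearn.pipeline import Pipeline
    model = Pipeline([
        ("savgol", SavgolFilter(window_length=11, polyorder=2)),  # smooth each spectrum (row-wise)
        ("pls", PLSRegression(n_components=5)),
    ])
    cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
    # 'test_score' holds the negative RMSE for each split
    return cross_validate(model, X_spectra, y_ref, cv=cv,
                          scoring="neg_root_mean_squared_error")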
class BaselineASLS(BaseEstimator,TransformerMixin):
#Asymmetric Least Squares
def __init__(self, lam=1e5, p=1e-3, niter=10):
self.__name__='BaselineAsLS'
self.lam=lam
self.p=p
self.niter=niter
self.y=None
self.output=None
def fit(self,X,y=None):
self.y=y
def transform(self,X,y=None):
y=self.y
self.output=np.apply_along_axis(lambda x: self.line_remove(x), 1, X)  # apply per row (spectrum), consistent with the other transformers in this module
return self.output
def line_remove(self,f):
L = len(f)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
z = 0
for i in range(self.niter):
W = sparse.spdiags(w, 0, L, L)
Z=W + self.lam * D.dot(D.transpose())
z = spsolve(Z, w * f)
w = self.p * (f > z) + (1 - self.p) * (f < z)
return z
def fit_transform(self,X,y=None):
self.y=y
return self.transform(X,y)
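# Note on the BaselineASLS parameters above (general guidance, not from the original
# author): in the asymmetric least squares literature (Eilers & Boelens, 2005) the
# asymmetry p is usually chosen in the range 0.001-0.1 and the smoothness lam in roughly
# 1e2-1e9 for signals with positive peaks; around 10 iterations is normally enough for
# the weights w to converge.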
class BaselineModpoly(BaseEstimator,TransformerMixin):
def __init__(self, degree=2):
self.__name__='BaselineModPoly'
self.degree=degree
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.ModPoly(self.degree)
del MP
return X_
def fit_transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.ModPoly(self.degree)
del MP
return X_
class BaselineZhangFit(BaseEstimator,TransformerMixin):
def __init__(self, itermax=50):
self.__name__='BaselineZhangFit'
self.itermax=itermax
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.ZhangFit(itermax=self.itermax)
del MP
return X_
def fit_transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.ZhangFit(itermax=self.itermax)
del MP
return X_
class BaselineIModPoly(BaseEstimator,TransformerMixin):
def __init__(self, degree=2):
self.__name__='BaselineImprovedModPoly'
self.degree=degree
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.IModPoly(self.degree)
del MP
return X_
def fit_transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.IModPoly(self.degree)
del MP
return X_
class BaselineLinear(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='BaselineLinear'
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
return signal.detrend(X)
def fit_transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
return signal.detrend(X)
class BaselineSecondOrder(BaseEstimator,TransformerMixin):
def __init__(self,degree=2):
self.__name__='BaselineSecondOrder'
self.degree=degree
def fit(self,X,y=None):
pass
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
t=np.arange(0,X.shape[1])
X_s= X.apply(lambda x: x- np.polyval(np.polyfit(t,x,self.degree), t),axis=1)
return X_s
def transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
t=np.arange(0,X.shape[1])
X_s= X.apply(lambda x: x- np.polyval(np.polyfit(t,x,self.degree), t),axis=1)
return X_s
class MSC(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='MSC'
self.mean=None
def fit(self,X,y=None):
self.mean= np.array(X.mean(axis=0))
def transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
#self.mean= np.array(X.mean(axis=0))
def transformMSC(x,mean):
m,b= np.polyfit(mean,x,1)
return (x-b)/m  # standard MSC: subtract the fitted offset b, then divide by the fitted slope m
return X.apply(transformMSC,args=(self.mean,),axis=1).values
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
self.mean= np.array(X.mean(axis=0))
def transformMSC(x,mean):
m,b= np.polyfit(mean,x,1)
return (x-b)/m  # standard MSC: subtract the fitted offset b, then divide by the fitted slope m
return X.apply(transformMSC,args=(self.mean,),axis=1).values
class FirstDerivative(BaseEstimator,TransformerMixin):
def __init__(self,d=2):
self.__name__='First Derivative'
self.d=d
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
X_=X.diff(self.d,axis=1)
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
return X_
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
X_=X.diff(self.d,axis=1)
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
return X_
# TO DO:
#Piecewise MSC (PMSC)
#Extended MSC (2nd order), Inverse MSC, EIMSC
#Weighted MSC, Loopy MSC (LMSC)
#Norris-Williams
#WhittakerSmooth (a hedged sketch follows below)
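# Hedged sketch for the WhittakerSmooth item in the TO DO list above (an assumption, not
# the author's implementation): a plain Whittaker smoother with uniform weights, solving
# (I + lam * D'D) z = x for each spectrum, written in the same style as the other
# transformers in this module (numpy, scipy.sparse and spsolve are already imported).
class WhittakerSmooth(BaseEstimator, TransformerMixin):
    def __init__(self, lam=1e5, differences=1):
        self.__name__ = 'WhittakerSmooth'
        self.lam = lam
        self.differences = differences
    def fit(self, X, y=None):
        pass
    def _smooth(self, x):
        m = len(x)
        # difference matrix of the requested order, built as in BaselineASLS above
        D = sparse.csc_matrix(np.diff(np.eye(m), self.differences))
        W = sparse.eye(m, format='csc')
        A = sparse.csc_matrix(W + self.lam * D.dot(D.transpose()))
        return spsolve(A, np.asarray(x, dtype=float))
    def transform(self, X, y=None):
        try:
            X = X.to_numpy()
        except AttributeError:
            pass
        return np.apply_along_axis(self._smooth, 1, X)
    def fit_transform(self, X, y=None):
        return self.transform(X, y)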
class SecondDerivative(BaseEstimator,TransformerMixin):
def __init__(self,d=2):
self.__name__='Second Derivative'
self.d=d
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
X_=X.diff(self.d,axis=1)
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
X_=X_.diff(self.d,axis=1) #second dev
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
return X_
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
X_=X.diff(self.d,axis=1)
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
X_=X_.diff(self.d,axis=1) #second dev
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
return X_
class SNV(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='SNV'
self.mean=None
self.std=None
def fit(self,X):
try:
X=pd.DataFrame(X)
except:
pass
self.mean=X.mean(axis=0)
self.std=X.std(axis=0)
def transform(self,X, y=None):
try:
X=pd.DataFrame(X)
except:
pass
X=X.T
R=(X.subtract(self.mean,axis=0)).divide(self.std+np.finfo(float).eps,axis=0)
return R.T
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
self.fit(X)
return self.transform(X)
class RNV(BaseEstimator,TransformerMixin):
def __init__(self,q=0.1):
self.__name__='RNV'
self.q=q
self.quantile=None
self.std=None
def fit(self,X):
try:
X=pd.DataFrame(X)
except:
pass
X=X.T
self.quantile=X.quantile(q=self.q,axis=1)
self.std=X.quantile(q=self.q,axis=1).std()
def transform(self,X, y=None):
try:
X=pd.DataFrame(X)
except:
pass
X=X.T
R=(X.subtract(self.quantile,axis=0))/(self.std+np.finfo(float).eps)
return R.T
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
self.fit(X)
return self.transform(X)
class MeanScaling(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='MeanScaling'
self.mean=0
def fit(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
self.mean=X.mean(axis=0)
def transform(self,X, y=None):
try:
X=pd.DataFrame(X)
except:
pass
return pd.DataFrame(np.divide(np.asarray(X),np.asarray(self.mean)))
def fit_transform(self,X,y=None):
self.fit(X)
return self.transform(X)
class MedianScaling(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='MedianScaling'
self.median=0
def fit(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
self.median=X.median(axis=0)
def transform(self,X, y=None):
try:
X=pd.DataFrame(X)
except:
pass
return pd.DataFrame(np.divide(np.asarray(X),np.asarray(self.median)))
def fit_transform(self,X,y=None):
self.fit(X)
return self.transform(X)
class MaxScaling(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='MaxScaling'
self.max=0
def fit(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
self.max=X.max(axis=0)
def transform(self,X, y=None):
try:
X=pd.DataFrame(X)
except:
pass
return pd.DataFrame(np.divide(np.asarray(X),np.asarray(self.max)))
def fit_transform(self,X,y=None):
self.fit(X)
return self.transform(X)
class MeanCentering(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='MeanCentering'
self.mean=0
def fit(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
self.mean=X.mean(axis=0)
def transform(self,X, y=None):
try:
X= | pd.DataFrame(X) | pandas.DataFrame |
## This section computes the Frequency, Tfidf, Rowsum, and VectorNorm values.
import pandas as pd
import numpy as np
from TurkishStemmer import TurkishStemmer ##elasticSearch
from math import sqrt
kok = TurkishStemmer()
count = []
vectorNorm = []
class getFrequency:
def __init__(self,spor,saglik,magazin,ekonomi):
self.spor = spor
self.saglik = saglik
self.magazin = magazin
self.ekonomi = ekonomi
count.clear()
## the dataframes that arrive chunk by chunk each have their frequency computed and are then merged.
def get(self,features):
self.features = features
data = self.update(self.ekonomi,self.features)
ekonomiFreq = pd.DataFrame(data=data,columns=features)
ekonomiFreq["nameOfTxt"] = self.ekonomi.iloc[:,0]
ekonomiFreq["classOfTxt"] = "ekonomi"
data = self.update(self.saglik,self.features)
saglikFreq = pd.DataFrame(data=data,columns=features)
saglikFreq["nameOfTxt"] = self.saglik.iloc[:,0]
saglikFreq["classOfTxt"] = "saglik"
data = self.update(self.spor,self.features)
sporFreq = pd.DataFrame(data=data,columns=features)
sporFreq["nameOfTxt"] = self.spor.iloc[:,0]
sporFreq["classOfTxt"] = "spor"
data = self.update(self.magazin,self.features)
magazinFreq = | pd.DataFrame(data=data,columns=features) | pandas.DataFrame |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities functions to manipulate the data in the colab."""
import datetime
import itertools
import operator
from typing import List, Optional
import dataclasses
import numpy as np
import pandas as pd
import pandas.io.formats.style as style
from scipy import stats
from trimmed_match.design import common_classes
TimeWindow = common_classes.TimeWindow
FormatOptions = common_classes.FormatOptions
_operator_functions = {'>': operator.gt,
'<': operator.lt,
'<=': operator.le,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne}
_inverse_op = {'<': '>', '<=': '>=', '>': '<', '>=': '<=', '=': '!='}
@dataclasses.dataclass
class CalculateMinDetectableIroas:
"""Class for the calculation of the minimum detectable iROAS.
Hypothesis testing for H0: iROAS=0 vs H1: iROAS>=min_detectable_iroas based
on one sample X which follows a normal distribution with mean iROAS (unknown)
and standard deviation rmse (known).
Typical usage example:
calc_min_detectable_iroas = CalculateMinDetectableIroas(0.1, 0.9)
min_detectable_iroas = calc_min_detectable_iroas.at(2.0)
"""
# chance of rejecting H0 incorrectly when H0 holds.
significance_level: float = 0.1
# chance of rejecting H0 correctly when H1 holds.
power_level: float = 0.9
# minimum detectable iroas at rmse=1.
rmse_multiplier: float = dataclasses.field(init=False)
def __post_init__(self):
"""Calculates rmse_multiplier.
Raises:
ValueError: if significance_level or power_level is not in (0, 1).
"""
if self.significance_level <= 0 or self.significance_level >= 1.0:
raise ValueError('significance_level must be in (0, 1), but got '
f'{self.significance_level}.')
if self.power_level <= 0 or self.power_level >= 1.0:
raise ValueError('power_level must be in (0, 1), but got '
f'{self.power_level}.')
self.rmse_multiplier = (
stats.norm.ppf(self.power_level) +
stats.norm.ppf(1 - self.significance_level))
def at(self, rmse: float) -> float:
"""Calculates min_detectable_iroas at the specified rmse."""
return rmse * self.rmse_multiplier
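# Worked example (illustrative): with the defaults significance_level=0.1 and
# power_level=0.9, rmse_multiplier = norm.ppf(0.9) + norm.ppf(0.9) ≈ 1.2816 + 1.2816
# ≈ 2.563, so CalculateMinDetectableIroas().at(2.0) ≈ 5.13.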
def find_days_to_exclude(
dates_to_exclude: List[str]) -> List[TimeWindow]:
"""Returns a list of time windows to exclude from a list of days and weeks.
Args:
dates_to_exclude: a List of strings with format indicating a single day as
'2020/01/01' (YYYY/MM/DD) or an entire time period as
'2020/01/01 - 2020/02/01' (indicating start and end date of the time period)
Returns:
days_exclude: a List of TimeWindows obtained from the list in input.
"""
days_exclude = []
for x in dates_to_exclude:
tmp = x.split('-')
if len(tmp) == 1:
try:
days_exclude.append(
TimeWindow(pd.Timestamp(tmp[0]), pd.Timestamp(tmp[0])))
except ValueError:
raise ValueError(f'Cannot convert the string {tmp[0]} to a valid date.')
elif len(tmp) == 2:
try:
days_exclude.append(
TimeWindow(pd.Timestamp(tmp[0]), pd.Timestamp(tmp[1])))
except ValueError:
raise ValueError(
f'Cannot convert the strings in {tmp} to a valid date.')
else:
raise ValueError(f'The input {tmp} cannot be interpreted as a single' +
' day or a time window')
return days_exclude
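# Example of the accepted formats (illustrative):
# find_days_to_exclude(['2020/10/10', '2020/11/10 - 2020/12/10']) returns two
# TimeWindows: one covering the single day 2020-10-10 and one covering 2020-11-10
# through 2020-12-10.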
def expand_time_windows(periods: List[TimeWindow]) -> List[pd.Timestamp]:
"""Return a list of days to exclude from a list of TimeWindows.
Args:
periods: List of time windows (first day, last day).
Returns:
days_exclude: a List of obtained by expanding the list in input.
"""
days_exclude = []
for window in periods:
days_exclude += pd.date_range(window.first_day, window.last_day, freq='D')
return list(set(days_exclude))
def overlap_percent(dates_left: List['datetime.datetime'],
dates_right: List['datetime.datetime']) -> float:
"""Find the size of the intersections of two arrays, relative to the first array.
Args:
dates_left: List of datetime.datetime
dates_right: List of datetime.datetime
Returns:
percentage: the percentage of elements of dates_right that also appear in
dates_left
"""
intersection = np.intersect1d(dates_left, dates_right)
percentage = 100 * len(intersection) / len(dates_right)
return percentage
def check_time_periods(geox_data: pd.DataFrame,
start_date_eval: pd.Timestamp,
start_date_aa_test: pd.Timestamp,
experiment_duration_weeks: int,
frequency: str) -> bool:
"""Checks that the geox_data contains the data for the two periods.
Check that the geox_data contains all observations during the evaluation and
AA test periods to guarantee that the experiment lasts exactly a certain
number of days/weeks, depending on the frequency of the data (daily/weekly).
Args:
geox_data: pd.Dataframe with at least the columns (date, geo).
start_date_eval: start date of the evaluation period.
start_date_aa_test: start date of the aa test period.
experiment_duration_weeks: int, length of the experiment in weeks.
frequency: str indicating the frequency of the time series. It should be one
of 'infer', 'D', 'W'.
Returns:
bool: a bool, True if the time periods specified pass all the checks
Raises:
ValueError: if part of the evaluation or AA test period are shorter than
experiment_duration (either weeks or days).
"""
if frequency not in ['infer', 'D', 'W']:
raise ValueError(
f'frequency should be one of ["infer", "D", "W"], got {frequency}')
if frequency == 'infer':
tmp = geox_data.copy().set_index(['date', 'geo'])
frequency = infer_frequency(tmp, 'date', 'geo')
if frequency == 'W':
frequency = '7D'
number_of_observations = experiment_duration_weeks
else:
number_of_observations = 7 * experiment_duration_weeks
freq_str = 'weeks' if frequency == '7D' else 'days'
missing_eval = find_missing_dates(geox_data, start_date_eval,
experiment_duration_weeks,
number_of_observations, frequency)
if missing_eval:
raise ValueError(
(f'The evaluation period contains the following {freq_str} ' +
f'{missing_eval} for which we do not have data.'))
missing_aa_test = find_missing_dates(geox_data, start_date_aa_test,
experiment_duration_weeks,
number_of_observations, frequency)
if missing_aa_test:
raise ValueError((f'The AA test period contains the following {freq_str} ' +
f'{missing_aa_test} for which we do not have data.'))
return True
def find_missing_dates(geox_data: pd.DataFrame, start_date: pd.Timestamp,
period_duration_weeks: int,
number_of_observations: int,
frequency: str) -> List[str]:
"""Find missing observations in a time period.
Args:
geox_data: pd.Dataframe with at least the columns (date, geo).
start_date: start date of the evaluation period.
period_duration_weeks: int, length of the period in weeks.
number_of_observations: expected number of time points.
frequency: str or pd.DateOffset indicating the frequency of the time series.
Returns:
missing: a list of strings, containing the dates for which data are missing
in geox_data.
"""
days = datetime.timedelta(days=7 * period_duration_weeks - 1)
period_dates = ((geox_data['date'] >= start_date) &
(geox_data['date'] <= start_date + days))
days_in_period = geox_data.loc[
period_dates, 'date'].drop_duplicates().dt.strftime('%Y-%m-%d').to_list()
missing = np.array([])
if len(days_in_period) != number_of_observations:
expected_observations = list(
pd.date_range(start_date, start_date + days,
freq=frequency).strftime('%Y-%m-%d'))
missing = set(expected_observations) - set(days_in_period)
return sorted(missing)
def infer_frequency(data: pd.DataFrame, date_index: str,
series_index: str) -> str:
"""Infers frequency of data from pd.DataFrame with multiple indices.
Infers frequency of data from pd.DataFrame with two indices, one for the slice
name and one for the date-time.
Example:
df = pd.Dataframe{'date': [2020-10-10, 2020-10-11], 'geo': [1, 1],
'response': [10, 20]}
df.set_index(['geo', 'date'], inplace=True)
infer_frequency(df, 'date', 'geo')
Args:
data: a pd.DataFrame for which frequency needs to be inferred.
date_index: string containing the name of the time index.
series_index: string containing the name of the series index.
Returns:
A str, either 'D' or 'W' indicating the most likely frequency inferred
from the data.
Raises:
ValueError: if it is not possible to infer frequency of sampling from the
provided pd.DataFrame.
"""
data = data.sort_values(by=[date_index, series_index])
# Infer the most likely frequency for each series_index
series_names = data.index.get_level_values(series_index).unique().tolist()
series_frequencies = []
for series in series_names:
observed_times = data.iloc[data.index.get_level_values(series_index) ==
series].index.get_level_values(date_index)
n_steps = len(observed_times)
if n_steps > 1:
time_diffs = (
observed_times[1:n_steps] -
observed_times[0:(n_steps - 1)]).astype('timedelta64[D]').values
modal_frequency, _ = np.unique(time_diffs, return_counts=True)
series_frequencies.append(modal_frequency[0])
if not series_frequencies:
raise ValueError(
'At least one series with more than one observation must be provided.')
if series_frequencies.count(series_frequencies[0]) != len(series_frequencies):
raise ValueError(
'The provided time series seem to have irregular frequencies.')
try:
frequency = {
1: 'D',
7: 'W'
}[series_frequencies[0]]
except KeyError:
raise ValueError('Frequency could not be identified. Got %d days.' %
series_frequencies[0])
return frequency
def human_readable_number(number: float) -> str:
"""Print a large number in a readable format.
Return a readable format for a number, e.g. 123 million becomes 123M.
Args:
number: a float to be printed in human readable format.
Returns:
readable_number: a string containing the formatted number.
"""
number = float('{:.3g}'.format(number))
magnitude = 0
while abs(number) >= 1000 and magnitude < 4:
magnitude += 1
number /= 1000.0
readable_number = '{}{}'.format('{:f}'.format(number).rstrip('0').rstrip('.'),
['', 'K', 'M', 'B', 'tn'][magnitude])
return readable_number
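# Examples (illustrative): human_readable_number(123456) returns '123K' and
# human_readable_number(1_230_000_000) returns '1.23B'.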
def change_background_row(df: pd.DataFrame, value: float, operation: str,
column: str):
"""Colors a row of a table based on the expression in input.
Color a row in:
- orange if the value of the column satisfies the expression in input
- beige if the value of the column satisfies the inverse expression in input
- green otherwise
For example, if the column has values [1, 2, 3] and we pass 'value' equal to
2, and operation '>', then
- 1 is marked in beige (1 < 2, which is the inverse expression)
- 2 is marked in green (it's not > and it's not <)
- 3 is marked in orange(3 > 2, which is the expression)
Args:
df: the table of which we want to change the background color.
value: term of comparison to be used in the expression.
operation: a string to define which operator to use, e.g. '>' or '='. For a
full list check _operator_functions.
column: name of the column to be used for the comparison
Returns:
pd.Series
"""
if _operator_functions[operation](float(df[column]), value):
return pd.Series('background-color: orange', df.index)
elif _operator_functions[_inverse_op[operation]](float(df[column]), value):
return pd.Series('background-color: beige', df.index)
else:
return pd.Series('background-color: lightgreen', df.index)
def flag_percentage_value(val, value: float, operation: str):
"""Colors a cell in red if its value satisfy the expression in input.
Colors a cell in red if the expression is true for that cell, e.g. if the
value of the cell is 10, 'value' in input is 5 and operation is '>', then we
will color the cell in red as 10 > 5.
Args:
val: value in a cell of a dataframe.
value: term of comparison used to decide the color of the cell.
operation: a string to define which operator to use, e.g. '>' or '='. For a
full list check _operator_functions.
Returns:
a str defining the color coding of the cell.
"""
if _operator_functions[operation](float(val.strip(' %')), value):
color = 'red'
else:
color = 'black'
return 'color: %s' % color
def create_output_table(results: pd.DataFrame,
total_response: float,
total_spend: float,
geo_treatment: pd.DataFrame,
budgets_for_design: List[float],
average_order_value: float,
num_geos: int,
confidence_level: float = 0.9,
power_level: float = 0.8) -> pd.DataFrame:
"""Creates the table with the output designs.
Args:
results: table with columns (num_pairs_filtered,
experiment_response, experiment_spend, spend_response_ratio, budget,
iroas, rmse, proportion_cost_in_experiment) containing the generated
design, e.g. the first output of the
function TrimmedMatchGeoXDesign.report_candidate_design.
total_response: total response for all geos (excluded as well) during the
evaluation period.
total_spend: total spend for all geos (excluded as well) during the
evaluation period.
geo_treatment: table with columns (geo, response, spend, pair) containing the
treatment geos and their overall response and spend during the evaluation
period.
budgets_for_design: list of budgets to be considered for the designs.
average_order_value: factor used to change scale from conversion count to
conversion value.
num_geos: number of geos available.
confidence_level: confidence level for the test H0: iROAS=0
vs H1: iROAS>=minimum_detectable_iroas.
power_level: level used for the power analysis.
Returns:
a pd.DataFrame with the output designs.
"""
calc_min_detectable_iroas = CalculateMinDetectableIroas(
1 - confidence_level, power_level)
designs = []
for budget in budgets_for_design:
tmp_result = results[results['budget'] == budget]
chosen_design = tmp_result.loc[tmp_result['rmse_cost_adjusted'].idxmin()]
baseline = geo_treatment.loc[
geo_treatment['pair'] > chosen_design['num_pairs_filtered'],
'response'].sum()
cost_in_experiment = geo_treatment.loc[
geo_treatment['pair'] > chosen_design['num_pairs_filtered'],
'spend'].sum()
min_detectable_iroas_raw = calc_min_detectable_iroas.at(
chosen_design['rmse'])
min_detectable_iroas = average_order_value * min_detectable_iroas_raw
min_detectable_lift = budget * 100 * min_detectable_iroas_raw / baseline
num_removed_geos = int(2 * chosen_design['num_pairs_filtered'])
num_geo_pairs = int((num_geos - num_removed_geos) / 2)
treat_control_removed = (f'{num_geo_pairs} / {num_geo_pairs} / ' +
f'{num_removed_geos}')
revenue_covered = 100 * baseline / total_response
proportion_cost_in_experiment = cost_in_experiment / total_spend
national_budget = human_readable_number(
budget / proportion_cost_in_experiment)
designs.append({
'Budget': human_readable_number(budget),
'Minimum detectable iROAS': f'{min_detectable_iroas:.3}',
'Minimum detectable lift in response': f'{min_detectable_lift:.2f} %',
'Treatment/control/excluded geos': treat_control_removed,
'Revenue covered by treatment group': f'{revenue_covered:.2f} %',
'Cost/baseline response': f'{(budget / baseline * 100):.2f} %',
'Cost if test budget is scaled nationally': national_budget
})
designs = pd.DataFrame(designs)
designs.index.rename('Design', inplace=True)
return designs
def format_table(
df: pd.DataFrame,
formatting_options: List[FormatOptions]) -> style.Styler:
"""Formats a table with the output designs.
Args:
df: a table to be formatted.
formatting_options: a dictionary indicating for each column (key) what
formatting function to be used and its additional args, e.g.
formatting_options =
{'column_1': {'function': fnc, 'args': {'input1': 1, 'input2': 2}}}
Returns:
a pandas.io.formats.style.Styler with the table formatted.
"""
for ind in range(len(formatting_options)):
tmp_options = formatting_options[ind]
if ind == 0:
# if axis is in the args, then the function should be applied on rows/cols
if 'axis' in tmp_options.args:
formatted_table = df.style.apply(tmp_options.function,
**tmp_options.args)
# apply the formatting elementwise
else:
formatted_table = df.style.applymap(tmp_options.function,
**tmp_options.args)
else:
# if axis is in the args, then the function should be applied on rows/cols
if 'axis' in tmp_options.args:
formatted_table = formatted_table.apply(tmp_options.function,
**tmp_options.args)
# apply the formatting elementwise
else:
formatted_table = formatted_table.applymap(tmp_options.function,
**tmp_options.args)
return formatted_table
def format_design_table(designs: pd.DataFrame,
minimum_detectable_iroas: float,
minimum_lift_in_response_metric: float = 10.0,
minimum_revenue_covered_by_treatment: float = 5.0):
"""Formats a table with the output designs.
Args:
designs: table with columns (Budget, Minimum detectable iROAS,
Minimum Detectable lift in response, Treatment/control/excluded geos,
Revenue covered by treatment group, Cost/baseline response,
Cost if test budget is scaled nationally) containing the output designs,
e.g. the output of the function create_output_table.
minimum_detectable_iroas: target minimum detectable iROAS used to define
the optimality of a design.
minimum_lift_in_response_metric: threshold minimum detectable lift
in percentage used to flag designs with higher detectable lift.
minimum_revenue_covered_by_treatment: value used to flag any design where the
treatment group is too small based on response.
Returns:
a pandas.io.formats.style.Styler with the table formatted.
"""
formatting_options = [
FormatOptions(
column='Minimum detectable lift in response',
function=flag_percentage_value,
args={
'value': minimum_lift_in_response_metric,
'operation': '>'
}),
FormatOptions(
column='Revenue covered by treatment group',
function=flag_percentage_value,
args={
'value': minimum_revenue_covered_by_treatment,
'operation': '<'
}),
FormatOptions(
column='Minimum detectable iROAS',
function=change_background_row,
args={
'value': minimum_detectable_iroas,
'operation': '>',
'axis': 1
})
]
return format_table(designs, formatting_options)
def check_input_data(
data: pd.DataFrame,
numeric_columns_to_impute: Optional[List[str]] = None) -> pd.DataFrame:
"""Returns data to be analysed using Trimmed Match with data imputation.
Args:
data: data frame with columns (date, geo) and any column specified in
numeric_columns_to_impute, which should contain at least the columns with
response and spend information if they have a different name than
'response' and 'cost', respectively.
numeric_columns_to_impute: list of columns for which data imputation must be
performed.
Returns:
data frame with columns (date, geo, response, cost) and imputed missing
data.
Raises:
ValueError: if one of the mandatory columns is missing.
"""
numeric_columns_to_impute = numeric_columns_to_impute or ['response', 'cost']
mandatory_columns = set(['date', 'geo'] + numeric_columns_to_impute)
if not mandatory_columns.issubset(data.columns):
raise ValueError('The mandatory columns ' +
f'{mandatory_columns - set(data.columns)} are missing ' +
'from the input data.')
data['date'] = | pd.to_datetime(data['date']) | pandas.to_datetime |
from binance.client import Client
import pandas as pd
from utils import configure_logging
from multiprocessing import Process, freeze_support, Pool, cpu_count
import os
try:
from credentials import API_KEY, API_SECRET
except ImportError:
API_KEY = API_SECRET = None
exit("CAN'T RUN SCRIPT WITHOUT BINANCE API KEY/SECRET")
log = configure_logging()
class FuturesDataPuller(Process):
SYMBOLS = ['BTCUSDT', 'ETHUSDT', 'LTCUSDT', 'LINKUSDT']
# 'BTCUSDT', 'XRPUSDT', 'SXPUSDT', 'ADAUSDT', 'EOSUSDT', 'DOTUSDT', 'VETUSDT', 'ETHUSDT', 'LTCUSDT', 'LINKUSDT'
KLINE_INTERVALS = ['1m', '3m', '5m', '15m', '30m', '1h', '2h', '4h', '6h', '8h', '12h', '1d', '3d', '1w', '1M']
def __init__(self, client, symbol, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client = client
self.symbol = symbol
def run(self):
klines = self.get_klines(
interval='1m',
start_date="15 Feb 2021 00:00:00",
end_date="19 Feb 2021 12:00:00",
)
funding_rates = self.get_funding_rate(klines=klines)
df = self.reformat_data(klines=klines, funding_rates=funding_rates)
self.to_csv(df=df)
def get_klines(self, interval, start_date, end_date):
"""
:param interval: str, one of the supported intervals from the KLINE_INTERVALS list
:param start_date: str, format 'DD MMM YYYY HH:mm:ss'
:param end_date: str
:return: list of lists with klines[Open_time: int,
Open: Decimal,
High: Decimal,
Low: Decimal,
Close: Decimal,
Volume: Decimal,
Close_time: int,
Quote asset volume: Decimal,
Number of trades: int,
Taker buy base asset volume: Decimal,
Taker buy quote asset volume: Decimal,
Ignore: Decimal]
"""
try:
data = self.client.get_historical_klines(
symbol=self.symbol,
interval=interval,
start_str=start_date,
end_str=end_date,
)
except Exception as exc:
log.exception(exc)
return {'msg': exc}
return data
def get_funding_rate(self, klines: list):
"""
Uses first and last kline time to get funding rates for that period
:param klines: trade klines
:return: list of dicts(symbol=(str), fundingTime=(int), fundingRate=(Decimal))
"""
start, bypass_limit, end = klines[0][0], klines[int(len(klines)/2)][0], klines[len(klines) - 1][0]
try:
data = self.client.futures_funding_rate(
symbol=self.symbol,
startTime=start,
endTime=bypass_limit,
)
data_2 = self.client.futures_funding_rate(
symbol=self.symbol,
startTime=bypass_limit,
endTime=end,
)
except Exception as exc:
log.exception(exc)
return {'msg': exc}
for instance in data_2:
data.append(instance)
return data
def to_csv(self, df):
"""
:param df: pd.DataFrame obj.
:return: .csv file with data
"""
file_directory = 'data'
file_full_path = os.path.join(file_directory, f'{self.symbol}.csv')
if not os.path.exists(file_directory):
os.makedirs(name=file_directory)
df.to_csv(path_or_buf=file_full_path, sep=',')
@staticmethod
def reformat_data(klines, funding_rates):
"""
:return: pd.DataFrame obj. with required_data
"""
df = pd.DataFrame.from_records(klines)
df = df.drop(range(5, 12), axis=1)
col_names = ['time', 'open', 'high', 'low', 'close']
df.columns = col_names
for col in col_names:
df[col] = df[col].astype(float)
df['date'] = | pd.to_datetime(df['time'] * 1000000, format='%Y-%m-%d %H:%M:%S') | pandas.to_datetime |
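# Hedged usage sketch for the FuturesDataPuller class above (illustrative only; the
# author's actual entry point is not shown in this snippet):
#
# if __name__ == '__main__':
#     freeze_support()
#     client = Client(api_key=API_KEY, api_secret=API_SECRET)
#     pullers = [FuturesDataPuller(client=client, symbol=s) for s in FuturesDataPuller.SYMBOLS]
#     for p in pullers:
#         p.start()
#     for p in pullers:
#         p.join()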
#%%
import os
import sys
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
# %%
import sys
sys.path.append('/Volumes/GoogleDrive/My Drive/python_code/maggot_models/')
sys.path.append('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
from pymaid_creds import url, name, password, token
import pymaid
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from graspy.plot import gridplot, heatmap
from graspy.utils import binarize, pass_to_ranks
from src.data import load_metagraph
from src.visualization import CLASS_COLOR_DICT, adjplot
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams.update({'font.size': 6})
rm = pymaid.CatmaidInstance(url, token, name, password)
mg = load_metagraph("Gad", version="2020-06-10", path = '/Volumes/GoogleDrive/My Drive/python_code/maggot_models/data/processed/')
mg.calculate_degrees(inplace=True)
adj = pd.read_csv('VNC_interaction/data/brA1_axon-dendrite.csv', header = 0, index_col = 0)
adj.columns = adj.columns.astype(int) #convert column names to int for easier indexing
A1 = pymaid.get_skids_by_annotation('mw A1 neurons paired')
A1_sens = [val for sublist in list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw A1 sensories').name)) for val in sublist]
brain_only = np.setdiff1d(adj.index.values, A1 + A1_sens)
adj = adj.loc[brain_only, brain_only]
#adj = mg.adj # adjacency matrix from the "mg" object
# pull skids of different output types
output_order = [1, 0, 2]
output_names = pymaid.get_annotated('mw brain outputs').name
output_skids_list = list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw brain outputs').name))
output_skids = [val for sublist in output_skids_list for val in sublist]
output_names_reordered = [output_names[i] for i in output_order]
output_skids_list_reordered = [output_skids_list[i] for i in output_order]
def skid_to_index(skid, mg):
index_match = np.where(mg.meta.index == skid)[0]
if(len(index_match)==1):
return(index_match[0])
if(len(index_match)!=1):
print('Not one match for skid %i!' %skid)
# convert skids to indices
output_indices_list = []
for skids in output_skids_list_reordered:
indices = []
for skid in skids:
index = skid_to_index(skid, mg)
indices.append(index)
output_indices_list.append(indices)
# %%
# identify pre-descending types
# defined as 50% outputs to a particular descending type
# doesn't work well (no neurons with high fraction output to descending neurons)
# ** is this normal? perhaps test with random sample of neuron same size as descendings; started at bottom
dVNC_mat = pd.DataFrame(adj[:, output_indices_list[0]], index = mg.meta.index, columns = output_skids_list_reordered[0])
dSEZ_mat = pd.DataFrame(adj[:, output_indices_list[1]], index = mg.meta.index, columns = output_skids_list_reordered[1])
RG_mat = pd.DataFrame(adj[:, output_indices_list[2]], index = mg.meta.index, columns = output_skids_list_reordered[2])
# number of outputs per neuron
# how many go to output types
outputs = pd.read_csv('data/mw_brain_matrix_skeleton_measurements.csv', header=0, index_col=0)
dVNC_mat_sum = dVNC_mat.sum(axis = 1)
dSEZ_mat_sum = dSEZ_mat.sum(axis = 1)
RG_mat_sum = RG_mat.sum(axis = 1)
fraction_output_dVNC = []
fraction_output_dSEZ = []
fraction_output_RG = []
# determine fraction of output from each brain neuron to each brain output type
for i in range(0, len(dVNC_mat_sum)):
output_total = outputs[outputs.Skeleton == dVNC_mat_sum.index[i]]['N outputs'].values[0]
if(output_total > 0):
fraction_output_dVNC.append(dVNC_mat_sum.iloc[i]/output_total)
fraction_output_dSEZ.append(dSEZ_mat_sum.iloc[i]/output_total)
fraction_output_RG.append(RG_mat_sum.iloc[i]/output_total)
if(output_total == 0):
fraction_output_dVNC.append(0)
fraction_output_dSEZ.append(0)
fraction_output_RG.append(0)
# convert to np arrays
fraction_output_dVNC = np.array(fraction_output_dVNC)
fraction_output_dSEZ = np.array(fraction_output_dSEZ)
fraction_output_RG = np.array(fraction_output_RG)
fig, ax = plt.subplots(1,1,figsize=(5,5))
sns.distplot(fraction_output_dVNC[fraction_output_dVNC>0], hist = False, ax = ax)
sns.distplot(fraction_output_dSEZ[fraction_output_dSEZ>0], hist = False, ax = ax)
sns.distplot(fraction_output_RG[fraction_output_RG>0], hist = False, ax = ax)
ax.set(xlabel = 'Fraction output to dVNC(blue), dSEZ(orange), or RG(green)')
ax.get_yaxis().set_visible(False)
plt.savefig('cascades/feedback_through_brain/plots/output_fraction_to_descendings.pdf', format='pdf', bbox_inches='tight')
'''
import numpy.random as random
rand_mat_list = []
for i in range(0, 2):
random.seed(i)
random_indices = random.choice(len(dVNC_mat_sum), 183, replace = False)
rand_mat = pd.DataFrame(adj[:, random_indices], index = mg.meta.index, columns = random_indices)
rand_mat_sum = rand_mat.sum(axis = 1)
fraction_rand_mat = []
for j in range(0, len(rand_mat_sum)):
output_total = outputs[outputs.Skeleton == rand_mat_sum.index[j]]['N outputs'].values[0]
if(output_total > 0):
fraction_rand_mat.append(rand_mat_sum.iloc[j]/output_total)
if(output_total == 0):
fraction_rand_mat.append(0)
rand_mat_list.append(np.array(fraction_rand_mat))
'''
# %%
# identify pre-descending types
# contributing 5% input to a particular descending neuron
# old method, using the new one below
from connectome_tools.process_matrix import Promat
def skid_to_index(skid, mg):
index_match = np.where(mg.meta.index == skid)[0]
if(len(index_match)==1):
return(index_match[0])
if(len(index_match)!=1):
print('Not one match for skid %i!' %skid)
def index_to_skid(index, mg):
return(mg.meta.iloc[index, :].name)
# import pairs
pairs = pd.read_csv('VNC_interaction/data/pairs-2020-10-26.csv', header = 0)
dVNC_pairs = Promat.extract_pairs_from_list(output_skids_list_reordered[0], pairs)
dSEZ_pairs = Promat.extract_pairs_from_list(output_skids_list_reordered[1], pairs)
RG_pairs = Promat.extract_pairs_from_list(output_skids_list_reordered[2], pairs)
brain_skids_pairs = Promat.extract_pairs_from_list(mg.meta.index, pairs)
# left_right interlaced order for dVNC pairs
dVNC_pair_order = []
for i in range(0, len(dVNC_pairs)):
dVNC_pair_order.append(dVNC_pairs.iloc[i].leftid)
dVNC_pair_order.append(dVNC_pairs.iloc[i].rightid)
dSEZ_pair_order = []
for i in range(0, len(dSEZ_pairs)):
dSEZ_pair_order.append(dSEZ_pairs.iloc[i].leftid)
dSEZ_pair_order.append(dSEZ_pairs.iloc[i].rightid)
RG_pair_order = []
for i in range(0, len(RG_pairs)):
RG_pair_order.append(RG_pairs.iloc[i].leftid)
RG_pair_order.append(RG_pairs.iloc[i].rightid)
# left_right interlaced order for brain matrix
brain_pair_order = []
for i in range(0, len(brain_skids_pairs)):
brain_pair_order.append(brain_skids_pairs.iloc[i].leftid)
brain_pair_order.append(brain_skids_pairs.iloc[i].rightid)
interlaced_dVNC_mat = dVNC_mat.loc[brain_pair_order, dVNC_pair_order]
interlaced_dSEZ_mat = dSEZ_mat.loc[brain_pair_order, dSEZ_pair_order]
interlaced_RG_mat = RG_mat.loc[brain_pair_order, RG_pair_order]
# convert to %input of descendings' dendrite
for column in interlaced_dVNC_mat.columns:
dendrite_input = mg.meta.loc[column].dendrite_input
interlaced_dVNC_mat.loc[:, column] = interlaced_dVNC_mat.loc[:, column]/dendrite_input
for column in interlaced_dSEZ_mat.columns:
dendrite_input = mg.meta.loc[column].dendrite_input
interlaced_dSEZ_mat.loc[:, column] = interlaced_dSEZ_mat.loc[:, column]/dendrite_input
for column in interlaced_RG_mat.columns:
dendrite_input = mg.meta.loc[column].dendrite_input
interlaced_RG_mat.loc[:, column] = interlaced_RG_mat.loc[:, column]/dendrite_input
# summed input onto each descending pair: the four left/right connections between an upstream pair and a descending pair are added and divided by 2 (average % input per descending neuron)
oddCols_dVNC = np.arange(0, len(interlaced_dVNC_mat.columns), 2)
oddCols_dSEZ = np.arange(0, len(interlaced_dSEZ_mat.columns), 2)
oddCols_RG = np.arange(0, len(interlaced_RG_mat.columns), 2)
oddRows = np.arange(0, len(interlaced_dVNC_mat.index), 2)
# initializing summed matrices for each descending type
sumMat_dVNC = np.zeros(shape=(len(oddRows),len(oddCols_dVNC)))
sumMat_dVNC = pd.DataFrame(sumMat_dVNC, columns = interlaced_dVNC_mat.columns[oddCols_dVNC], index = interlaced_dVNC_mat.index[oddRows])
sumMat_dSEZ = np.zeros(shape=(len(oddRows),len(oddCols_dSEZ)))
sumMat_dSEZ = pd.DataFrame(sumMat_dSEZ, columns = interlaced_dSEZ_mat.columns[oddCols_dSEZ], index = interlaced_dSEZ_mat.index[oddRows])
sumMat_RG = np.zeros(shape=(len(oddRows),len(oddCols_RG)))
sumMat_RG = pd.DataFrame(sumMat_RG, columns = interlaced_RG_mat.columns[oddCols_RG], index = interlaced_RG_mat.index[oddRows])
for i_iter, i in enumerate(oddRows):
for j_iter, j in enumerate(oddCols_dVNC):
summed_pairs = interlaced_dVNC_mat.iat[i, j] + interlaced_dVNC_mat.iat[i+1, j+1] + interlaced_dVNC_mat.iat[i+1, j] + interlaced_dVNC_mat.iat[i, j+1]
sumMat_dVNC.iat[i_iter, j_iter] = summed_pairs/2
for i_iter, i in enumerate(oddRows):
for j_iter, j in enumerate(oddCols_dSEZ):
summed_pairs = interlaced_dSEZ_mat.iat[i, j] + interlaced_dSEZ_mat.iat[i+1, j+1] + interlaced_dSEZ_mat.iat[i+1, j] + interlaced_dSEZ_mat.iat[i, j+1]
sumMat_dSEZ.iat[i_iter, j_iter] = summed_pairs/2
for i_iter, i in enumerate(oddRows):
for j_iter, j in enumerate(oddCols_RG):
summed_pairs = interlaced_RG_mat.iat[i, j] + interlaced_RG_mat.iat[i+1, j+1] + interlaced_RG_mat.iat[i+1, j] + interlaced_RG_mat.iat[i, j+1]
sumMat_RG.iat[i_iter, j_iter] = summed_pairs/2
# %%
# identifying pre-descendings based on an individual pair of neurons contributing 1% input to descending neuron
from connectome_tools.process_matrix import Adjacency_matrix, Promat
from datetime import date
threshold = 0.01
inputs = pd.read_csv('VNC_interaction/data/brA1_input_counts.csv', index_col = 0)
inputs = pd.DataFrame(inputs.values, index = inputs.index, columns = ['axon_input', 'dendrite_input'])
dVNC = pymaid.get_skids_by_annotation('mw dVNC')
dSEZ = pymaid.get_skids_by_annotation('mw dSEZ')
RGN = pymaid.get_skids_by_annotation('mw RGN')
brain_adj = Adjacency_matrix(adj, adj.index, pairs, inputs,'axo-dendritic')
pre_dVNC, pre_dVNC_edges = brain_adj.upstream(dVNC, threshold, exclude = dVNC)
_, pre_dVNC = brain_adj.edge_threshold(pre_dVNC_edges, threshold, direction='upstream')
# compare to other cell types
MBON = pymaid.get_skids_by_annotation('mw MBON')
MBIN = pymaid.get_skids_by_annotation('mw MBIN')
LHN = pymaid.get_skids_by_annotation('mw LHN')
CN = pymaid.get_skids_by_annotation('mw CN')
KC = pymaid.get_skids_by_annotation('mw KC')
dSEZ = pymaid.get_skids_by_annotation('mw dSEZ')
dVNC = pymaid.get_skids_by_annotation('mw dVNC')
uPN = pymaid.get_skids_by_annotation('mw uPN')
tPN = pymaid.get_skids_by_annotation('mw tPN')
vPN = pymaid.get_skids_by_annotation('mw vPN')
mPN = pymaid.get_skids_by_annotation('mw mPN')
PN = uPN + tPN + vPN + mPN
FBN = pymaid.get_skids_by_annotation('mw FBN')
FB2N = pymaid.get_skids_by_annotation('mw FB2N')
FBN_all = FBN + FB2N
CN = list(np.setdiff1d(CN, LHN + FBN_all)) # 'CN' means exclusive CNs that are not FBN or LHN
pre_dVNC2 = list(np.setdiff1d(pre_dVNC, MBON + MBIN + LHN + CN + KC + dSEZ + dVNC + PN + FBN_all)) # 'pre_dVNC' must have no other category assignment
#pymaid.add_annotations(pre_dVNC, 'mw pre-dVNC 1%')
pre_dSEZ, pre_dSEZ_edges = brain_adj.upstream(dSEZ, threshold, exclude = dSEZ)
_, pre_dSEZ = brain_adj.edge_threshold(pre_dSEZ_edges, threshold, direction='upstream')
#pymaid.add_annotations(pre_dSEZ, 'mw pre-dSEZ 1%')
pre_RGN, pre_RGN_edges = brain_adj.upstream(RGN, threshold, exclude = RGN)
_, pre_RGN = brain_adj.edge_threshold(pre_RGN_edges, threshold, direction='upstream')
#pymaid.add_annotations(pre_RGN, 'mw pre-RGN 1%')
# %%
# plotting number of connections to and from descendings
### *****align/bin issues here***** ####
### see upstream_MNs.py for solution ###
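### a possible fix (untested sketch): use half-integer bin edges so integer
### counts sit at bin centers, e.g. bins = np.arange(-0.5, max(data) + 1.5, binwidth),
### and drop align='left'; upstream_MNs.py holds the adopted solution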
fig, axs = plt.subplots(
3, 2, figsize=(2.5, 3)
)
fig.tight_layout(pad = 2.5)
threshold = 0.05 # average 5% input threshold
binwidth = 1
x_range = list(range(0, 11))
align = 'left'
ax = axs[0, 0]
count_per_us_neuron = (sumMat_dVNC.values>threshold).sum(axis=1)
data = count_per_us_neuron[count_per_us_neuron>0]
ax.hist(data, bins=range(min(data), max(data) + binwidth, binwidth), align = align)
ax.set_ylabel('Neuron pairs')
ax.set_xlabel('Connection(s) to dVNCs')
ax.set_xticks(x_range)
ax.set(xlim = (0.5, 10))
ax = axs[0, 1]
count_per_descending = (sumMat_dVNC.values>threshold).sum(axis=0)
data = count_per_descending
ax.hist(data, bins=range(min(data), max(data) + binwidth, binwidth), align = align)
ax.set_ylabel('dVNC pairs')
ax.set_xlabel('Connection(s) received')
ax.set_xticks(x_range)
ax.set(xlim = (-0.5, 7))
ax = axs[1, 0]
count_per_us_neuron = (sumMat_dSEZ.values>threshold).sum(axis=1)
data = count_per_us_neuron[count_per_us_neuron>0]
ax.hist(data, bins=range(min(data), max(data) + binwidth, binwidth), align = align)
ax.set_ylabel('Neuron pairs')
ax.set_xlabel('Connection(s) to dSEZs')
ax.set_xticks(x_range)
ax.set(xlim = (0.5, 10))
ax = axs[1, 1]
count_per_descending = (sumMat_dSEZ.values>threshold).sum(axis=0)
data = count_per_descending
ax.hist(data, bins=range(min(data), max(data) + binwidth, binwidth), align = align)
ax.set_ylabel('dSEZ pairs')
ax.set_xlabel('Connection(s) received')
ax.set_xticks(x_range)
ax.set(xlim = (-0.5, 7))
ax = axs[2, 0]
count_per_us_neuron = (sumMat_RG.values>threshold).sum(axis=1)
data = count_per_us_neuron[count_per_us_neuron>0]
ax.hist(data, bins=range(min(data), max(data) + binwidth, binwidth), align = align)
ax.set_ylabel('Neuron pairs')
ax.set_xlabel('Connection(s) to RGNs')
ax.set_xticks(x_range)
ax.set(xlim = (0.5, 10))
ax = axs[2, 1]
count_per_descending = (sumMat_RG.values>threshold).sum(axis=0)
data = count_per_descending
ax.hist(data, bins=range(min(data), max(data) + binwidth, binwidth), align = align)
ax.set_ylabel('RGN pairs')
ax.set_xlabel('Connection(s) received')
ax.set_xticks(x_range)
ax.set(xlim = (-0.5, 7))
plt.savefig('cascades/feedback_through_brain/plots/connections_from_to_descendings_5percent_threshold.pdf', bbox_inches='tight', transparent = True)
# %%
# export pre-descending neurons
pre_dVNC = (sumMat_dVNC.values>threshold).sum(axis=1)>0
pre_dSEZ = (sumMat_dSEZ.values>threshold).sum(axis=1)>0
pre_RGN = (sumMat_RG.values>threshold).sum(axis=1)>0
# identify indices == True, i.e. left skids of pairs that are pre-descendings
indices_pre_dVNC = np.where(pre_dVNC)[0]
indices_pre_dSEZ = np.where(pre_dSEZ)[0]
indices_pre_RGN = np.where(pre_RGN)[0]
pre_dVNC_skidleft = [sumMat_dVNC.index[x] for x in indices_pre_dVNC]
pre_dSEZ_skidleft = [sumMat_dSEZ.index[x] for x in indices_pre_dSEZ]
pre_RGN_skidleft = [sumMat_RG.index[x] for x in indices_pre_RGN]
# select pre-descending pair skids
brain_skids_pairs.index = brain_skids_pairs.leftid
pre_dVNC_skids = brain_skids_pairs.loc[pre_dVNC_skidleft, :]
pre_dSEZ_skids = brain_skids_pairs.loc[pre_dSEZ_skidleft, :]
pre_RGN_skids = brain_skids_pairs.loc[pre_RGN_skidleft, :]
#pre_dVNC_skids.to_csv('cascades/feedback_through_brain/plots/pre_dVNC_skids.csv', index = False)
#pre_dSEZ_skids.to_csv('cascades/feedback_through_brain/plots/pre_dSEZ_skids.csv', index = False)
#pre_RGN_skids.to_csv('cascades/feedback_through_brain/plots/pre_RGN_skids.csv', index = False)
# %%
# plot connectivity matrices of pre-output to output
from tqdm import tqdm
pre_dVNC_dSEZ_RGN = list(np.intersect1d(np.intersect1d(pre_dVNC_skidleft, pre_dSEZ_skidleft), pre_RGN_skidleft))
pre_dVNC_dSEZ = list(np.setdiff1d(np.intersect1d(pre_dVNC_skidleft, pre_dSEZ_skidleft), pre_dVNC_dSEZ_RGN))
pre_dVNC_RGN = list(np.setdiff1d(np.intersect1d(pre_dVNC_skidleft, pre_RGN_skidleft), pre_dVNC_dSEZ_RGN))
pre_dSEZ_RGN = list(np.setdiff1d(np.intersect1d(pre_dSEZ_skidleft, pre_RGN_skidleft), pre_dVNC_dSEZ_RGN))
combos = pre_dVNC_dSEZ_RGN + pre_dVNC_dSEZ + pre_dVNC_RGN + pre_dSEZ_RGN
pre_dVNC = list(np.setdiff1d(pre_dVNC_skidleft, combos))
pre_dSEZ = list(np.setdiff1d(pre_dSEZ_skidleft, combos))
pre_RGN = list(np.setdiff1d(pre_RGN_skidleft, combos))
output_mat = pd.concat([sumMat_dVNC, sumMat_dSEZ, sumMat_RG], axis = 1)
plt.savefig('cascades/feedback_through_brain/plots/preoutput_to_output.pdf')
# full interlaced adj matrix, summed pairs
# FUTURE: add colored bars to side of matrix to indicate cell type
interlaced_mat = pd.DataFrame(adj, index = mg.meta.index, columns = mg.meta.index)
interlaced_mat = interlaced_mat.loc[brain_pair_order, brain_pair_order]
# convert to %input
for column in interlaced_mat.columns:
dendrite_input = mg.meta.loc[column].dendrite_input
if(dendrite_input>0):
interlaced_mat.loc[:, column] = interlaced_mat.loc[:, column]/dendrite_input
if(dendrite_input==0):
interlaced_mat.loc[:, column] = 0
oddRows = np.arange(0, len(interlaced_mat.index), 2)
oddCols = np.arange(0, len(interlaced_mat.columns), 2)
# summing partners
sumMat = np.zeros(shape=(len(oddRows),len(oddCols)))
sumMat = | pd.DataFrame(sumMat, columns = interlaced_mat.columns[oddCols], index = interlaced_mat.index[oddRows]) | pandas.DataFrame |
import numpy as np
import scipy.sparse
import pandas as pd
import logging
import rpy2.robjects as ro
import rpy2.rinterface_lib.callbacks
import anndata2ri
import scanpy as sc
from scIB.utils import checkAdata, checkBatch
from .utils import diffusion_conn, diffusion_nn
rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR) # Ignore R warning messages
def kBET_single(
matrix,
batch,
type_=None,
k0=10,
knn=None,
subsample=0.5,
heuristic=True,
verbose=False
):
"""
params:
        matrix: expression matrix (at the moment a PCA matrix, so do.pca is set to FALSE)
        batch: series or list of batch assignments
subsample: fraction to be subsampled. No subsampling if `subsample=None`
returns:
kBET observed rejection rate
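    example (hypothetical shapes):
        a 500-cell x 50-PC matrix, a length-500 batch vector and a 500 x k0
        matrix of 1-based neighbour indices yield a single rejection rate in
        [0, 1]; values near 0 indicate well-mixed batches, values near 1 a
        strong batch effect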
"""
ro.r("library(kBET)")
if verbose:
print("importing expression matrix")
ro.globalenv['data_mtrx'] = matrix
ro.globalenv['batch'] = batch
# print(matrix.shape)
# print(len(batch))
if verbose:
print("kBET estimation")
# k0 = len(batch) if len(batch) < 50 else 'NULL'
anndata2ri.activate()
ro.globalenv['knn_graph'] = knn
ro.globalenv['k0'] = k0
ro.r(
"batch.estimate <- kBET("
" data_mtrx,"
" batch,"
" knn=knn_graph,"
" k0=k0,"
" plot=FALSE,"
" do.pca=FALSE,"
" heuristic=FALSE,"
" adapt=FALSE,"
f" verbose={str(verbose).upper()}"
")"
)
anndata2ri.deactivate()
try:
ro.r("batch.estimate$summary$kBET.observed")[0]
except rpy2.rinterface_lib.embedded.RRuntimeError:
return np.nan
else:
return ro.r("batch.estimate$summary$kBET.observed")[0]
def kBET(
adata,
batch_key,
label_key,
embed='X_pca',
type_=None,
hvg=False,
subsample=0.5, #non-functional
heuristic=False,
verbose=False
):
"""
    Compare the batch-mixing effect before and after integration
    params:
        adata: AnnData object to compute kBET on
        batch_key: key in adata.obs holding the batch assignment
        label_key: key in adata.obs holding the cell-type/cluster labels
        embed: embedding in adata.obsm used for the neighbourhood graph (default 'X_pca')
return:
pd.DataFrame with kBET observed rejection rates per cluster for batch
"""
kBET_scores = {'cluster': [], 'kBET': []}
try:
ro.r("library(kBET)")
except rpy2.rinterface_lib.embedded.RRuntimeError as e:
print(e)
print("Couldn't compute kBET, returning NaN")
return pd.DataFrame.from_dict(kBET_scores)
checkAdata(adata)
checkBatch(batch_key, adata.obs)
checkBatch(label_key, adata.obs)
# compute connectivities for non-knn type data integrations
# and increase neighborhoods for knn type data integrations
if type_ != 'knn':
adata_tmp = sc.pp.neighbors(adata, n_neighbors=50, use_rep=embed, copy=True)
else:
# check if pre-computed neighbours are stored in input file
adata_tmp = adata.copy()
if 'diffusion_connectivities' not in adata.uns['neighbors']:
if verbose:
print(f"Compute: Diffusion neighbours.")
adata_tmp = diffusion_conn(adata, min_k=50, copy=True)
adata_tmp.obsp['connectivities'] = adata_tmp.uns['neighbors']['diffusion_connectivities']
if verbose:
print(f"batch: {batch_key}")
# set upper bound for k0
size_max = 2 ** 31 - 1
#prepare call of kBET per cluster
for clus in adata_tmp.obs[label_key].unique():
adata_sub = adata_tmp[adata_tmp.obs[label_key] == clus, :].copy()
# check if neighborhood size too small or only one batch in subset
if np.logical_or(adata_sub.n_obs < 10,
len(adata_sub.obs[batch_key].cat.categories) == 1):
print(f"{clus} consists of a single batch or is too small. Skip.")
score = np.nan
else:
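            # heuristic neighbourhood size: a quarter of the mean batch size,
            # clipped to the range [10, 70]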
quarter_mean = np.floor(np.mean(adata_sub.obs[batch_key].value_counts()) / 4).astype('int')
k0 = np.min([70, np.max([10, quarter_mean])])
# check k0 for reasonability
if (k0 * adata_sub.n_obs) >= size_max:
k0 = np.floor(size_max / adata_sub.n_obs).astype('int')
matrix = np.zeros(shape=(adata_sub.n_obs, k0 + 1))
if verbose:
print(f"Use {k0} nearest neighbors.")
n_comp, labs = scipy.sparse.csgraph.connected_components(adata_sub.obsp['connectivities'],
connection='strong')
if n_comp > 1:
# check the number of components where kBET can be computed upon
comp_size = pd.value_counts(labs)
# check which components are small
comp_size_thresh = 3 * k0
idx_nonan = np.flatnonzero(np.in1d(labs,
comp_size[comp_size >= comp_size_thresh].index))
# check if 75% of all cells can be used for kBET run
if len(idx_nonan) / len(labs) >= 0.75:
# create another subset of components, assume they are not visited in a diffusion process
adata_sub_sub = adata_sub[idx_nonan, :].copy()
nn_index_tmp = np.empty(shape=(adata_sub.n_obs, k0))
nn_index_tmp[:] = np.nan
nn_index_tmp[idx_nonan] = diffusion_nn(adata_sub_sub, k=k0).astype('float')
#call kBET
score = kBET_single(
matrix=matrix,
batch=adata_sub.obs[batch_key],
knn=nn_index_tmp + 1, # nn_index in python is 0-based and 1-based in R
subsample=subsample,
verbose=verbose,
heuristic=False,
k0=k0,
type_=type_
)
else:
# if there are too many too small connected components, set kBET score to 1
# (i.e. 100% rejection)
score = 1
else: # a single component to compute kBET on
nn_index_tmp = diffusion_nn(adata_sub, k=k0).astype('float')
#call kBET
score = kBET_single(
matrix=matrix,
batch=adata_sub.obs[batch_key],
knn=nn_index_tmp + 1, # nn_index in python is 0-based and 1-based in R
subsample=subsample,
verbose=verbose,
heuristic=False,
k0=k0,
type_=type_
)
kBET_scores['cluster'].append(clus)
kBET_scores['kBET'].append(score)
kBET_scores = | pd.DataFrame.from_dict(kBET_scores) | pandas.DataFrame.from_dict |
## 1. Count the level of gauges
## 2. Sort the gauges by level
## 3. Automatic bias correction from upstream to downstream at monthly scale
## 4. Add each gauge's delta Q to its downstream reaches
## 5. Bias correction for ungauged rivers at monthly scale
## 6. Bias scale mapping at daily scale
## Input: fast_connectivity1.csv; Qout_61_18_daily.nc, Qmon.nc, Qbc_61_18_daily.nc, Qbc_month.nc
## gauge_id.csv; Q_obs.csv.
## Output: revised Qbc_month.nc and Qbc_61_18_daily.nc
## written by <NAME> 2022-05-05
import os
import pandas as pd
import xarray as xr
import numpy as np
from UDSBC.Q_pre import Qmonth
from UDSBC.Rivers import fast_connectivity,revise,upstream,downstream
from UDSBC.util import filter_nan
from UDSBC.BC import EQM,SDM
############### input_output_file ###############
river_shp = './input/liao.shp'
basin_id = './input/basin_id.csv'
rapid_Qout = './input/Qout.nc'
gauge_file = './input/gauge_id.csv'
obs_file = './input/Q_obs.csv'
river_connect = './output/fast_connectivity.csv'
river_connect1 = './output/fast_connectivity1.csv'
Q_daily_file = './output/Qout_61_18_daily.nc'
Q_month_file = './output/Qmon.nc'
bc_daily_file = './output/Qbc_61_18_daily.nc'
bc_monthly_file = './output/Qbc_month.nc'
############### creat_Qfile ###############
Qmonth.creat_Q(basin_id,rapid_Qout,Q_daily_file,Q_month_file)
sysComm1 = "cdo monmean ./output/Qout_61_18_daily.nc ./output/Qmon.nc"
os.system(sysComm1)
############### creat_Qbcfile ###############
sysComm2 = "cp ./output/Qout_61_18_daily.nc ./output/Qbc_61_18_daily.nc"
sysComm3 = "cp ./output/Qmon.nc ./output/Qbc_month.nc"
os.system(sysComm2)
os.system(sysComm3)
############### River_connet ###############
fast_connectivity.connectivity(river_shp,river_connect)
revise.river_revise(river_connect,river_connect1)
############### BC producer ##############
Qout_m = xr.open_dataset(Q_month_file).load()['qout'] ##original
Rivers = xr.open_dataset(Q_month_file).load()['rivers']
dates = xr.open_dataset(Q_month_file).load()['time']
Qbc_m = xr.open_dataset(Q_month_file).load()['qout'] ##need to BC
print('**** Read gauges file *****')
gauges = | pd.read_csv(gauge_file) | pandas.read_csv |
#!/usr/bin/env python3
import sys, math, gzip
import numpy as np
import pandas as pd
from time import time
# calculate Wen/Stephens shrinkage LD estimate
gmapfile = sys.argv[1] # genetic map
indfile = sys.argv[2] #list of individuals
# NE = 11418.0
NE = float(sys.argv[3])
# CUTOFF = 1e-7
CUTOFF = float(sys.argv[4])
outfile = sys.argv[5] # outfile file
inds = pd.read_table(indfile, header=None, squeeze=True).to_list()
haps= list()
theta = 0
nind = len(inds)
s = 0
for i in range(1, 2*nind):
s = s+ 1.0/float(i)
nind = float(nind)
s = 1/s
#print "s", s
theta = s/(2.0*float(nind)+s)
print(theta)
pos2gpos = pd.read_table(gmapfile, index_col=0, header=None, sep=" ", squeeze=True)
# pos2gpos = pos2gpos.to_dict()
print(list(pos2gpos.items())[:5])
print(len(pos2gpos))
df = pd.read_table(sys.stdin, header=None)
allpos = df.pop(0) # .tolist()
allrs = df.pop(1) # .tolist()
haps = df.astype(np.int8)
pos2gpos = pos2gpos[allpos]
records = []
len_g1 = float(haps.shape[1])
ee_const = NE * 4.0 / (2.0 * nind)
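# Illustration (made-up sample size): with NE = 11418 and 100 diploid individuals,
# ee_const = 4 * 11418 / 200 ~ 228.4, so two SNPs separated by 1e-4 map units keep
# a shrinkage weight of exp(-1e-4 * 228.4) ~ 0.98, while a separation of 0.1 map
# units shrinks the weight to ~1e-10, far below any reasonable CUTOFF.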
for i in range(len(allpos)):
# if i == 1:
# raise Exception("Hi")
# print("-----")
# print("i", i, i/len(allpos), len(allpos))
# print(time())
pos1 = allpos[i]
# print(pos1)
gpos1 = pos2gpos[pos1]
# print(" gpos1", gpos1)
toofar = False
j = i
# print(" len(allpos)", len(allpos))
# print("j, len(allpos)")
# print(j, len(allpos))
while j < len(allpos) and toofar == False:
# whole_start = time()
# print(" i", i, "j", j)
pos2 = allpos[j]
gpos2 = pos2gpos[pos2]
# print(" gpos2", gpos2)
df = gpos2-gpos1
# print(" df", df)
# print(" NE", NE)
# print(" inds", len(inds))
ee = math.exp( - df * ee_const)
# print(" ee", ee)
# print(" CUTOFF", CUTOFF)
if ee < CUTOFF:
toofar = True
j = j+1
continue
g1 = haps.iloc[i]
# print(" g1", g1)
g2 = haps.iloc[j]
haps_compare = pd.concat([g1, g2], axis=1)
haps_compare.columns = [0, 1]
haps_compare = haps_compare.groupby([0, 1]).size().astype(float).to_dict()
n11 = haps_compare.get((1, 1), 0)
n10 = haps_compare.get((1, 0), 0)
n01 = haps_compare.get((0, 1), 0)
# end = time()
# print("took", end - start)
# print(" g1", g1)
# for k in range(len(g1)):
# # print(" k", k)
# if g1[k] == "1" and g2[k] == "1":
# n11 = n11+1
# elif g1[k] == "0" and g2[k] == "1":
# n01 = n01 +1
# elif g1[k] == "1" and g2[k] == "0":
# n10 = n10 +1
f11 = n11/len_g1
f1 = (n11+n10)/len_g1
f2 = (n11+n01)/len_g1
D = f11 - f1*f2
Ds = D*ee
Ds2 = (1-theta)*(1-theta)*Ds
# whole_end = time()
# print("whole took", whole_end - whole_start)
if math.fabs(Ds2) < CUTOFF:
j = j+1
continue
if i == j:
Ds2 = Ds2 + (theta/2.0)*(1-theta/2.0)
result = (allrs[i], allrs[j], pos1, pos2, gpos1, gpos2, D, Ds2)
print(result)
records.append(result)
# print(" j", j)
j = j+1
df = | pd.DataFrame.from_records(records) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 17 19:51:21 2018
@author: Bob
"""
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import DBSCAN
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
from sqlalchemy import create_engine
from config import config
import pandas as pd
import numpy as np
import unidecode
import psycopg2
import re
import click
from tqdm import tqdm
from mpproj.routefinder.StyleInformation import *
def MPAnalyzer():
'''Finishes cleaning routes using formulas that require information about
the whole database.
The Bayesian rating system, route clustering algorithm and calculation of
TFIDF values require information about all routes, and not just one that is
of interest. Therefore, this file must be run after all data collection
has finished. This function is a handler for six functions:
- bayesian_rating: Calculates the weighted quality rating for each
route
- route_clusters: Groups routes together based on geographic distance
- idf: Calculates inverse-document-frequency for words in the route
descriptions
        - tfidf: Calculates term-frequency-inverse-document-frequency for words
in route descriptions
- normalize: Normalizes vectors for TFIDF values
- find_route_styles: Compares routes to the ideal to help categorize
Returns:
Updated SQL Database
'''
print('Connecting to the PostgreSQL database...', end='')
engine = create_engine(
'postgresql+psycopg2://postgres:postgres@localhost:5432/routes')
params = config.config()
conn = psycopg2.connect(**params)
cursor = conn.cursor()
print('Connected')
tqdm.pandas()
def tfidf(min_occur=0.001, max_occur=0.9):
''' Calculates Term-Frequency-Inverse-Document-Frequency for a body of
documents.
Term-Frequency-Inverse-Document-Frequency(TFIDF) is a measure of the
importance of words in a body of work measured by how well they help to
distinguish documents. Words that appear frequently in documents score
high on the Term-Frequency metric, but if they are common across the
corpus, they will have low Inverse-Document-Frequency scores. TFIDF
can then be used to compare documents to each other, or, in this case,
to documents with known topics.
TFIDF = TF * IDF
TF = Term Frequency
IDF = Inverse Document Frequency
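        Worked example (illustrative numbers only): a word appearing 3 times
        in a 100-word description has TF = 0.03; if it occurs in 10 of 1,000
        route documents, IDF = 1 + ln(1000 / 10) ~ 5.6, giving TFIDF ~ 0.17.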
Args:
min_occur(int): The minimum number of documents that a word has to
appear in to be counted. Included to ignore words that only
appear in a few documents, and are therefore not very useful
for categorization.
max_occur(int): The maximum number of documents that a word can
appear in to be counted. This is included to ignore highly
common words that don't help with categorization.
Returns:
routes(pandas Dataframe): Holds route-document information,
including term-frequency, inverse-document-frequency, TFIDF,
and normalized TFIDF values
Updated SQL Database: Updates the TFIDF table on main DB with the
routes dataframe
'''
print('Getting number of routes', end=' ', flush=True)
cursor.execute('SELECT COUNT(route_id) FROM Routes')
num_docs = cursor.fetchone()[0]
print(num_docs)
print('Getting route text data', flush=True)
min_occur *= num_docs
max_occur *= num_docs
query = 'SELECT route_id, word, tf FROM Words'
routes = pd.read_sql(query, con=conn, index_col='route_id')
print('Removing non-essential words.', flush=True)
routes = routes.groupby('word', group_keys=False)
routes = routes.progress_apply(
weed_out,
min_occur=min_occur,
max_occur=max_occur)\
.set_index('route_id')
print('Getting IDF', flush=True)
routes = routes.groupby('word', group_keys=False)
routes = routes.progress_apply(
idf,
num_docs=num_docs).set_index('route_id')
print('Calculating TFIDF', flush=True)
routes['tfidf'] = routes['tf'] * routes['idf']
print('Normalizing TFIDF values', flush=True)
routes = routes.groupby(routes.index, group_keys=False)
routes = routes.progress_apply(lambda x: normalize('tfidf', table=x))
print('Writing TFIDF scores to SQL', flush=True)
routes = routes.set_index('route_id')
routes = routes[['word', 'idf', 'tfidfn']]
# This will take a long time
routes.to_sql('TFIDF', con=engine, if_exists='replace', chunksize=1000)
def weed_out(table, min_occur, max_occur):
'''Removes words that are too common or too rare
Args:
table(Series): Instances of a word
min_occur: Fewest number acceptable
max_occur: Greatest number acceptable
Returns:
table: updated series'''
if min_occur < len(table) < max_occur:
return table.reset_index()
def idf(word, num_docs):
''' Finds inverse document frequency for each word in the selected
corpus.
Inverse document frequency(IDF) is a measure of how often a word
appears in a body of documents. The value is calculated by:
IDF = 1 + log(N / dfj)
N = Total number of documents in the corpus
dfj = Document frequency of a certain word, i.e., the number of
documents that the word appears in.
Args:
word(pandas dataframe): A dataframe composed of all instances of a
word in a corpus.
num_docs(int): The total number of documents in the corpus
Returns:
word(pandas dataframe): The same document with the calculated IDF
score appended.
'''
word['idf'] = 1 + np.log(num_docs / len(word))
return word.reset_index()
def normalize(*columns, table, inplace=False):
''' Normalizes vector length.
Vector values must be normalized to a unit vector to control for
differences in length. This process is done by calculating the length
of a vector and dividing each term by that value. The resulting
'unit-vector' will have a length of 1.
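        For example (illustrative), the vector (3, 4) has length 5 and
        normalizes to (0.6, 0.8).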
Args:
table(pandas dataframe): Table hosting vector to be normalized
*columns(str): Names of columns to be normalized
inplace(Boolean, default = False):
If inplace=False, adds new columns with normalized values.
If inplace=True, replaces the columns.
Returns:
table(pandas dataframe): Updated dataframe with normalized values.
'''
for column in columns:
if not inplace:
column_name = column + 'n'
elif inplace:
column_name = column
length = np.sqrt(np.sum(table[column] ** 2))
table[column_name] = table[column] / length
return table.reset_index()
def fill_null_loc():
"""Fills empty route location data.
Not all routes have latitude and longitude coordinates, so we must use
the coordinates of their parent area instead as a rough estimate. This
function first grabs all routes with no data, then fills in the data
with the lowest level area it can, going up as many areas as needed
until it finds one with proper coordinates.
Returns:
Updated SQL Database
"""
print('Filling in empty locations', flush=True)
# Select a route without location data
cursor.execute('''
SELECT route_id, area_id, name FROM Routes
WHERE latitude is Null OR longitude is Null
LIMIT 1''')
route = cursor.fetchone()
while route is not None:
# Route ID
rid = route[0]
# From ID
fid = route[1]
name = route[2]
print(f'Finding location information for {name}')
# Loops until it finds proper data
lat, long = None, None
            while lat is None or long is None:
# Gets latitude and longitude from parent area
cursor.execute(f'''
SELECT
latitude,
longitude,
from_id
FROM Areas
WHERE id = {fid}
LIMIT 1''')
loc = cursor.fetchone()
lat, long = loc[0], loc[1]
fid = loc[2]
# Updates DB
cursor.execute(f'''
UPDATE Routes
SET
latitude = {lat},
longitude = {long}
WHERE route_id = {rid}''')
conn.commit()
cursor.execute('''
SELECT
route_id,
area_id,
name
FROM Routes
WHERE
latitude is Null
OR longitude is Null
LIMIT 1''')
route = cursor.fetchone()
def route_clusters(routes):
''' Clusters routes into area groups that are close enough to travel
between when finding climbing areas.
Routes can be sorted into any number of sub-areas below the 'region'
parent. By clustering the routes based on latitude and longitude
instead of the name of the areas and parent areas, the sorting
algorithm will be able to more accurately determine which routes are
close together. This function uses SciKit's Density Based Scan
clustering algorithm. The algorithm works by grouping points together
in space based on upper-limits of distance and minimum numbers of
members of a cluster. More generally, the algorithm first finds the
epsilon neighborhood of a point. This is the set of all points whose
distance from a given point is less than a specified value epsilon.
Then, it finds the connected core-points, which are the points that
have at least the minimum number of connected points in its
neighborhood. Non-core points are ignored here. Finally, the
algorithm assigns each non-core point to a nearby cluster if is within
epsilon, or assigns it to noise if it is not.
        The advantages of this are that the scan clusters data of any shape,
        is robust to outliers and noise, and that the epsilon and minimum-points
        parameters can be adjusted.
This function returns the label/name for the cluster that a route
appears in, as well as the number of other routes in that same cluster.
This will allow the sorting algorithm to more heavily weight routes
that are clustered near others.
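        As an illustration only (toy coordinates, not project data): running
        DBSCAN(eps=0.01, min_samples=3) on the points
        [(0, 0), (0, 0.001), (0.001, 0), (5, 5)] labels the first three points
        as one cluster (label 0) and the isolated point as noise (label -1).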
Args:
routes(pandas df): Pulled from cleaned route SQL DB with columns:
                - route_id (int, unique): Unique route identifiers
- latitude (float)
- longitude (float)
Returns:
routes(pandas df): Updated with clustered area group number:
                - route_id (int, unique): Unique route identifiers
- area_group (int): Cluster id
'''
# Route location
lats = routes['latitude']
longs = routes['longitude']
locs = []
for x in range(len(lats)):
locs.append((lats.iloc[x], longs.iloc[x]))
# Converted into df
locs = StandardScaler().fit_transform(locs)
# Max distance in latitude
epsilon = 0.0007
# Min number of routes in a cluster
min_routes = 3
        # Density-based scan (DBSCAN)
db = DBSCAN(eps=epsilon, min_samples=min_routes).fit(locs)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
# Cluster names
labels = db.labels_
unique, counts = np.unique(labels, return_counts=True)
counts = dict(zip(unique, counts))
# Number of routes in the same cluster as a given route
area_counts = []
for label in labels:
if label >= 0:
# Counts number of routes
area_counts.append(counts[label])
            # Areas are given a cluster id of -1 if they are not part of a
            # cluster
elif label == -1:
# If so, there is only 1 route in their 'cluster'
area_counts.append(1)
routes['area_group'] = labels
routes['area_counts'] = area_counts
routes = routes[['area_group', 'area_counts']]
return routes
def bayesian_rating(routes):
''' Updates route quality with weighted average.
The Bayesian average rating system helps to mitigate the effects of
user ratings for routes that only have a few reviews. The weighted
rating works by first finding the average rating for all routes, and
using that to bring low-rated routes up and high-rated routes down.
The result - the Bayes rating - is an updated rating weighted by the
average number of stars across all routes. The weight decreases
according to the number of votes cast.
        Bayesian rating = ((r * v) + (a * 10)) / (v + 10)
r = Route rating
v = Number of votes
a = Average rating across all routes
Essentially, the function gives each route phantom-users who all give
the route the average score. For routes with a high number of ratings
the effect of the additional phantom users is minimal, but for routes
with only one or two actual user ratings, the effect is large. This
keeps 4-star rated routes from dominating the sorting algorithm if they
only have a few votes, and helps promote unrated routes that may be of
high quality.
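        Worked example (hypothetical numbers): a route with two 4.0-star votes
        and a site-wide average of 2.5 stars gets
        ((4.0 * 2) + (2.5 * 10)) / (2 + 10) = 2.75, while the same ratings
        backed by 200 votes yield ((4.0 * 200) + 25) / 210 ~ 3.9.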
Args:
routes(pandas df): Pulled from cleaned route SQL DB with columns:
- route_id (int, unique): Unique route identifiers
- stars (float): Raw average rating
- votes (int): Number of user ratings
Returns:
routes(pandas df): Updated dataframe with Bayes rating and columns:
                - route_id (int, unique): Unique route identifiers
- bayes (float): Weighted average rating
'''
# Average rating of all routes
stars = pd.read_sql('SELECT stars FROM Routes', con=conn)
avg_stars = np.mean(stars)['stars']
# Weighted Bayesian rating
routes['bayes'] = round((((routes['votes'] * routes['stars'])
+ avg_stars * 10) / (routes['votes'] + 10)), 1)
return routes['bayes'].to_frame()
def find_route_styles(*styles, path='Descriptions/'):
''' Returns weighted scores that represent a route's likelihood of
containing any of a series of features, e.g., a roof, arete, or crack.
Route names, descriptions, and user comments can indicate the presence
of rock and route features. Term-Frequency-Inverse-Document-Frequency
(TFIDF) values for the blocks of text gathered for each route can be
compared to 'archetypal' routes to glean insight into these features.
This comparison is further clarified using Bayesian statistics to
        measure the credibility of the comparison, and is then adjusted to
reflect that. At present, each route is compared against archetypal
routes with the following features:
Aretes - A sharp vertical edge of a block, cliff or boulder
Chimney - A large vertical crack that a climber can fit in and
climb using opposing pressure
Crack - Smaller cracks ranging from finger-sized to a few inches
wide (off-width)
Slab - Low-angle rock faces (less than vertical)
Overhang - Roofs, caves or more-than-vertical rock faces
More styles or archetypes can be added in the future by creating .txt
files and adding them to the 'Descriptions' sub-folder, then adding the
style to the *styles argument.
Args:
*styles(str): The name of the files that each route will be
compared against.
path(str): Folder location of the Database
Returns:
Updated SQL Database with weighted route scores
'''
def text_splitter(text):
'''Splits text into words and removes punctuation.
Once the text has been scraped it must be split into individual
words for further processing. The text is all put in lowercase,
then stripped of punctuation and accented letters. Tokenizing helps
to further standardize the text, then converts it to a list of
words. Each word is then stemmed using a Porter stemmer. This
removes suffixes that make similar words look different, turning,
for example, 'walking' or 'walked' into 'walk'. Stop words are
also filtered out at this stage.
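            For example (illustrative), 'Walking the Weeping Crack!' becomes
            ['walk', 'weep', 'crack'] after lowercasing, stripping punctuation,
            removing stop words, and stemming.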
Args:
text(str): Single string of text to be handled
Returns:
text(list): List of processed words.'''
# Converts to lowercase
text = text.lower()
# Strips punctuation and converts accented characters to unaccented
text = re.sub(r"[^\w\s]", '', text)
text = unidecode.unidecode(text)
# Tokenizes words and returns a list
text = word_tokenize(text)
# Remove stopwords
stop_words = set(stopwords.words('english'))
# Stems each word in the list
ps = PorterStemmer()
text = [ps.stem(word) for word in text if word not in stop_words]
return text
def archetypal_tf(*styles, path):
''' Returns term-frequency data for descriptions of archetypal
climbing routes and styles. This will be used later to categorize
routes.
Term-Frequency = t / L
t = Number of appearances for a word in a document
L = Number of total words in the document
Args:
*styles(str): Name of .txt file to parse. Can either be the
plain name or have the .txt suffix
path(str): Path to folder with route descriptions
Returns:
tf.csv(CSV File): CSV File of term frequency for each style.
This will help determine if TF values are what is expected
when adding new styles.
archetypes(Pandas Dataframe): Holds words term-frequency values
for words in the files.'''
# Initializes Dataframe
archetypes = pd.DataFrame()
for style in styles:
# Formats suffix
if style.endswith('.txt'):
# Opens .txt file
try:
file = open(path + style)
style = style[:-4]
# Returns errors
except OSError as e:
return e
else:
try:
file = open(path + style + '.txt')
except OSError as e:
return e
# Creates single block of text
text = ''
for line in file:
text += line
# Splits and processes text
text = text_splitter(text)
# Length of document in words
length = len(text)
# Counts appearances of each word
text = | pd.DataFrame({'word': text}) | pandas.DataFrame |
# Copyright © 2019 <NAME>
"""
Test for the ``preprocess._aggregate_columns._difference`` module.
"""
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import unittest
# Tests for:
from ...clean_variables import VariableCleaner
class PreprocessConstantDifferenceTests(unittest.TestCase):
"""
Tests for the ``preprocess._aggregate_columns._difference`` module. Assert final data frames match expectations.
"""
@staticmethod
def test_clean_difference_ints_0():
"""Test subtracting 0 from a column."""
_input = DataFrame({"A": [1, 2, 3]})
_expected = DataFrame({"A": [1, 2, 3]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": 0}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_ints_1():
"""Test subtracting 1 from a column."""
_input = DataFrame({"A": [1, 2, 3]})
_expected = DataFrame({"A": [0, 1, 2]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": 1}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_floats_0():
"""Test subtracting 0.0 from a column."""
_input = DataFrame({"A": [1.0, 2.0, 3.0]})
_expected = DataFrame({"A": [1.0, 2.0, 3.0]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": 0.0}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_floats_negative_1():
"""Test subtracting -1.0 from a column."""
_input = DataFrame({"A": [1.0, 2.0, 3.0]})
_expected = DataFrame({"A": [2.0, 3.0, 4.0]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": -1.0}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
class PreprocessVariableDifferenceTests(unittest.TestCase):
"""
Tests for the ``preprocess._aggregate_columns._difference`` module with column subtraction.
"""
@staticmethod
def test_clean_difference_int_column():
"""Test subtracting the right column from the left."""
_input = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]})
_expected = DataFrame({"A": [-1, -1, -1], "B": [2, 3, 4]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": "B"}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_right_string_column():
"""Test subtracting the right column from the left. Right column has strings."""
_input = DataFrame({"A": [1, 2, 3], "B": ["2", "3", "4"]})
_expected = DataFrame({"A": [-1.0, -1.0, -1.0], "B": ["2", "3", "4"]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": "B"}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_left_string_column():
"""Test subtracting the right column from the left. Left column has strings."""
_input = DataFrame({"A": ["1", "2", "3"], "B": [2, 3, 4]})
_expected = DataFrame({"A": [-1.0, -1.0, -1.0], "B": [2, 3, 4]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": "B"}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_both_string_column():
"""Test subtracting the right column from the left. Both left and right have strings."""
_input = DataFrame({"A": ["1", "2", "3"], "B": ["2", "3", "4"]})
_expected = DataFrame({"A": [-1.0, -1.0, -1.0], "B": ["2", "3", "4"]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": "B"}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
| assert_frame_equal(_expected, _vc.frame) | pandas.util.testing.assert_frame_equal |
#! /usr/bin/env python3
from argparse import ArgumentParser
from collections import defaultdict
from IPython import embed
import itertools
import json
from enum import Enum
from math import sqrt
from multiprocessing import Pool, cpu_count
import pandas as pd
import numpy as np
from pathlib import Path
from pprint import pprint
import re
import Utils
from SpecBench import *
from Graph import Grapher
from Grapher import Grapher
from DataObject import DataObject
import pandas as pd
pd.set_option('display.float_format', lambda x: '%.3f' % x)
#pd.set_option('display.max_rows', None)
from Results import *
class Report:
    ''' Aggregate Results into a unified document. '''
CHECKPOINT_REGEX = re.compile(r'([0-9]+\.[a-zA-Z]+_r)_(.*)_([0-9]+)_check\.cpt.*')
def __init__(self, args):
''' Gather the summary files from sim results. '''
if args.simresult_dir is None:
raise Exception('Must give valid simresult dir as an argument (not None)!')
res_dir = Path(args.simresult_dir)
assert res_dir.exists()
self.verbatim = args.verbatim
self.do_intersection = not args.include_all
self.smt = args.smt
files = []
present = defaultdict(lambda: defaultdict(dict))
self.summary_data = []
for dirent in Utils.get_directory_entries_by_time(res_dir):
if dirent.is_file() and 'summary.json' in dirent.name:
if args.bench is not None and not args.bench in dirent.name:
continue
try:
with dirent.open('r') as f:
self.summary_data += [json.load(f)]
except:
print('Could not open {}'.format(str(dirent)))
# benchmark -> configs -> dict{ checkpoint -> series }
self.sim_series = defaultdict(lambda: defaultdict(dict))
for summary in self.summary_data:
if 'checkpoints' not in summary:
# Means the run was terminated early
continue
chk_prefix = '{}_{}'.format(summary['bench'], summary['mode'])
result_dirs = {}
for c, status in summary['checkpoints'].items():
chk_name = Path(c).name
try:
num = int(chk_name.split('_')[0])
except:
continue
if status == 'successful':
result_dirs[num] = res_dir / '{}_{}'.format(chk_prefix, chk_name)
            benchmark = summary['bench']
            mode = summary['mode']
            for checkpoint_num, dirent in result_dirs.items():
                if not dirent.exists():
                    present[benchmark][mode][checkpoint_num] = 0
                    continue
f = dirent / 'res.json'
if f.exists():
files += [f]
present[benchmark][mode][checkpoint_num] = 1
try:
series = pd.read_json(f, typ='series')
self.sim_series[benchmark][mode][checkpoint_num] = series
except:
present[benchmark][mode][checkpoint_num] = 0
else:
present[benchmark][mode][checkpoint_num] = 0
present_list = defaultdict(lambda: defaultdict(list))
for benchmark_name, per_config in present.items():
for config_name, checkpoint_mappings in per_config.items():
checkpoints = sorted(checkpoint_mappings.keys())
num_checkpoints = summary['total_checkpoints']
for i in range(num_checkpoints):
present_list[benchmark_name][config_name] += \
[ present[benchmark_name][config_name][i]
if i in present[benchmark_name][config_name]
else 0 ]
if len(files) == 0:
raise Exception('No valid result files in given directory!.')
self.outfile = args.output_file
self.files = files
self.present = present_list
@staticmethod
def _get_file_data(fpath):
with fpath.open() as fd:
return json.load(fd)
def _get_all_results(self):
all_res = []
        with Pool(cpu_count()) as p:
all_res = p.map(self._get_file_data, self.files)
print('Collected results from {} files.'.format(len(self.files)))
res_by_type = defaultdict(lambda: defaultdict(list))
for res in all_res:
assert RunType.__name__ in res and 'benchmark' in res
t = res[RunType.__name__]
b = res['benchmark']
res_by_type[t][b] += [res]
return res_by_type
def _construct_data_frames(self):
'''
I want the following:
Per benchmark:
stat1 stat2 stat3
config_name: val val val
'''
self.sim_data_frames = defaultdict(dict)
for benchmark, config_series in self.sim_series.items():
checkpoint_sets = []
for config_name, checkpoint_results in config_series.items():
checkpoint_sets.append(checkpoint_results.keys())
all_checkpoints = set(checkpoint_sets[0])
if self.do_intersection:
all_checkpoints.intersection_update(*checkpoint_sets[1:])
print('{} shares {} checkpoints across all configs.'.format(
benchmark, len(all_checkpoints)))
else:
print('For {}...'.format(benchmark))
for config_name, checkpoint_results in config_series.items():
print('\t{} has {} checkpoints.'.format(
config_name, len(checkpoint_results)))
only_use = list(all_checkpoints)
for config_name, checkpoint_results in config_series.items():
if not self.do_intersection:
only_use = [k for k in checkpoint_results.keys()]
df = pd.DataFrame(checkpoint_results)[only_use].T
stat_means = df.mean().rename('mean')
stat_stdev = df.std(ddof=0).rename('std')
stat_nums = df.count().rename('count')
assert stat_nums.min() >= 0
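                # 95% confidence interval half-width, assuming approximate
                # normality: 1.96 * sigma / sqrt(n)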
stat_ci = ((1.96 * df.std(ddof=0)) / np.sqrt(stat_nums)).rename('ci')
summary_df = | pd.DataFrame([stat_means, stat_stdev, stat_ci, stat_nums]) | pandas.DataFrame |
# coding: utf-8
# In[1]:
import pandas as pd ## data structuring and analysis library
import numpy as np ## linear algebra library, among other utilities
## --------------------- ##
## plotly/dash libraries ##
## --------------------- ##
import dash
import plotly.graph_objs as go
from dash.dependencies import Input, Output, Event
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
from flask import Flask
## --------------------- ##
import datetime ## library for creating/working with dates
#############################
## load files
vendas = pd.read_csv('UauOffice_Vendas.csv')
contratos = pd.read_csv('hubspot_Vendas.csv')
vendas['agent_id'] = vendas.id_contrato.map(lambda x: int(x[:3]))
vendas['data'] = vendas['data'].map(lambda data: pd.to_datetime(data).date())
vendas['valor'] = vendas.valor.map(lambda x: np.round(float(x),2))
agents = np.sort(contratos.id_representante.unique())
########################
server = Flask(__name__)
radio_itens = ['Todos','Selecionados']
app = dash.Dash(name = __name__, server = server, url_base_pathname='/')
app.config.supress_callback_exceptions = True
app.layout = html.Div(
html.Div([
html.H1(u'Dashboard Desafio Pagar.me', style={'textAlign': 'center'}),
html.H3(u' ', style={'textAlign': 'center'}),
html.H3(u'Data mais recente dos dados: {}'.format(datetime.datetime.today().date()), style={'textAlign': 'center'}),
html.Div([
html.Label('Agente', style={'fontsize':20}),
dcc.Dropdown(
id = 'dropdown_agent',
options = [{'label':value, 'value':value} for (value) in agents],
multi=True,
),
dcc.RadioItems(
id='radio_agent',
options=[{'label': i, 'value': i} for i in radio_itens],
value=radio_itens[1],
),
], style={'width': '20%', 'display': 'inline-block'}),
html.Div([
html.Label('Contrato', style={'fontsize':20}),
dcc.Dropdown(
id = 'dropdown_contracts',
options = [{}],
multi=True,
),
dcc.RadioItems(
id='radio_contracts',
options=[{'label': i, 'value': i} for i in radio_itens],
value=radio_itens[1],
),
], style={'width': '20%', 'display': 'inline-block'}),
html.Div([
html.Label('Grafico por', style={'fontsize':20}),
dcc.Dropdown(
id = 'dropdown_graphtype',
options = [{'label': i, 'value': i} for i in ['Agente','Contrato']],
value='Agente'
),
dcc.RadioItems(
id='radio_graphtype',
options=[{'label': i, 'value': i} for i in ['Acumulado','Discretizado']],
value='Acumulado',
),
], style={'width': '40%', 'display': 'inline-block'}),
dcc.Graph(id='graph'),
dt.DataTable(
rows=[dict(zip([X for X in vendas.columns],['' for X in vendas.columns]))],
columns=[X for X in vendas.columns],
row_selectable=False,
filterable=False,
sortable=True,
selected_row_indices=[],
id='datatable'
),
],))
@app.callback(
dash.dependencies.Output('dropdown_contracts', 'options'),
[dash.dependencies.Input('dropdown_agent', 'value')])
def update_dropdown_agent (agents):
global contratos
if agents:
dummy = contratos.loc[contratos.id_representante.isin(agents)]
contracts = np.sort(dummy.id_contrato.unique())
return [{'label':value, 'value':value} for (value) in contracts]
else:
return [{}]
@app.callback(
dash.dependencies.Output('graph', 'figure'),
[dash.dependencies.Input('dropdown_agent', 'value'),dash.dependencies.Input('radio_agent', 'value'),
dash.dependencies.Input('dropdown_contracts', 'value'),dash.dependencies.Input('radio_contracts', 'value'),
dash.dependencies.Input('dropdown_graphtype', 'value'),dash.dependencies.Input('radio_graphtype', 'value'),])
def update_graph(dropdown_agents, radio_agents, dropdown_contracts, radio_contracts, dropdown_graphtype, radio_graphtype):
global vendas
vendas_copy = vendas.copy()
graph_data = []
d0 = | pd.to_datetime('2017-01-01') | pandas.to_datetime |
import logging
from tools.EventGeneration import convert_date, generate_random_time, generate_random_node_id
logger = logging.getLogger(__name__.split('.')[-1])
from features.ResponseTypeFeature import ResponseTypeFeature
from features.ReplayTimeSeriesFeature import ReplayTimeSeriesFeature
import tools.Cache as Cache
import random
import pandas as pd
import warnings
from scipy.sparse import SparseEfficiencyWarning
warnings.simplefilter('ignore', SparseEfficiencyWarning)
random.seed(1234)
class PoissonSimulation:
'''
Simple event simulation. Given a replay of base events
and probabilities of responses, generate arbitrary single-layer
event cascades.
Parameters
----------
    cfg : configuration object; provides the simulation limits
        (limits.start_date, limits.end_date, limits.time_delta) and the
        poisson_simulation.generate_replies flag
    generate_replies : bool, optional
        If given, overrides the generate_replies setting from the config
'''
def __init__(self, cfg, generate_replies=None, **kwargs):
self.start_date = cfg.get("limits.start_date", type=convert_date)
self.end_date = cfg.get("limits.end_date", type=convert_date)
self.time_delta = cfg.get("limits.time_delta", type=pd.Timedelta).total_seconds()
if generate_replies is None:
self.generate_replies = cfg.get("poisson_simulation.generate_replies", True)
else:
self.generate_replies = generate_replies
self.cfg = cfg
@Cache.amalia_cache
def compute(self, dfs, train_dfs=None):
# Retrieve replay time-series feature and response type feature
ts = ReplayTimeSeriesFeature(self.cfg).compute(dfs)
responses = ResponseTypeFeature(self.cfg).compute(dfs)
res = []
platforms = dfs.get_platforms()
logger.warning('Very slow for dense data generation. Use ParallelPoissonSimulation to reduce runtime.')
for platform in platforms:
            platform_ts = ts[platform]
            platform_responses = responses[platform]
            node_map = dfs.get_node_map(platform)
            # For all users that have a nonzero row in their time series, generate events
            logger.info('Generating new events.')
            nonzero_rows, __ = platform_ts.nonzero()
            res = res + _generate_base_event(platform_ts, node_map, nonzero_rows, self.start_date, platform_responses, self.generate_replies, platform)
# Return a pandas DataFrame sorted by time
# Feed into the output module for actual result generation
res = | pd.DataFrame(res) | pandas.DataFrame |
import tensorflow as tf
import numpy as np
import os
import time
from utils import Feeder, normalize, similarity, loss_cal, optim, test_batch
from configuration import get_config
import sys
sys.path.append(os.getcwd())
from tacotron.models.modules import ReferenceEncoder
from tacotron.utils import ValueWindow
from tensorflow.contrib import rnn
import datetime
from hparams import hparams
import pandas as pd
import scikitplot as skplt
import matplotlib.pyplot as plt
VAL_ITERS = 5
config = get_config()
def time_string():
return datetime.datetime.now().strftime('%Y.%m.%d_%H-%M-%S')
def triple_lstm(batch):
# embedding lstm (3-layer default)
with tf.variable_scope("spk_emb_lstm"):
lstm_cells = [tf.contrib.rnn.LSTMCell(num_units=config.hidden, num_proj=config.proj) for i in range(config.num_layer)]
lstm = tf.contrib.rnn.MultiRNNCell(lstm_cells) # define lstm op and variables
outputs, _ = tf.nn.dynamic_rnn(cell=lstm, inputs=batch, dtype=tf.float32, time_major=True) # for TI-VS must use dynamic rnn
embedded = outputs[-1] # the last ouput is the embedded d-vector
embedded = normalize(embedded) # normalize
print("embedded size: ", embedded.shape)
return(embedded)
def train(path, args):
tf.reset_default_graph() # reset graph
timestamp = time_string() if args.time_string == None else args.time_string
# draw graph
feeder = Feeder(args.train_filename, args, hparams)
output_classes = max([int(f) for f in feeder.total_emt])+1 if args.model_type in ['emt', 'accent'] else max([int(f) for f in feeder.total_spk])+1
batch = tf.placeholder(shape= [args.N*args.M, None, config.n_mels], dtype=tf.float32) # input batch (time x batch x n_mel)
labels = tf.placeholder(shape=[args.N * args.M],dtype=tf.int32)
lr = tf.placeholder(dtype= tf.float32) # learning rate
global_step = tf.Variable(0, name='global_step', trainable=False)
w = tf.get_variable("w", initializer= np.array([10], dtype=np.float32))
b = tf.get_variable("b", initializer= np.array([-5], dtype=np.float32))
# embedded = triple_lstm(batch)
print("Training {} Discriminator Model".format(args.model_type))
encoder = ReferenceEncoder(filters=hparams.reference_filters, kernel_size=(3, 3),
strides=(2, 2),is_training=True,
scope='Tacotron_model/inference/pretrained_ref_enc_{}'.format(args.model_type), depth=hparams.reference_depth) # [N, 128])
embedded = encoder(batch)
embedded = normalize(embedded)
if args.discriminator:
logit = tf.layers.dense(embedded, output_classes, name='Tacotron_model/inference/pretrained_ref_enc_{}_dense'.format(args.model_type))
labels_one_hot = tf.one_hot(tf.to_int32(labels), output_classes)
# loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit,labels=labels_one_hot))
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit,labels=labels_one_hot))
acc, acc_op = tf.metrics.accuracy(labels=tf.argmax(labels_one_hot, 1),predictions=tf.argmax(logit, 1))
val_acc, val_acc_op = tf.metrics.accuracy(labels=tf.argmax(labels_one_hot, 1), predictions=tf.argmax(logit, 1))
else:
# loss
sim_matrix = similarity(embedded, w, b, args.N, args.M, P=hparams.reference_depth)
print("similarity matrix size: ", sim_matrix.shape)
loss = loss_cal(sim_matrix, args.N, args.M, type=config.loss)
val_acc_op = tf.constant(1.)
# optimizer operation
trainable_vars= tf.trainable_variables() # get variable list
optimizer= optim(lr) # get optimizer (type is determined by configuration)
grads, vars= zip(*optimizer.compute_gradients(loss)) # compute gradients of variables with respect to loss
if args.discriminator:
grads_rescale = grads
else:
grads_clip, _ = tf.clip_by_global_norm(grads, 3.0) # l2 norm clipping by 3
grads_rescale= [0.01*grad for grad in grads_clip[:2]] + grads_clip[2:] # smaller gradient scale for w, b
train_op= optimizer.apply_gradients(zip(grads_rescale, vars), global_step= global_step) # gradient update operation
# check variables memory
variable_count = np.sum(np.array([np.prod(np.array(v.get_shape().as_list())) for v in trainable_vars]))
print("total variables :", variable_count)
# record loss
loss_summary = tf.summary.scalar("loss", loss)
merged = tf.summary.merge_all()
saver = tf.train.Saver(max_to_keep=20)
loss_window = ValueWindow(100)
acc_window = ValueWindow(100)
val_loss_window = ValueWindow(5)
val_acc_window = ValueWindow(5)
# training session
with tf.Session() as sess:
tf.local_variables_initializer().run()
tf.global_variables_initializer().run()
checkpoint_folder = os.path.join(path, "checkpoints",timestamp)
logs_folder = os.path.join(path, "logs", timestamp)
os.makedirs(checkpoint_folder, exist_ok=True) # make folder to save model
os.makedirs(logs_folder, exist_ok=True) # make folder to save log
model_name = '{}_disc_model.ckpt'.format(args.model_type)
checkpoint_path = os.path.join(checkpoint_folder, model_name)
if args.restore:
checkpoint_state = tf.train.get_checkpoint_state(checkpoint_folder)
if (checkpoint_state and checkpoint_state.model_checkpoint_path):
print('Loading checkpoint {}'.format(checkpoint_state.model_checkpoint_path))
saver.restore(sess, checkpoint_state.model_checkpoint_path)
else:
print('No model to load at {}'.format(checkpoint_folder))
saver.save(sess, checkpoint_path, global_step=global_step)
else:
print('Starting new training!')
saver.save(sess, checkpoint_path, global_step=global_step)
writer = tf.summary.FileWriter(logs_folder, sess.graph)
        lr_factor = 1 # learning-rate decay factor (reduced stepwise during training; see below)
iterations = 30000 if args.model_type == 'emt' else config.iteration
for iter in range(iterations):
if args.discriminator:
batch_iter, _, labels_iter = feeder.random_batch_disc()
else:
batch_iter, _, labels_iter = feeder.random_batch()
# run forward and backward propagation and update parameters
step, _, loss_cur, summary, acc_cur = sess.run([global_step, train_op, loss, merged, acc_op],
feed_dict={batch:batch_iter, labels:labels_iter, lr: config.lr*lr_factor})
loss_window.append(loss_cur)
acc_window.append(acc_cur)
if step % 10 == 0:
writer.add_summary(summary, step) # write at tensorboard
if (step+1) % 20 == 0:
val_loss_cur_batch = 0
val_acc_cur_batch = 0
                for val_iter in range(VAL_ITERS):
if args.discriminator:
batch_iter, _, labels_iter = feeder.random_batch_disc(TEST=True)
else:
batch_iter, _, labels_iter = feeder.random_batch(TEST=True)
# run forward and backward propagation and update parameters
val_loss_cur, val_acc_cur = sess.run([loss, val_acc_op], feed_dict={batch: batch_iter, labels: labels_iter})
val_loss_cur_batch += val_loss_cur
val_acc_cur_batch += val_acc_cur
val_loss_cur_batch /= VAL_ITERS
val_acc_cur_batch /= VAL_ITERS
val_loss_window.append(val_loss_cur_batch)
val_acc_window.append(val_acc_cur_batch)
message = "(iter : %d) loss: %.4f" % ((step+1),loss_window.average)
if args.discriminator:
message += ', acc: {:.2f}%'.format(acc_window.average)
message += ", val_loss: %.4f" % (val_loss_window.average)
if args.discriminator:
message += ', val_acc: {:.2f}%'.format(val_acc_window.average)
print(message)
lr_changed=False
if args.model_type == 'emt':
if step > 6000:
lr_changed = True if lr_factor != .01 else False
lr_factor = .01
elif step > 4000:
lr_changed = True if lr_factor != .1 else False
lr_factor = .1
if lr_changed:
print("learning rate is decayed! current lr : ", config.lr * lr_factor)
elif args.model_type == 'spk':
if step > 300:#4000:
lr_changed = True if lr_factor != .01 else False
lr_factor = .01
elif step > 180:#2500:
lr_changed = True if lr_factor != .1 else False
lr_factor = .1
if lr_changed:
print("learning rate is decayed! current lr : ", config.lr * lr_factor)
if step % config.save_checkpoint_iters == 0:
saver.save(sess, checkpoint_path, global_step=global_step)
def test_disc(path_model, path_meta, path_data, args):
#dataset|audio_filename|mel_filename|linear_filename|spk_emb_filename|time_steps|mel_frames|text|emt_label|spk_label|basename|emt_name|emt_file|spk_name|spk_file
df = pd.read_csv(path_meta, sep='|')
batch_size_np = 100#len(df.index)
tf.reset_default_graph() # reset graph
# draw graph
feeder = Feeder(args.train_filename, args, hparams)
output_classes = max([int(f) for f in feeder.total_emt]) + 1 if args.model_type in ['emt', 'accent'] else max(
[int(f) for f in feeder.total_spk]) + 1
batch = tf.placeholder(shape=[None, None, config.n_mels],
dtype=tf.float32) # input batch (time x batch x n_mel)
labels = tf.placeholder(shape=[None], dtype=tf.int32)
# embedded = triple_lstm(batch)
print("Testing {} Discriminator Model".format(args.model_type))
encoder = ReferenceEncoder(filters=hparams.reference_filters, kernel_size=(3, 3),
strides=(2, 2), is_training=True, scope='Tacotron_model/inference/pretrained_ref_enc_{}'.format(args.model_type),
depth=hparams.reference_depth) # [N, 128])
embedded = encoder(batch)
embedded = normalize(embedded)
logit = tf.layers.dense(embedded, output_classes,name='Tacotron_model/inference/pretrained_ref_enc_{}_dense'.format(args.model_type))
logit_sm = tf.nn.softmax(logit)
labels_one_hot = tf.one_hot(tf.to_int32(labels), output_classes)
# loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit,labels=labels_one_hot))
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=labels_one_hot))
acc, acc_op = tf.metrics.accuracy(labels=tf.argmax(labels_one_hot, 1), predictions=tf.argmax(logit, 1))
batch_size = tf.shape(labels)[0]
#acc_e, acc_op_e = tf.metrics.accuracy(labels=tf.argmax(labels_one_hot[:batch_size], 1), predictions=tf.argmax(logit[:batch_size], 1))
saver = tf.train.Saver()
# training session
with tf.Session() as sess:
tf.local_variables_initializer().run()
tf.global_variables_initializer().run()
checkpoint_state = tf.train.get_checkpoint_state(path_model)
print('Loading checkpoint {}'.format(checkpoint_state.model_checkpoint_path))
saver.restore(sess, checkpoint_state.model_checkpoint_path)
emb_full=None
loss_avg=0
acc_avg=0
cnt=0
df_results = pd.DataFrame([],columns=['dataset','preds','true'])
for start in np.arange(0,len(df.index),batch_size_np):
end = start + batch_size_np
df_batch = df.iloc[start:end]
batch_iter, _, labels_iter = test_batch(path_data, df_batch, args)
# run forward and backward propagation and update parameters
loss_cur, acc_cur, lbls, log, emb = sess.run(
[loss, acc_op, labels, logit_sm, embedded],
feed_dict={batch: batch_iter, labels: labels_iter})
print('loss: {:.4f}, acc: {:.2f}%'.format(loss_cur, acc_cur*100))
#print(np.max(log, 1))
#print(np.mean(np.max(log, 1)))
emb_full = np.vstack((emb_full, emb)) if emb_full is not None else emb
preds = np.argmax(log,1)
#print(preds)
#print(lbls)
df_results_new = | pd.DataFrame([]) | pandas.DataFrame |
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from datetime import datetime
from io import StringIO
import os
import platform
from tempfile import TemporaryFile
from urllib.error import URLError
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
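# Illustrative sketch, not part of the original pandas test module: the
# comment inside MyCParserWrapper above relies on the CPython detail that a
# set of small integers iterates in sorted order, which is why the wrapper
# has to reverse the list explicitly to simulate a different ordering.
def _demo_usecols_order_assumption():
    usecols = {3, 0, 2}
    # Holds on CPython because small ints hash to themselves; it is an
    # implementation detail, not a language guarantee.
    assert list(usecols) == [0, 2, 3]
    reordered = list(usecols)
    reordered.reverse()
    assert reordered == [3, 2, 0]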
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup("utf-8")
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
def test_squeeze(all_parsers):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True)
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
def test_malformed(all_parsers):
# see gh-6607
parser = all_parsers
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#")
@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
parser = all_parsers
msg = "Expected 3 fields in line 6, saw 5"
reader = parser.read_csv(
StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
)
with pytest.raises(ParserError, match=msg):
reader.read(nrows)
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
],
columns=["A", "B", "C", "D", "E"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
]
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_wrong_num_columns(all_parsers):
# Too few columns.
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
parser = all_parsers
msg = "Expected 6 fields in line 3, saw 7"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"A,B\nTrue,1\nFalse,2\nTrue,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]),
DataFrame(
[[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
columns=["A", "B"],
),
),
(
"A,B\nTRUE,1\nFALSE,2\nTRUE,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nfoo,bar\nbar,foo",
dict(true_values=["foo"], false_values=["bar"]),
DataFrame([[True, False], [False, True]], columns=["A", "B"]),
),
],
)
def test_parse_bool(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_int_conversion(all_parsers):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2)
expected = DataFrame(
[
["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15],
["qux", 12, 13, 14, 15],
["foo2", 12, 13, 14, 15],
["bar2", 12, 13, 14, 15],
],
columns=["index", "A", "B", "C", "D"],
)
expected = expected.set_index("index")
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), chunksize=chunksize)
@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
# see gh-15755
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), expected)
def test_read_chunksize_and_nrows_changing_size(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
with pytest.raises(StopIteration, match=""):
reader.get_chunk(size=3)
def test_get_chunk_passed_chunksize(all_parsers):
parser = all_parsers
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
reader = parser.read_csv(StringIO(data), chunksize=2)
result = reader.get_chunk()
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)])
def test_read_chunksize_compat(all_parsers, kwargs):
# see gh-12185
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs)
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), result)
def test_read_chunksize_jagged_names(all_parsers):
# see gh-23509
parser = all_parsers
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4)
result = | concat(reader) | pandas.concat |
"""Functions for downloading data from API."""
import datetime as dt
import logging
from typing import Dict, Tuple
import pandas as pd
from constants import URLS, REGION_TO_POPULATION
logger = logging.getLogger("data_client")
logger.setLevel(logging.INFO)
class DataCache:
def __init__(self):
self.cache: Dict[str, Tuple[dt.datetime, pd.DataFrame]] = {}
self.expiry_time = dt.timedelta(hours=6)
def will_download(self, dataset_name: str):
dataset_dt, dataset = self.cache.get(dataset_name, (None, None))
current_dt = dt.datetime.now()
return (
dataset is None
or dataset_dt is None
or current_dt - dataset_dt > self.expiry_time
)
def get_with_timestamp(self, dataset_name: str) -> Tuple[dt.datetime, pd.DataFrame]:
if dataset_name.endswith("_per_k"):
base_dataset_name = dataset_name.replace("_per_k", "")
base_dataset_dt, base_dataset = self.get_with_timestamp(base_dataset_name)
per_k_dataset = base_dataset.apply(
lambda col: col * 1_000.0 / REGION_TO_POPULATION[col.name]
)
return (base_dataset_dt, per_k_dataset)
if dataset_name == "casos_nuevos_per_test":
dt1, df_casos_nuevos_cumulativo_t = self.get_with_timestamp(
"casos_nuevos_cumulativo_t"
)
dt2, df_pcr_t = self.get_with_timestamp("pcr_t")
ans = (df_casos_nuevos_cumulativo_t / df_pcr_t).loc["2020-04-09":, :] # type: ignore
return (min(dt1, dt2), ans)
dataset_dt, dataset = self.cache.get(dataset_name, (None, None))
current_dt = dt.datetime.now()
if (
dataset is None
or dataset_dt is None
or current_dt - dataset_dt > self.expiry_time
):
logging.warn(f"Cache miss for {dataset_name}: {dataset_dt}")
dataset = self._download(dataset_name)
self.cache[dataset_name] = (current_dt, dataset)
return self.cache[dataset_name]
def get(self, dataset_name: str):
return self.get_with_timestamp(dataset_name)[1]
def __getitem__(self, dataset_name: str):
return self.get(dataset_name)
def _download(self, dataset_name: str):
name_to_func = {
"p1_casos_acumulados_comuna": download_p1_casos_acumulados_comuna,
"casos_totales_cumulativo_t": download_casos_totales_cumulativo_t,
"fallecidos_cumulativo_t": download_fallecidos_cumulativo_t,
"fallecidos_etario_t": download_fallecidos_etario_t,
"uci_t": download_uci_t,
"casos_nuevos_cumulativo_t": download_casos_nuevos_cumulativo_t,
"numero_ventiladores_t": download_numero_ventiladores_t,
"pcr_t": download_pcr_t,
}
func = name_to_func[dataset_name]
return func()
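# Minimal usage sketch, not part of the original module: dataset names ending
# in "_per_k" are derived on the fly from the cached base dataset and
# REGION_TO_POPULATION (this assumes every column name, including any "Total"
# column, has an entry there); anything else is downloaded on first access
# and then reused until `expiry_time` passes. Running this for real needs
# network access to the URLS feeds.
def _demo_data_cache_usage():
    cache = DataCache()
    assert cache.will_download("uci_t")      # nothing cached yet
    uci = cache["uci_t"]                     # downloads and caches
    assert not cache.will_download("uci_t")  # now served from the cache
    uci_per_k = cache["uci_t_per_k"]         # derived, reuses the cached base
    return uci, uci_per_k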
def download_p1_casos_acumulados_comuna():
ans = pd.read_csv(URLS["p1_casos_acumulados_comuna"], index_col="Region")
ans.columns = ans.loc["Comuna"]
ans = ans.iloc[4:-1]
ans = ans.astype(float)
return ans
def download_casos_totales_cumulativo_t():
ans = pd.read_csv(
URLS["casos_totales_cumulativo_t"], index_col="Region", parse_dates=True
)
return ans
def download_fallecidos_cumulativo_t():
ans = pd.read_csv(
URLS["fallecidos_cumulativo_t"], index_col="Region", parse_dates=True
)
return ans
def download_fallecidos_etario_t():
ans = pd.read_csv(URLS["fallecidos_etario_t"], index_col="Grupo de edad")
return ans
def download_uci_t():
ans = pd.read_csv(URLS["uci_t"], index_col="Region")
ans = ans.loc["2020-04-01":, :] # type: ignore
ans.index = pd.to_datetime(ans.index)
ans["Total"] = ans.sum(axis=1)
return ans
def download_casos_nuevos_cumulativo_t():
ans = pd.read_csv(
URLS["casos_nuevos_cumulativo_t"], index_col="Region", parse_dates=True
)
return ans
def download_numero_ventiladores_t():
ans = pd.read_csv(
URLS["numero_ventiladores_t"], index_col="Ventiladores", parse_dates=True
)
ans = ans.drop(columns=["disponibles"])
return ans
def download_pcr_t():
ans = pd.read_csv(URLS["pcr_t"], index_col="Region", na_values="-")
ans = ans.loc["2020-04-09":, :] # type: ignore
ans.index = | pd.to_datetime(ans.index) | pandas.to_datetime |
import pandas as pd
import pytest
from rdtools.normalization import normalize_with_expected_power
@pytest.fixture()
def times_15():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='15T')
@pytest.fixture()
def times_30():
return | pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='30T') | pandas.date_range |
"""title
https://adventofcode.com/2021/day/19
"""
import numpy as np
import pandas as pd
import itertools
import re
SMALL_INPUT = open('small_input.txt').read()
ORIENTATIONS = """
x, y, z
x, z,-y
x,-y,-z
x,-z, y
y,-x, z
y, z, x
y, x,-z
y,-z,-x
z, y,-x
z,-x,-y
z,-y, x
z, x, y
-x, y,-z
-x, z, y
-x,-y, z
-x,-z,-y
-y,-x,-z
-y, z,-x
-y, x, z
-y,-z, x
-z, y, x
-z,-x, y
-z,-y,-x
-z, x,-y
"""
ORIENTATIONS = re.findall(r'(.)(.),(.)(.),(.)(.)', ORIENTATIONS)
def parse(data):
result = {}
scanners = data.strip().split('\n\n')
for i, s in enumerate(scanners):
coords = []
for row in re.findall(r'(-?\d+),(-?\d+),(-?\d+)', s):
coords.append(list(map(int, row)))
coords.sort()
a = np.array(coords)
result[i] = a
return result
def get_axis(a, sign, axis):
axis_index = 'xyz'.find(axis)
sign = -1 if sign == '-' else 1
return sign * a[:, axis_index]
def get_orientations(scanner):
for xsig, xax, ysig, yax, zsig, zax in ORIENTATIONS:
b = np.zeros(scanner.shape, scanner.dtype)
b[:, 0] = get_axis(scanner, xsig, xax)
b[:, 1] = get_axis(scanner, ysig, yax)
b[:, 2] = get_axis(scanner, zsig, zax)
yield b
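# Quick illustrative check, not part of the original solution: for a beacon
# whose coordinate magnitudes are all different, the 24 sign/axis combinations
# in ORIENTATIONS yield 24 distinct rotated copies, i.e. exactly the proper
# rotations the puzzle statement describes.
def _demo_orientations():
    beacon = np.array([[1, 2, 3]])
    rotated = {tuple(b[0]) for b in get_orientations(beacon)}
    assert len(rotated) == 24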
def match(s1, s2):
for origin1 in s1[-10:]: # one of these has to match because they are sorted
for origin2 in s2:
translation = origin2 - origin1
s2_trans = s2 - translation
merged = np.vstack([s1, s2_trans])
uni = np.unique(merged, axis=0)
overlap = merged.shape[0] - uni.shape[0]
if overlap >= 12:
s2_trans = pd.DataFrame(s2_trans).sort_values(by=0).values
return translation, s2_trans
return None, None
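# Tiny self-contained sanity check, not part of the original solution: if
# scanner B reports the same 12 beacons as scanner A shifted by a constant
# offset, `match` should recover that offset and return B's beacons expressed
# in A's frame.
def _demo_match():
    s1 = np.array(sorted([x, x * 2 % 7, x * 3 % 11] for x in range(12)))
    offset = np.array([10, -20, 30])
    s2 = np.array(sorted((s1 + offset).tolist()))
    translation, s2_trans = match(s1, s2)
    assert (translation == offset).all()
    assert {tuple(r) for r in s2_trans} == {tuple(r) for r in s1}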
def match_pair(s1, s2):
for s2 in get_orientations(s2):
r, s = match(s1, s2)
if r is not None:
return r, s
return None, None
def solve(data):
scanners = parse(data)
aligned = [0]
vectors = [(0, 0, 0)]
checked = set()
while len(aligned) < len(scanners):
print(f'{len(aligned)} / {len(scanners)} scanners matched')
merge_found = False
for s1, s2 in itertools.product(aligned, scanners):
if (s1, s2) in checked or s2 in aligned:
continue
checked.add((s1, s2))
print(f'comparing {s1} vs {s2}')
vec, s2_trans = match_pair(scanners[s1], scanners[s2])
if vec is not None:
aligned.append(s2)
vectors.append(vec)
scanners[s2] = s2_trans
print('match found!\n')
merge_found = True
break
if not merge_found:
raise Exception("something went wrong")
    a = np.vstack(list(scanners.values()))
    a = np.unique(a, axis=0)
    df = pd.DataFrame(a)
df.sort_values(by=0, ascending=True).to_csv('out.csv', index=False)
v = pd.DataFrame(vectors)
v.to_csv('vectors.csv', index=False)
return df.shape[0]
def solve2(fn):
df = | pd.read_csv(fn) | pandas.read_csv |
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = | Period.now('q') | pandas.tseries.period.Period.now |
""" I/O functions of the aecg package: tools for annotated ECG HL7 XML files
This module implements helper functions to parse and read annotated
electrocardiogram (ECG) stored in XML files following HL7
specification.
See authors, license and disclaimer at the top level directory of this project.
"""
# Imports =====================================================================
from typing import Dict, Tuple
from lxml import etree
from aecg import validate_xpath, new_validation_row, VALICOLS, \
TIME_CODES, SEQUENCE_CODES, \
Aecg, AecgLead, AecgAnnotationSet
import copy
import logging
import pandas as pd
import re
import zipfile
# Python logging ==============================================================
logger = logging.getLogger(__name__)
def parse_annotations(xml_filename: str,
zip_filename: str,
aecg_doc: etree._ElementTree,
aecgannset: AecgAnnotationSet,
path_prefix: str,
annsset_xmlnode_path: str,
valgroup: str = "RHYTHM",
log_validation: bool = False) -> Tuple[
AecgAnnotationSet, pd.DataFrame]:
"""Parses `aecg_doc` XML document and extracts annotations
Args:
xml_filename (str): Filename of the aECG XML file.
        zip_filename (str): Filename of zip file containing the aECG XML file.
If '', then xml file is not stored in a zip file.
aecg_doc (etree._ElementTree): XML document of the aECG XML file.
aecgannset (AecgAnnotationSet): Annotation set to which append found
annotations.
path_prefix (str): Prefix of xml path from which start searching for
annotations.
annsset_xmlnode_path (str): Path to xml node of the annotation set
containing the annotations.
valgroup (str, optional): Indicates whether to search annotations in
rhythm or derived waveform. Defaults to "RHYTHM".
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Tuple[AecgAnnotationSet, pd.DataFrame]: Annotation set updated with
found annotations and dataframe with results of validation.
"""
anngrpid = 0
# Annotations stored within a beat
beatnodes = aecg_doc.xpath((
path_prefix +
"/component/annotation/code[@code=\'MDC_ECG_BEAT\']").replace(
'/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'})
beatnum = 0
valpd = pd.DataFrame()
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {len(beatnodes)} annotated beats found')
for beatnode in beatnodes:
for rel_path in ["../component/annotation/"
"code[contains(@code, \"MDC_ECG_\")]"]:
annsnodes = beatnode.xpath(rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotation code
valrow2 = validate_xpath(
annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename, valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame(
[valrow2], columns=VALICOLS), ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
# Annotations type
valrow2 = validate_xpath(
annsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path + \
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
subannsnodes = annsnode.xpath(
rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
else:
subannsnodes += [annsnode]
# Exclude annotations reporting interval values only
subannsnodes = [
sa for sa in subannsnodes
if not sa.get("code").startswith("MDC_ECG_TIME_PD_")]
for subannsnode in subannsnodes:
# Annotations type
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
rel_path3 = "../support/supportingROI/component/"\
"boundary/value"
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/"\
"boundary/code"
roinodes = subannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(
roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
# Annotations type
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path +\
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/" \
"boundary/code"
roinodes = annsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4],
columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
anngrpid = anngrpid + 1
beatnum = beatnum + 1
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {beatnum} annotated beats and {anngrpid} '
f'annotations groups found')
anngrpid_from_beats = anngrpid
# Annotations stored without an associated beat
for codetype_path in ["/component/annotation/code["
"(contains(@code, \"MDC_ECG_\") and"
" not (@code=\'MDC_ECG_BEAT\'))]"]:
annsnodes = aecg_doc.xpath(
(path_prefix + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotations code
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
subannsnodes = annsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
for subannsnode in subannsnodes:
subsubannsnodes = subannsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
tmpnodes = [subannsnode]
if len(subsubannsnodes) > 0:
tmpnodes = tmpnodes + subsubannsnodes
for subsubannsnode in tmpnodes:
ann["wavecomponent"] = ""
ann["wavecomponent2"] = ""
ann["timecode"] = ""
ann["value"] = ""
ann["value_unit"] = ""
ann["low"] = ""
ann["low_unit"] = ""
ann["high"] = ""
ann["high_unit"] = ""
roi_base = "../support/supportingROI/component/boundary"
rel_path3 = roi_base + "/value"
valrow2 = validate_xpath(
subsubannsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/code"
if valrow2["VALIOUT"] == "PASSED":
if not ann["codetype"].endswith("WAVE"):
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations type
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
# if ann["wavecomponent"] == "":
# ann["wavecomponent"] = valrow2["VALUE"]
# else:
# ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value as attribute
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
else:
roi_base = "../component/annotation/support/"\
"supportingROI/component/boundary"
# Annotations type
valrow2 = validate_xpath(subsubannsnode,
"../component/annotation/"
"value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + \
"../component/annotation/value"
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotation values
if n != "":
rp = roi_base + "/value/" + n
else:
rp = roi_base + "/value"
valrow3 = validate_xpath(subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT"
"_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used by
# value and supporting ROI
for rel_path4 in ["../support/supportingROI/component/"
"boundary",
"../component/annotation/support/"
"supportingROI/component/boundary"]:
roinodes = subsubannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
"./code",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
anngrpid = anngrpid + 1
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {anngrpid-anngrpid_from_beats} annotations groups'
f' without an associated beat found')
return aecgannset, valpd
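# Illustrative helper, not part of the original module: the annotation
# dictionaries accumulated in `aecgannset.anns` by parse_annotations are flat,
# so they can be tabulated directly with pandas for downstream filtering or
# export. `parsed_annset` stands for whatever AecgAnnotationSet the caller got
# back from parse_annotations.
def _annotations_to_dataframe(parsed_annset: AecgAnnotationSet) -> pd.DataFrame:
    anns_df = pd.DataFrame(parsed_annset.anns)
    if not anns_df.empty:
        # anngrpid is always populated, so it is a safe sort key
        anns_df = anns_df.sort_values("anngrpid")
    return anns_df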
def parse_generalinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts general information
    This function parses the `aecg_doc` xml document searching for general
    information and stores in the returned `Aecg`: unique identifier (UUID),
    ECG date and time of collection (EGDTC), and device information.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# UUID
# =======================================
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"id\"]",
"",
"root",
new_validation_row(aecg.filename,
"GENERAL",
"UUID"))
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID found: {valrow["VALUE"]}')
aecg.UUID = valrow["VALUE"]
else:
logger.critical(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID not found')
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"id\"]",
"",
"extension",
new_validation_row(aecg.filename,
"GENERAL",
"UUID"))
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID extension found: {valrow["VALUE"]}')
aecg.UUID += valrow["VALUE"]
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID updated to: {aecg.UUID}')
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID extension not found')
# =======================================
# EGDTC
# =======================================
valpd = pd.DataFrame()
egdtc_found = False
for n in ["low", "center", "high"]:
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"effectiveTime\"]/"
"*[local-name() = \"" + n + "\"]",
"",
"value",
new_validation_row(aecg.filename, "GENERAL",
"EGDTC_" + n),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
egdtc_found = True
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'EGDTC {n} found: {valrow["VALUE"]}')
aecg.EGDTC[n] = valrow["VALUE"]
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if not egdtc_found:
logger.critical(
f'{aecg.filename},{aecg.zipContainer},'
f'EGDTC not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(valpd,
ignore_index=True)
# =======================================
# DEVICE
# =======================================
# DEVICE = {"manufacturer": "", "model": "", "software": ""}
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturerOrganization/name",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_manufacturer"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE manufacturer found: {tmp}')
aecg.DEVICE["manufacturer"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE manufacturer not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturedSeriesDevice/"
"manufacturerModelName",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_model"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE model found: {tmp}')
aecg.DEVICE["model"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE model not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturedSeriesDevice/"
"softwareName",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_software"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE software found: {tmp}')
aecg.DEVICE["software"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE software not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
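# Minimal driving sketch, not part of the original module, of how the parse_*
# helpers in this file are meant to be chained: parse the aECG XML with lxml,
# build an Aecg object, and pass both through each parser. It assumes Aecg can
# be constructed with default values and that `filename` and `zipContainer`
# are plain attributes, which matches how they are read in the logging above.
def _example_parse_general_and_subject(xml_path: str) -> Aecg:
    aecg_doc = etree.parse(xml_path)
    aecg = Aecg()
    aecg.filename = xml_path
    aecg.zipContainer = ""
    aecg = parse_generalinfo(aecg_doc, aecg)
    aecg = parse_subjectinfo(aecg_doc, aecg)
    return aecg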
def parse_subjectinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts subject information
    This function parses the `aecg_doc` xml document searching for subject
    information and stores in the returned `Aecg`: subject unique identifier
    (USUBJID), gender, birthtime, and race.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# USUBJID
# =======================================
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"SUBJECTINFO",
"USUBJID_" + n))
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID ID {n} found: {valrow["VALUE"]}')
aecg.USUBJID[n] = valrow["VALUE"]
        else:
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DM.USUBJID ID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if (aecg.USUBJID["root"] == "") and (aecg.USUBJID["extension"] == ""):
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID cannot be established.')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(valpd,
ignore_index=True)
# =======================================
# SEX / GENDER
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/"
"administrativeGenderCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "SUBJECTINFO",
"SEX"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.SEX found: {valrow["VALUE"]}')
aecg.SEX = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.SEX not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
# =======================================
# BIRTHTIME
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/birthTime",
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "SUBJECTINFO",
"BIRTHTIME"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.BIRTHTIME found.')
aecg.BIRTHTIME = valrow["VALUE"]
# age_in_years = aecg.subject_age_in_years()
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.BIRTHTIME not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
# =======================================
# RACE
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/raceCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "SUBJECTINFO",
"RACE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.RACE found: {valrow["VALUE"]}')
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.RACE not found')
aecg.RACE = valrow["VALUE"]
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
def parse_trtainfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts subject information
This function parses the `aecg_doc` xml document searching for treatment
information that includes in the returned `Aecg`.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/definition/"
"treatmentGroupAssignment/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "STUDYINFO",
"TRTA"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TRTA information found: {valrow["VALUE"]}')
aecg.TRTA = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TRTA information not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
def parse_studyinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts study information
    This function parses the `aecg_doc` xml document searching for study
    information and stores in the returned `Aecg`: study unique identifier
    (STUDYID) and study title (STUDYTITLE).
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/componentOf/"
"clinicalTrial/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"STUDYID_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYID {n} found: {valrow["VALUE"]}')
aecg.STUDYID[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/componentOf/"
"clinicalTrial/title",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "STUDYINFO",
"STUDYTITLE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYTITLE found: {tmp}')
aecg.STUDYTITLE = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYTITLE not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
def parse_timepoints(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts timepoints information
    This function parses the `aecg_doc` xml document searching for timepoint
    information and stores in the returned `Aecg`: absolute timepoint or study
    event information (TPT), relative timepoint or study event relative to a
    reference event (RTPT), and protocol timepoint information (PTPT).
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# TPT
# =======================================
valpd = pd.DataFrame()
for n in ["code", "displayName"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/code",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"TPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} found: {valrow["VALUE"]}')
aecg.TPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/reasonCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "STUDYINFO",
"TPT_reasonCode"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT reasonCode found: {valrow["VALUE"]}')
aecg.TPT["reasonCode"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT reasonCode not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valpd = pd.DataFrame()
for n in ["low", "high"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/"
"effectiveTime/" + n,
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename,
"STUDYINFO",
"TPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} found: {valrow["VALUE"]}')
aecg.TPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
# =======================================
# RTPT
# =======================================
valpd = pd.DataFrame()
for n in ["code", "displayName"]:
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename,
"STUDYINFO",
"RTPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT {n} found: {valrow["VALUE"]}')
aecg.RTPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/componentOf/"
"pauseQuantity",
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "STUDYINFO",
"RTPT_pauseQuantity"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity value found: {valrow["VALUE"]}')
aecg.RTPT["pauseQuantity"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity value not found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(pd.DataFrame([valrow],
columns=VALICOLS),
ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/componentOf/"
"pauseQuantity",
"urn:hl7-org:v3",
"unit",
new_validation_row(aecg.filename, "STUDYINFO",
"RTPT_pauseQuantity_unit"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity unit found: {valrow["VALUE"]}')
aecg.RTPT["pauseQuantity_unit"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity unit not found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(pd.DataFrame([valrow],
columns=VALICOLS),
ignore_index=True)
# =======================================
# PTPT
# =======================================
valpd = pd.DataFrame()
for n in ["code", "displayName"]:
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/"
"componentOf/protocolTimepointEvent/code",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"PTPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT {n} found: {valrow["VALUE"]}')
aecg.PTPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/componentOf/"
"protocolTimepointEvent/component/"
"referenceEvent/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "STUDYINFO",
"PTPT_referenceEvent"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT referenceEvent code found: {valrow["VALUE"]}')
aecg.PTPT["referenceEvent"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT referenceEvent code not found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(pd.DataFrame([valrow],
columns=VALICOLS),
ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/componentOf/"
"protocolTimepointEvent/component/"
"referenceEvent/code",
"urn:hl7-org:v3",
"displayName",
new_validation_row(aecg.filename, "STUDYINFO",
"PTPT_referenceEvent_"
"displayName"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT referenceEvent displayName found: '
f'{valrow["VALUE"]}')
aecg.PTPT["referenceEvent_displayName"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT referenceEvent displayName not found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(pd.DataFrame([valrow],
columns=VALICOLS),
ignore_index=True)
return aecg
def parse_rhythm_waveform_info(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts rhythm waveform information
This function parses the `aecg_doc` xml document searching for rhythm
waveform information that includes in the returned `Aecg`: waveform
identifier, code, display name, and date and time of collection.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./component/series/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename, "RHYTHM",
"ID_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM ID {n} found: {valrow["VALUE"]}')
aecg.RHYTHMID[n] = valrow["VALUE"]
else:
if n == "root":
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM ID {n} not found')
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM ID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "RHYTHM",
"CODE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM code found: {valrow["VALUE"]}')
aecg.RHYTHMCODE["code"] = valrow["VALUE"]
if aecg.RHYTHMCODE["code"] != "RHYTHM":
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM unexpected code found: {valrow["VALUE"]}')
valrow["VALIOUT"] = "WARNING"
valrow["VALIMSG"] = "Unexpected value found"
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM code not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/code",
"urn:hl7-org:v3",
"displayName",
new_validation_row(aecg.filename, "RHYTHM",
"CODE_displayName"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM displayName found: {valrow["VALUE"]}')
aecg.RHYTHMCODE["displayName"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM displayName not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valpd = pd.DataFrame()
for n in ["low", "high"]:
valrow = validate_xpath(aecg_doc,
"./component/series/effectiveTime/" + n,
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "RHYTHM",
"EGDTC_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHMEGDTC {n} found: {valrow["VALUE"]}')
aecg.RHYTHMEGDTC[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHMEGDTC {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
return aecg
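# Hedged usage sketch (not part of the original module): shows how the rhythm
# waveform parser above could be invoked. The XML file path and the default
# Aecg() constructor are assumptions for illustration only.
def _example_parse_rhythm_waveform_info(xml_path: str = "example_aecg.xml") -> Aecg:
    # Parse the aECG XML file and populate a fresh Aecg object
    aecg_doc = etree.parse(xml_path)
    aecg = Aecg()
    aecg.filename = xml_path
    return parse_rhythm_waveform_info(aecg_doc, aecg, log_validation=True)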
def parse_derived_waveform_info(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts derived waveform information
This function parses the `aecg_doc` xml document searching for derived
waveform information that includes in the returned `Aecg`: waveform
identifier, code, display name, and date and time of collection.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./component/series/derivation/"
"derivedSeries/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename, "DERIVED",
"ID_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED ID {n} found: {valrow["VALUE"]}')
aecg.DERIVEDID[n] = valrow["VALUE"]
else:
if n == "root":
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED ID {n} not found')
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED ID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/derivation/"
"derivedSeries/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "DERIVED",
"CODE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED code found: {valrow["VALUE"]}')
aecg.DERIVEDCODE["code"] = valrow["VALUE"]
if aecg.DERIVEDCODE["code"] != "REPRESENTATIVE_BEAT":
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED unexpected code found: {valrow["VALUE"]}')
valrow["VALIOUT"] = "WARNING"
valrow["VALIMSG"] = "Unexpected value found"
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED code not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/derivation/"
"derivedSeries/code",
"urn:hl7-org:v3",
"displayName",
new_validation_row(aecg.filename, "DERIVED",
"CODE_displayName"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED displayName found: {valrow["VALUE"]}')
aecg.DERIVEDCODE["displayName"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED displayName not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valpd = pd.DataFrame()
for n in ["low", "high"]:
valrow = validate_xpath(aecg_doc,
"./component/series/derivation/"
"derivedSeries/effectiveTime/" + n,
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "DERIVED",
"EGDTC_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVEDEGDTC {n} found: {valrow["VALUE"]}')
aecg.DERIVEDEGDTC[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVEDEGDTC {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
return aecg
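# Hedged usage sketch (not part of the original module): the derived-waveform
# parser is typically applied to the same document after the rhythm parser;
# the file path and the Aecg() defaults are assumptions for illustration only.
def _example_parse_derived_waveform_info(xml_path: str = "example_aecg.xml") -> Aecg:
    aecg_doc = etree.parse(xml_path)
    aecg = parse_rhythm_waveform_info(aecg_doc, Aecg(), log_validation=False)
    return parse_derived_waveform_info(aecg_doc, aecg, log_validation=False)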
def parse_rhythm_waveform_timeseries(aecg_doc: etree._ElementTree,
aecg: Aecg,
include_digits: bool = False,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts rhythm's timeseries
This function parses the `aecg_doc` xml document searching for rhythm
waveform timeseries (sequences) information that includes in the returned
:any:`Aecg`. Each found sequence is stored as an :any:`AecgLead` in the
:any:`Aecg.RHYTHMLEADS` list of the returned :any:`Aecg`.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
include_digits (bool, optional): Indicates whether to include the
digits information in the returned `Aecg`.
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
path_prefix = './component/series/component/sequenceSet/' \
'component/sequence'
seqnodes = aecg_doc.xpath((path_prefix + '/code').replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(seqnodes) > 0:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM sequenceSet(s) found: '
f'{len(seqnodes)} sequenceSet nodes')
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM sequenceSet not found')
for xmlnode in seqnodes:
xmlnode_path = aecg_doc.getpath(xmlnode)
valrow = validate_xpath(aecg_doc,
xmlnode_path,
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "RHYTHM",
"SEQUENCE_CODE"),
failcat="WARNING")
valpd = pd.DataFrame()
if valrow["VALIOUT"] == "PASSED":
if not valrow["VALUE"] in SEQUENCE_CODES:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM unexpected sequenceSet code '
f'found: {valrow["VALUE"]}')
valrow["VALIOUT"] = "WARNING"
valrow["VALIMSG"] = "Unexpected sequence code found"
if valrow["VALUE"] in TIME_CODES:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM sequenceSet code found: {valrow["VALUE"]}')
aecg.RHYTHMTIME["code"] = valrow["VALUE"]
# Retrieve time head info from value node
rel_path = "../value/head"
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
"value",
new_validation_row(
aecg.filename, "RHYTHM", "SEQUENCE_TIME_HEAD"),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_TIME_HEAD found: {valrow2["VALUE"]}')
aecg.RHYTHMTIME["head"] = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_TIME_HEAD not found')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Retrieve time increment info from value node
rel_path = "../value/increment"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(
aecg.filename, "RHYTHM", "SEQUENCE_TIME_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_TIME_{n} found: '
f'{valrow2["VALUE"]}')
if n == "value":
aecg.RHYTHMTIME["increment"] = float(
valrow2["VALUE"])
else:
aecg.RHYTHMTIME[n] = valrow2["VALUE"]
if log_validation:
valpd = \
valpd.append(pd.DataFrame([valrow2],
columns=VALICOLS),
ignore_index=True)
else:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM sequenceSet code found: '
f'{valrow["VALUE"]}')
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'LEADNAME from RHYTHM sequenceSet code: '
f'{valrow["VALUE"]}')
                # Assume it is a lead
aecglead = AecgLead()
aecglead.leadname = valrow["VALUE"]
# Inherit last parsed RHYTHMTIME
aecglead.LEADTIME = copy.deepcopy(aecg.RHYTHMTIME)
                # Retrieve lead origin info
rel_path = "../value/origin"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(
aecg.filename, "RHYTHM",
"SEQUENCE_LEAD_ORIGIN_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_ORIGIN_{n} '
f'found: {valrow2["VALUE"]}')
if n == "value":
try:
aecglead.origin = float(valrow2["VALUE"])
except Exception as ex:
valrow2["VALIOUT"] == "ERROR"
valrow2["VALIMSG"] = "SEQUENCE_LEAD_"\
"ORIGIN is not a "\
"number"
else:
aecglead.origin_unit = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_ORIGIN_{n} not found')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
                # Retrieve lead scale info
rel_path = "../value/scale"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(
aecg.filename, "RHYTHM",
"SEQUENCE_LEAD_SCALE_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_SCALE_{n} '
f'found: {valrow2["VALUE"]}')
if n == "value":
try:
aecglead.scale = float(valrow2["VALUE"])
except Exception as ex:
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_SCALE '
f'value is not a valid number: \"{ex}\"')
valrow2["VALIOUT"] == "ERROR"
valrow2["VALIMSG"] = "SEQUENCE_LEAD_"\
"SCALE is not a "\
"number"
else:
aecglead.scale_unit = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_SCALE_{n} not found')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Include digits if requested
if include_digits:
rel_path = "../value/digits"
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
"",
new_validation_row(
aecg.filename, "RHYTHM", "SEQUENCE_LEAD_DIGITS"),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
try:
# Convert string of digits to list of integers
# remove new lines
sdigits = valrow2["VALUE"].replace("\n", " ")
                            # remove carriage returns
sdigits = sdigits.replace("\r", " ")
# remove tabs
sdigits = sdigits.replace("\t", " ")
# collapse 2 or more spaces into 1 space char
# and remove leading/trailing white spaces
sdigits = re.sub("\\s+", " ", sdigits).strip()
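                            # e.g. "  1  -2\n3\t4 " has been normalized to
                            # "1 -2 3 4" at this point (illustrative values)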
# Convert string into list of integers
aecglead.digits = [int(s) for s in
sdigits.split(' ')]
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DIGITS added to lead'
f' {aecglead.leadname} (n: '
f'{len(aecglead.digits)})')
except Exception as ex:
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'Error parsing DIGITS from '
f'string to list of integers: \"{ex}\"')
valrow2["VALIOUT"] == "ERROR"
valrow2["VALIMSG"] = "Error parsing SEQUENCE_"\
"LEAD_DIGITS from string"\
" to list of integers"
else:
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'DIGITS not found for lead {aecglead.leadname}')
if log_validation:
valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
# import sys
import os
import os.path as path
import shutil
import fnmatch as fm
import numpy as np
import pandas as pd
from scipy.io import loadmat
from PyQt4.QtCore import QThread
from PyQt4.QtCore import QObject, pyqtSignal
class DataProcessor(QObject):
print_out = pyqtSignal(str)
prog_out = pyqtSignal(int)
fin_out = pyqtSignal()
prog = 0
def __init__(self):
QObject.__init__(self)
self.__headDir = None
self.__testDir = None
self.interrupt = False
def setPath(self, hd=None, td=None):
self.__headDir = hd
self.__testDir = td
def printOut(self, strLine):
self.print_out.emit(strLine)
def progUpt(self, value):
self.prog += value
self.prog_out.emit(self.prog)
def verifyExistDir(self, inDir):
# Return the absolute path of inDir
absPath = None
if path.isdir(inDir):
# Given path exists
if path.isabs(inDir):
absPath = inDir
self.printOut('Test data directory %s already exists.' % absPath)
else:
absPath = path.abspath(inDir)
self.printOut('Test data directory %s already exists.' % inDir)
else:
            # Given path does not exist
            self.printOut('Directory %s does not exist.' % inDir)
self.printOut('Please specify a valid directory for the test data.\n')
return absPath
def scaleTime_csvDat(self, f_dir):
self.printOut('\nScaling the Time<ms> to Time<s> of the SMART Terminal Recorded Data.')
# Read the file
with open(f_dir, 'rb') as f:
temp = f.readlines()
with open(f_dir, 'wb') as f:
for row in temp:
if temp[0] == row:
f.write(row)
else:
char = str(chr(row[0]))
if char.isdigit() and str(row).find(':') == 4:
f.write(row)
# Open the file
with open(f_dir, 'rb') as f:
dat_df = pd.read_csv(f, index_col=False, encoding='iso-8859-1', skipinitialspace=True)
# Drop 'System Time' column
# dat_df = dat_df.drop('System Time', 1)
n_rows = dat_df.shape[0]
n_cols = dat_df.shape[1]
self.printOut('There are %d rows and %d columns of data in %s.' % (n_rows, n_cols, path.abspath(f_dir)))
# Convert milliseconds to seconds
dat_df['Time<ms>'] /= 1000.0000
# Change column label
dat_df = dat_df.rename(columns={'Time<ms>': 'Time<s>'})
# Write to a file
sf_dir = str(f_dir[:-4]) + '_SCALED.csv'
dat_df.to_csv(sf_dir, index=False)
self.printOut('Finished writing scaled data into %s.' % path.abspath(sf_dir))
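        # Hedged numeric example of the scaling above: a 'Time<ms>' value of
        # 2500 in the input becomes 2.5 in the renamed 'Time<s>' column of the
        # *_SCALED.csv output (illustrative value only).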
def dat2csv(self, testName, datDir, csvDir):
self.printOut('\nProcessing Smart Terminal DAT file conversion to CSV file...')
datFiles = [f for f in os.listdir(datDir) if f.lower().endswith('.dat')]
# Determine the quality of DAT files process
n_datFiles = len(datFiles)
if n_datFiles == 0:
self.printOut('\nThere are no DAT files detected.')
self.printOut('Please make sure you have the copy of all the files to be processed.')
self.printOut('Conversion aborted!')
else:
# Prints out the quantity of DAT files to be processed.
self.printOut('\nNumber of DAT files to process = %d' % n_datFiles)
if n_datFiles > 1:
self.printOut('Too many DAT files!')
self.printOut('Conversion aborted!')
else:
# Copy DAT file to CSV folder
self.printOut('Converting DAT files...')
self.printOut(csvDir)
for n in range(0, n_datFiles):
shutil.copy((datDir + '\\' + str(datFiles[n])), csvDir)
# Remove whitespaces from the file names
# nf = csvDir + '\\' + str(datFiles[n])
# os.rename(nf, nf.replace(' ', '_'))
# Renaming the copied DAT files - directly converts CSV format
dat_csv = [f for f in os.listdir(csvDir) if f.lower().endswith('.dat')]
for n in dat_csv:
src = csvDir + '\\' + n
# dst = csvDir + '\\' + n[:-4] + '.csv'
dst = csvDir + '\\' + testName + '__ST_RecordedData' + '.csv'
if path.exists(dst):
os.remove(dst)
self.printOut("Removed old files.")
# Copy the converted file to the CSV output folder
os.rename(src, dst)
self.printOut('Conversion process complete.')
# Scale Smart Terminal CSV file Time<ms> to Time<s>
self.scaleTime_csvDat(dst)
def mat2csv(self, testName, headDir, matDir, csvDir):
# Header Files directory
headDir += '\\'
# Assign header file names
headerfile_names = ['APS2600E_header_1.csv',
'APS2600E_header_2.csv',
'APS2600E_header_3.csv',
'APS2600E_header_4.csv']
# DO NOT CHANGE UNLESS NECESSARY
# Number of samples per channel in each of the split files
max_samples_per_channel = 10000000000
self.printOut('\nProcessing MATLAB files conversion to CSV files...\n')
# Check if header files exists before proceeding
headers_exist = 1
for h in range(0, len(headerfile_names)):
# Get each header file paths
headerfile_path = path.abspath(headDir + headerfile_names[h])
if not os.path.isfile(headerfile_path):
headers_exist = 0
self.printOut('%s is missing!' % headerfile_names[h])
if headers_exist:
# Find MATLAB data folder
n_mat_files = 0
if not path.exists(matDir):
self.printOut('MATLAB data folder %s is missing!' % matDir)
else:
# Find MATLAB files
mat_files = [f for f in os.listdir(matDir) if f.endswith('.mat')]
# Determine the quantity of MATLAB files to process
n_mat_files = len(mat_files)
if n_mat_files == 0:
self.printOut('There are no MAT files detected.')
self.printOut('Please make sure you have the copy of all the mat files to be processed.')
self.printOut('Conversion aborted!')
else:
# Prints out the quantity of mat files to be processed.
self.printOut('Number of MAT files to process = %d' % n_mat_files)
# Generate MATLAB filename variable
for mat_ix in range(0, n_mat_files):
filename_mat = matDir + '\\' + mat_files[mat_ix]
# Load file to be split
self.printOut('\nLoading %s' % filename_mat)
mat = loadmat(filename_mat)
# Python 3 change
mat_keys = list(mat.keys())
# Assign 'opvar' data to mat_a
mat_a = mat[mat_keys[0]]
# Transpose data
mat_a = np.transpose(mat_a)
                        # Obtain number of data channels (columns after transpose)
n_rows = mat_a.shape[1]
mat_file_dummy = mat_files[mat_ix]
# Load headers
filename_number = int(mat_file_dummy[14]) - 1
headerfile_path = path.abspath(headDir + headerfile_names[filename_number])
header_names = np.loadtxt(headerfile_path, delimiter=',', dtype=str)
# Obtain number of rows of data
n_rows_header = header_names.shape[0]
self.printOut('Number of rows in header file = %d' % n_rows_header)
self.printOut('Number of rows in MAT file = %d' % n_rows)
# Check if the number of rows in header matches that of mat file
if n_rows == (n_rows_header + 1):
# Generate header string
header_string = 'Sim Time'
for x in range(1, n_rows):
signal_name = str(header_names[x - 1, 1]).strip('b\'')
signal_name = str(signal_name).strip('\'')
header_string = header_string + ',' + signal_name
# Acquire number of samples per channel
# Obtain number of rows of data
n_samples_per_channel = mat_a.shape[0]
self.printOut('Number of samples per channel = %d' % n_samples_per_channel)
# Check whether splitting is required
if n_samples_per_channel > max_samples_per_channel:
self.printOut(
'Number of samples per channel (' + str(n_samples_per_channel) + ') exceeds ' + str(
max_samples_per_channel) + '.')
self.printOut('Splitting required.')
# Calculate number of files needed to be produced after splitting
n_files = int(n_samples_per_channel / max_samples_per_channel)
self.printOut(filename_mat + ' to be split into ' + str(n_files) + ' files.')
# Splitting Algorithm
for ix in range(0, n_files):
row_start = ix * max_samples_per_channel
                                    # Last chunk takes any remaining samples
                                    if ix == n_files - 1:
row_end = n_samples_per_channel
else:
row_end = (ix + 1) * max_samples_per_channel - 1
temp_mat = mat_a[row_start:row_end, 0:65]
self.printOut('Writing file ' + str(ix + 1) + ' out of ' + str(n_files) + ': ')
# Writing to CSV
# Generate CSV filename
                                    filename_csv = filename_mat.split('\\')[-1]
                                    # Include the chunk index so split files do not overwrite each other
                                    filename_csv = (csvDir + '\\' + testName + '__' +
                                                    filename_csv[:-4] + '_part' + str(ix + 1) + '.csv')
                                    with open(filename_csv, 'wb') as f:
                                        # Write header string (encode to bytes for binary mode)
                                        f.write(bytes(header_string, 'UTF-8'))
                                        # Skip a line
                                        f.write(bytes('\n', 'UTF-8'))
                                        # Write the current chunk of array data
                                        np.savetxt(f, temp_mat, fmt='%.4f', delimiter=',')
self.printOut(filename_csv + ' has been written.')
temp_mat = None
self.printOut('Splitting and MAT to CSV conversion of ' + filename_mat + ' complete.')
# Splitting not required
else:
self.printOut(
'Number of samples per channel (' + str(n_samples_per_channel) + ') is less than ' +
str(max_samples_per_channel) + '. No splitting required.')
self.printOut('Converting ' + filename_mat)
# Writing to CSV
# Generate CSV filename
filename_csv = filename_mat.split('\\')[-1]
filename_csv = csvDir + '\\' + testName + '__' + filename_csv[:-4] + '.csv'
with open(filename_csv, 'wb') as f:
# Write header string
f.write(bytes(header_string, 'UTF-8'))
# Skip a line
f.write(bytes('\n', 'UTF-8'))
# Write array data
np.savetxt(f, mat_a, fmt='%.4f', delimiter=',')
self.printOut(filename_csv + ' has been written.')
# mat_a = None
else:
self.printOut('Number of rows of data does not match number of entries in ' +
headerfile_names[filename_number] + '. Conversion aborted. Next file.')
# Emit progress bar update
self.progUpt(5)
self.printOut('\nConversion process completed.')
else:
self.printOut('Header file(s) cannot be found. Please specify the correct path. Conversion aborted.')
def alignData(self, csvDir):
os.chdir(csvDir)
self.printOut('\nAligning SMART Terminal Recorded Data...\n')
csvList = os.listdir(csvDir)
# Search the SCALED data file
datScaled_f = [f for f in csvList if fm.fnmatch(f, '*_SCALED.csv')]
# Search the CSV-converted MAT data file
matCsv_f = [f for f in csvList if fm.fnmatch(f, '*_APS2600E_data_*_*.csv')]
# Read the CSV-converted scaled DAT file
with open(datScaled_f[0], 'rb') as f:
scaledDat_df = pd.read_csv(f, index_col=False, encoding='iso-8859-1', skipinitialspace=True)
# Read the CSV-converted MAT files
with open(matCsv_f[0], 'r') as f:
matData_df = pd.read_csv(f, index_col=False, encoding='iso-8859-1', skipinitialspace=True)
self.printOut('STOP_CMD is at Column %d of %s.' % (matData_df.columns.get_loc('STOP_CMD'), matCsv_f[0]))
self.printOut('Sim Time is at Column %d of %s.\n' % (matData_df.columns.get_loc('Sim Time'), matCsv_f[0]))
# Determine the offset of 'STOP_CMD'
matOffset = 0
for i in range(1, matData_df.shape[0]):
prev = matData_df['STOP_CMD'].loc[i - 1]
curr = matData_df['STOP_CMD'].loc[i]
if (0 == prev) & (1 == curr):
matOffset = i
break
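        # Hedged vectorized alternative to the transition search above (shown
        # for clarity only, not used):
        #   hits = matData_df.index[matData_df['STOP_CMD'].diff() == 1]
        #   matOffset = int(hits[0]) if len(hits) else 0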
if 'a429data.stop' not in scaledDat_df.columns:
if 'Stop' in scaledDat_df.columns:
self.printOut('Stop variable is DETECTED instead of a429data.stop!!!')
self.printOut('a429data.stop is at Column %d of %s.\n' % (scaledDat_df.columns.get_loc('Stop'), datScaled_f[0]))
self.printOut('Time<sec> is at Column %d of %s.' % (scaledDat_df.columns.get_loc('Time<s>'), datScaled_f[0]))
# Determine the offset of 'a429data.stop'
datOffset = 0
for i in range(1, scaledDat_df.shape[0]):
prev = scaledDat_df['Stop'].loc[i - 1]
curr = scaledDat_df['Stop'].loc[i]
if (0 == prev) & (1 == curr):
datOffset = i
break
else:
self.printOut('The SMART Terminal data variable \'a429data.stop\' was not recorded!')
# return
else:
self.printOut('a429data.stop is at Column %d of %s.\n' % (scaledDat_df.columns.get_loc('a429data.stop'), datScaled_f[0]))
self.printOut('Time<sec> is at Column %d of %s.' % (scaledDat_df.columns.get_loc('Time<s>'), datScaled_f[0]))
# Determine the offset of 'a429data.stop'
datOffset = 0
for i in range(1, scaledDat_df.shape[0]):
prev = scaledDat_df['a429data.stop'].loc[i - 1]
curr = scaledDat_df['a429data.stop'].loc[i]
if (0 == prev) & (1 == curr):
datOffset = i
break
self.printOut('DAT Offset: %d' % datOffset)
self.printOut('MAT Offset: %d' % matOffset)
# self.printOut('Transition[Previous] %d' % scaledDat_df['a429data.stop'].loc[datOffset - 1])
# self.printOut('Transition[Current] %d' % scaledDat_df['a429data.stop'].loc[datOffset])
# Calculate time difference
# Get the 'Time<s>' value at the transition point [DAT]
xDat = float(scaledDat_df['Time<s>'].loc[datOffset])
# Get the 'Sim Time' value at the transition point [MAT]
xMat = float(matData_df['Sim Time'].loc[matOffset])
timeDiff = xMat - xDat
self.printOut('Time Difference: %f seconds' % timeDiff)
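        # e.g. xMat = 12.34 s and xDat = 10.00 s give timeDiff = 2.34 s, so every
        # DAT timestamp is shifted forward by 2.34 s below (illustrative numbers).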
# Alignment
scaledDat_df['Time<s>'] += timeDiff
# Write to a file
af_dir = str(datScaled_f[0])[:-10] + 'ALIGNED.csv'
scaledDat_df.to_csv(af_dir, index=False)
self.printOut('\nFinished writing the aligned data file: %s' % af_dir)
def mergeData(self, csvDir):
os.chdir(csvDir)
self.printOut('\nMerging all MATLAB data into a single file...\n')
csvList = os.listdir(csvDir)
# Search the CSV-converted MAT data file
matCsv_f = [f for f in csvList if fm.fnmatch(f, '*_APS2600E_data_*_*.csv')]
# Merge files
result_df = pd.DataFrame(columns=['Sim Time'])
for f in matCsv_f:
r_df = pd.read_csv(f, index_col=False, encoding='iso-8859-1', skipinitialspace=True)
self.printOut('Merging %s...' % f)
result_df = pd.merge(left=result_df, right=r_df, how='right', left_on='Sim Time', right_on='Sim Time')
# Emit progress bar update
self.progUpt(5)
m_dir = (csvDir.split('\\')[-2]) + '__APS2600E_data_MERGED.csv'
result_df.to_csv(m_dir, index=False)
self.printOut('\nFinished merging all MATLAB data files!')
self.printOut('%s' % m_dir)
def plotMerge(self, csvDir):
os.chdir(csvDir)
testName = csvDir.split('\\')[-2]
datCsv_f = testName + '__ST_RecordedData_ALIGNED.csv'
matCsv_f = testName + '__APS2600E_data_MERGED.csv'
dat_df = pd.read_csv(datCsv_f, index_col=False, encoding='iso-8859-1', skipinitialspace=True)
        mat_df = pd.read_csv(matCsv_f, index_col=False, encoding='iso-8859-1', skipinitialspace=True)
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 08:04:31 2020
@author: <NAME>
Functions to run the station characterization notebook on exploredata.
"""
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import math
import numpy as np
from netCDF4 import Dataset
import textwrap
import datetime as dt
import os
import six
import requests
from icoscp.station import station as station_data
#for the widgets
from IPython.core.display import display, HTML
from ipywidgets import Dropdown, SelectMultiple, HBox, VBox, Button, Output, IntText, RadioButtons,IntProgress, GridspecLayout
from IPython.display import clear_output, display
# import required libraries
#%pylab inline
import netCDF4 as cdf
#import pickle
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import warnings
warnings.filterwarnings('ignore')
#added - not show the figure that I am saving (different size than the one displayed
#for land cover bar graph)
matplotlib.pyplot.ioff()
#stations that have footprints as well as year and months with footprints. Also altitude.
#path to footprints
pathFP='/data/stiltweb/stations/'
#Earth's radius in km (for calculating distances between the station and cells)
R = 6373.8
#saved distances to the 192 000 cells for all the labeled atmospheric stations
#if the selected station is not found in this document, the distances are calculated
approved_stations_distances = pd.read_csv('approved_stations_distances.csv')
#saved degree angles from the stations to all 192 000 cells for all the labeled atmospheric stations
approved_stations_degrees = pd.read_csv('approved_stations_degrees.csv')
#functions from Ute
#function to read and aggregate footprints for given time range
def read_aggreg_footprints(station, date_range, timeselect='all'):
# loop over all dates and read netcdf files
# path to footprint files in new stiltweb directory structure
pathFP='/data/stiltweb/stations/'
# print ('date range: ',date_range)
fp=[]
nfp=0
first = True
for date in date_range:
filename=(pathFP+station+'/'+str(date.year)+'/'+str(date.month).zfill(2)+'/'
+str(date.year)+'x'+str(date.month).zfill(2)+'x'+str(date.day).zfill(2)+'x'+str(date.hour).zfill(2)+'/foot')
#print (filename)
if os.path.isfile(filename):
f_fp = cdf.Dataset(filename)
if (first):
fp=f_fp.variables['foot'][:,:,:]
lon=f_fp.variables['lon'][:]
lat=f_fp.variables['lat'][:]
first = False
else:
fp=fp+f_fp.variables['foot'][:,:,:]
f_fp.close()
nfp+=1
#else:
#print ('file does not exist: ',filename)
if nfp > 0:
fp=fp/nfp
else:
print ('no footprints found')
#print (np.shape(fp))
#print (np.max(fp))
title = 'not used'
#title = (start_date.strftime('%Y-%m-%d')+' - '+end_date.strftime('%Y-%m-%d')+'\n'+
# 'time selection: '+timeselect)
return nfp, fp, lon, lat, title
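# Hedged usage sketch (not part of the original notebook): the station id and
# the three-hourly date range below are illustrative assumptions only.
def _example_read_aggreg_footprints():
    date_range = pd.date_range('2018-01-01 00:00', '2018-01-03 21:00', freq='3H')
    nfp, fp, lon, lat, title = read_aggreg_footprints('HTM150', date_range)
    return nfp, fp, lon, lat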
def get_station_class():
# Query the ICOS SPARQL endpoint for a station list
# query stationId, class, lng name and country
# output is an object "data" containing the results in JSON
url = 'https://meta.icos-cp.eu/sparql'
query = """
prefix st: <http://meta.icos-cp.eu/ontologies/stationentry/>
select distinct ?stationId ?stationClass ?country ?longName
from <http://meta.icos-cp.eu/resources/stationentry/>
where{
?s a st:AS .
?s st:hasShortName ?stationId .
?s st:hasStationClass ?stationClass .
?s st:hasCountry ?country .
?s st:hasLongName ?longName .
filter (?stationClass = "1" || ?stationClass = "2")
}
ORDER BY ?stationClass ?stationId
"""
r = requests.get(url, params = {'format': 'json', 'query': query})
data = r.json()
# convert the the result into a table
# output is an array, where each row contains
# information about the station
cols = data['head']['vars']
datatable = []
for row in data['results']['bindings']:
item = []
for c in cols:
item.append(row.get(c, {}).get('value'))
datatable.append(item)
# print the table
df_datatable = pd.DataFrame(datatable, columns=cols)
#df_datatable.head(5)
return df_datatable
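# Hedged illustration (not part of the original notebook): the dataframe
# returned above has one row per ICOS class 1 or class 2 station with the
# columns stationId, stationClass, country and longName, e.g. a row such as
# ('HTM', '1', 'SE', 'Hyltemossa') with example values only.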
def available_STILT_dictionary():
# store availability of STILT footprints in a dictionary
# get all ICOS station IDs by listing subdirectories in stiltweb
# extract availability from directory structure
#new:
pathStations='/data/stiltweb/stations/'
#pathStations='/opt/stiltdata/fsicos2/stiltweb/stations/'
allStations = os.listdir(pathStations)
# empty dictionary
available = {}
# fill dictionary with station name, years and months for each year
for ist in sorted(list(set(allStations))):
if os.path.exists(pathStations+'/'+ist):
            #print ('directory '+pathStations+'/'+ist+' exists')
available[ist] = {}
years = os.listdir(pathStations+'/'+ist)
available[ist]['years'] = years
for yy in sorted(available[ist]['years']):
available[ist][yy] = {}
months = os.listdir(pathStations+'/'+ist+'/'+yy)
available[ist][yy]['months'] = months
available[ist][yy]['nmonths'] = len(available[ist][yy]['months'])
#else:
        # print ('directory '+pathStations+'/'+ist+' does not exist')
# Get list of ICOS class 1 and class 2 stations from Carbon Portal
df_datatable = get_station_class()
# add information if ICOS class 1 or class 2 site
for ist in sorted(available):
available[ist]['stationClass'] = np.nan
for istICOS in df_datatable['stationId']:
ic = int(df_datatable[df_datatable['stationId']==istICOS].index.values)
if istICOS in ist:
available[ist]['stationClass'] = df_datatable['stationClass'][ic]
# print availability
#for ist in sorted(available):
# print ('station:', ist)
# for k in available[ist]:
# print (k,':', available[ist][k])
return available
def create_STILT_dictionary():
# store all STILT station information in a dictionary
# get all ICOS station IDs by listing subdirectories in stiltweb
# extract location from filename of link
#UPDATE
pathStations='/data/stiltweb/stations/'
#pathStations='/opt/stiltdata/fsicos2/stiltweb/stations/'
allStations = os.listdir(pathStations)
# empty dictionary
stations = {}
# fill dictionary with ICOS station id, latitude, longitude and altitude
for ist in sorted(list(set(allStations))):
stations[ist] = {}
# get filename of link (original stiltweb directory structure)
# and extract location information
if os.path.exists(pathStations+ist):
loc_ident = os.readlink(pathStations+ist)
clon = loc_ident[-13:-6]
            lon = float(clon[:-1])
if clon[-1:] == 'W':
lon = -lon
clat = loc_ident[-20:-14]
            lat = float(clat[:-1])
if clat[-1:] == 'S':
lat = -lat
            alt = int(loc_ident[-5:])
stations[ist]['lat']=lat
stations[ist]['lon']=lon
stations[ist]['alt']=alt
stations[ist]['locIdent']=os.path.split(loc_ident)[-1]
# add information on station name (and new STILT station id) from stations.csv file used in stiltweb
url="https://stilt.icos-cp.eu/viewer/stationinfo"
    df = pd.read_csv(url)
import pandas as pd
from dateutil import parser
from pm4pymdl.objects.mdl.exporter import exporter as mdl_exporter
from pm4pymdl.objects.mdl.importer import importer as mdl_importer
import os
def execute_script():
stream1 = [{"event_id": "1", "event_activity": "A", "event_timestamp": parser.parse("1970-01-01 00:00:00"),
"order": ["O1"]},
{"event_id": "2", "event_activity": "B", "event_timestamp": parser.parse("1970-01-01 00:00:00"),
"order": ["O1"], "item": ["I1, I2"]}]
    df = pd.DataFrame(stream1)
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameIsIn:
def test_isin(self):
# GH#4211
df = DataFrame(
{
"vals": [1, 2, 3, 4],
"ids": ["a", "b", "f", "n"],
"ids2": ["a", "n", "c", "n"],
},
index=["foo", "bar", "baz", "qux"],
)
other = ["a", "b", "c"]
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
def test_isin_empty(self, empty):
# GH#16991
df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
d = {"A": ["a"]}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, "A"] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
df.columns = ["A", "A"]
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, "A"] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH#4763
df = DataFrame(
{
"vals": [1, 2, 3, 4],
"ids": ["a", "b", "f", "n"],
"ids2": ["a", "n", "c", "n"],
},
index=["foo", "bar", "baz", "qux"],
)
with pytest.raises(TypeError):
df.isin("a")
with pytest.raises(TypeError):
df.isin("aaa")
def test_isin_df(self):
df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
df2 = DataFrame({"A": [0, 2, 12, 4], "B": [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected["A"].loc[[1, 3]] = True
expected["B"].loc[[0, 2]] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ["A", "C"]
result = df1.isin(df2)
expected["B"] = False
tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH#16394
df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "f"]})
df["C"] = list(zip(df["A"], df["B"]))
result = df["C"].isin([(1, "a")])
tm.assert_series_equal(result, Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]], columns=["B", "B"])
with pytest.raises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame(
[[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=["A", "B"],
index=[0, 0, 1, 1],
)
with pytest.raises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ["B", "B"]
with pytest.raises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({"A": [1, 0, 1, 0], "B": [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=["A", "A"])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
tm.assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame(
{"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]}, index=["a", "b", "c", "d"]
)
s = pd.Series([1, 3, 11, 4], index=["a", "b", "c", "d"])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected["A"].loc["a"] = True
expected.loc["d"] = True
result = df.isin(s)
tm.assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples(
[
(0, "a", "foo"),
(0, "a", "bar"),
(0, "b", "bar"),
(0, "b", "baz"),
(2, "a", "foo"),
(2, "a", "bar"),
(2, "c", "bar"),
(2, "c", "baz"),
(1, "b", "foo"),
(1, "b", "bar"),
(1, "c", "bar"),
(1, "c", "baz"),
]
)
df1 = DataFrame({"A": np.ones(12), "B": np.zeros(12)}, index=idx)
df2 = DataFrame(
{
"A": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
"B": [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1],
}
)
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
df2.index = idx
        expected = df2.values.astype(bool)
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=["A", "B"], index=idx)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
def test_isin_empty_datetimelike(self):
# GH#15473
df1_ts = DataFrame({"date": pd.to_datetime(["2014-01-01", "2014-01-02"])})
df1_td = DataFrame({"date": [ | pd.Timedelta(1, "s") | pandas.Timedelta |
import re
import os
import string
import ipdb
import pickle
import matplotlib
matplotlib.use('Agg')
from matplotlib import rcParams
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import TimeSeriesSplit
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import normalize
from sklearn.preprocessing import RobustScaler
from sklearn import linear_model
from wordcloud import WordCloud
from nltk import pos_tag, word_tokenize
import gensim.downloader as api
MIN_DF = 10
MAX_DF = 100
WORD_CLOUD_NUMBER = 50
BOW = "bow"
TFIDF = "tfidf"
WORD2VEC = "word2vec"
SKIPTHOUGHT = "skipThought"
def select_by_pos_tag(sentence, tags):
word_tokens = word_tokenize(sentence)
tagged_word_token = pos_tag(word_tokens)
selected_words = [word for word, tag in tagged_word_token if tag in tags]
return ' '.join(selected_words)
def clean_sentence(s):
s = re.sub("\n", " ", s)
s = re.sub("[" + string.punctuation + "]", " ", s)
s = re.sub("\?", " ", s)
s = re.sub("[0-9]+", " ", s)
s = re.sub(" +", " ", s)
return s.strip()
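# Hedged example of clean_sentence (illustrative input/output): punctuation and
# digits are replaced by spaces and whitespace is collapsed, e.g.
#   clean_sentence("Stocks fell 3% today!\nMore at 11...") -> "Stocks fell today More at"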
def generate_bag_of_words(train, test, feature_args):
vectorizer = CountVectorizer(min_df=MIN_DF, max_df=MAX_DF, **feature_args)
train_bag_of_words = vectorizer.fit_transform(train['text'].apply(clean_sentence)).toarray()
test_bag_of_words = vectorizer.transform(test['text'].apply(clean_sentence)).toarray()
train_bag_of_words = normalize(train_bag_of_words)
test_bag_of_words = normalize(test_bag_of_words)
word_list = vectorizer.get_feature_names()
train_text_df = pd.DataFrame(train_bag_of_words, index=train.index, columns=word_list)
test_text_df = pd.DataFrame(test_bag_of_words, index=test.index, columns=word_list)
bag_of_words_df = pd.concat([train_text_df, test_text_df], axis=0)
return bag_of_words_df, vectorizer
def generate_tfidf(train, test, feature_args):
vectorizer = TfidfVectorizer(min_df=MIN_DF, max_df=MAX_DF, **feature_args)
train_bag_of_words = vectorizer.fit_transform(train['text'].apply(clean_sentence)).toarray()
test_bag_of_words = vectorizer.transform(test['text'].apply(clean_sentence)).toarray()
word_list = vectorizer.get_feature_names()
train_text_df = pd.DataFrame(train_bag_of_words, index=train.index, columns=word_list)
test_text_df = pd.DataFrame(test_bag_of_words, index=test.index, columns=word_list)
bag_of_words_df = pd.concat([train_text_df, test_text_df], axis=0)
return bag_of_words_df, vectorizer
def average_word2vec(sentence, model):
sentence = clean_sentence(sentence)
word2vecs = []
for word in sentence.split(" "):
word = word.lower()
if word in model:
word2vecs.append(model[word])
return pd.Series(np.average(word2vecs, axis=0))
def generate_word2vec(train, test, feature_args):
path = 'word2vec/' + feature_args['model']
word2vec = pd.read_csv(path)
return word2vec[train.index[0]:test.index[-1] + 1], None
def generate_skip_thoughts(train, test, feature_args):
skip_thoughts = pd.read_csv(feature_args['path'])
return skip_thoughts[train.index[0]:test.index[-1] + 1], None
def generate_price_features(data):
price_feature_name = ['previous_price_{0:d}'.format(d) for d in range(1, 6)]
price_features = data[price_feature_name].values
return price_features
def generate_classification_label(data):
y = np.zeros(data.shape[0], np.float)
y[data['predicted_price'] > data['price']] = 1.0
return y
def generate_regression_label(data):
return (data['predicted_price'] - data['price']).values
def evaluate_return(open_price, y_hat, y):
revenue = 0
index = 0
buy_action = []
for price, predict, actual in zip(open_price, y_hat, y):
if predict >= 0.0 * price:
revenue += actual
buy_action.append(index)
index += 1
return revenue, buy_action
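# Hedged sanity-check example for evaluate_return (not in the original script):
# with made-up prices, predictions and realized returns, only non-negative
# predictions trigger a buy, so only the first trade contributes to revenue.
def _example_evaluate_return():
    revenue, buys = evaluate_return([10.0, 10.0], [0.5, -0.2], [1.0, 2.0])
    assert revenue == 1.0 and buys == [0]
    return revenue, buys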
def run(data, split, feature_args, exp_label):
published_time = pd.to_datetime(data['published_time'])
y = generate_regression_label(data)
y_class = generate_classification_label(data)
X_price = data['price'].values
record = {
'classification':{
'train':pd.DataFrame(),
'test':pd.DataFrame()
},
'regression':{
'train':pd.DataFrame(),
'test':pd.DataFrame()
},
'pnl':{
'train':pd.DataFrame(),
'test':pd.DataFrame()
},
'buy_actions':{
},
'feature_size':{
}
}
feature_list = [BOW, TFIDF, WORD2VEC, SKIPTHOUGHT]
feature_functions = {
BOW:generate_bag_of_words,
TFIDF:generate_tfidf,
WORD2VEC:generate_word2vec,
SKIPTHOUGHT:generate_skip_thoughts
}
fold_index = 0
tscv = TimeSeriesSplit(n_splits=split)
for train_index, test_index in tscv.split(data.values):
fold_index += 1
start_index = data.index[train_index[0]]
split_index = data.index[test_index[0]]
end_index = data.index[test_index[-1]] + 1
train = data[start_index:split_index]
test = data[split_index:end_index]
X_list = []
for feature_name in feature_list:
if feature_name in feature_args:
features, vectorizer = feature_functions[feature_name](train, test, feature_args[feature_name])
X_list.append(features)
if len(X_list) > 1:
array_list = [features.values for features in X_list]
X = np.concatenate(array_list, axis=1)
else:
X = X_list[0].values
feature_size = X.shape[1]
print("feature size:", feature_size)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
y_class_train, y_class_test = y_class[train_index], y_class[test_index]
X_train_price = X_price[train_index]
X_test_price = X_price[test_index]
# Normalization and Scaling
scaler = RobustScaler()
scaler.fit(y_train.reshape(-1, 1))
y_train_t = scaler.transform(y_train.reshape(-1, 1)).reshape(-1, )
x_train_t = X_train
x_test_t = X_test
# Modeling
classifiers_dict = {
'Logistic Regression':LogisticRegression(penalty='l2', C=0.05, verbose=0, max_iter=10000)
}
regressors_dict = {
'SVR':SVR(kernel='linear', C=1.0, verbose=0),
'Ridge Regression':linear_model.Ridge(alpha=5.0)
}
train_class_err = {}
test_class_err = {}
train_regre_err = {}
test_regre_err = {}
train_pnl_err = {}
test_pnl_err = {}
test_buy_times = []
for label, clf in classifiers_dict.items():
clf.fit(x_train_t, y_class_train)
y_class_train_pred = clf.predict(x_train_t)
y_class_test_pred = clf.predict(x_test_t)
# classification error
train_acc = accuracy_score(y_class_train, y_class_train_pred)
test_acc = accuracy_score(y_class_test, y_class_test_pred)
train_class_err[label] = train_acc
test_class_err[label] = test_acc
# PNL error
train_return, train_buy_action = evaluate_return(X_train_price, y_class_train_pred, y_train)
test_return, test_buy_action = evaluate_return(X_test_price, y_class_test_pred, y_test)
train_pnl_err[label] = train_return
test_pnl_err[label] = test_return
if label not in record['buy_actions']:
record['buy_actions'][label] = []
for action_time in test_buy_action:
record['buy_actions'][label].append(action_time + len(X_train))
for label, clf in regressors_dict.items():
clf.fit(x_train_t, y_train_t)
y_train_pred = clf.predict(x_train_t)
y_test_pred = clf.predict(x_test_t)
# classification error
y_class_train_pred = np.zeros(y_train_pred.shape[0], np.float)
y_class_train_pred[y_train_pred >= 0.0] = 1.0
y_class_test_pred = np.zeros(y_test_pred.shape[0], np.float)
y_class_test_pred[y_test_pred >= 0.0] = 1.0
train_acc = accuracy_score(y_class_train, y_class_train_pred)
test_acc = accuracy_score(y_class_test, y_class_test_pred)
train_class_err[label] = train_acc
test_class_err[label] = test_acc
# regression error
y_train_pred = scaler.inverse_transform(y_train_pred.reshape(-1, 1)).reshape(-1, )
y_test_pred = scaler.inverse_transform(y_test_pred.reshape(-1, 1)).reshape(-1, )
train_mse = mean_squared_error(y_train, y_train_pred)
test_mse = mean_squared_error(y_test, y_test_pred)
train_regre_err[label] = train_mse
test_regre_err[label] = test_mse
# PNL error
train_return, train_buy_action = evaluate_return(X_train_price, y_train_pred, y_train)
test_return, test_buy_action = evaluate_return(X_test_price, y_test_pred, y_test)
train_pnl_err[label] = train_return
test_pnl_err[label] = test_return
if label not in record['buy_actions']:
record['buy_actions'][label] = []
for action_time in test_buy_action:
record['buy_actions'][label].append(action_time + len(X_train))
record['classification']['train'] = record['classification']['train'].append(pd.Series(data=train_class_err), ignore_index=True)
record['classification']['test'] = record['classification']['test'].append(pd.Series(data=test_class_err), ignore_index=True)
record['regression']['train'] = record['regression']['train'].append(pd.Series(data=train_regre_err), ignore_index=True)
record['regression']['test'] = record['regression']['test'].append(pd.Series(data=test_regre_err), ignore_index=True)
        record['pnl']['train'] = record['pnl']['train'].append(pd.Series(data=train_pnl_err), ignore_index=True)
import os
import torch
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
from collections import defaultdict
from torchvision.datasets.folder import default_loader
from torchvision.datasets.utils import download_url
from torch.utils.data import Dataset
from torchvision import transforms
class CUB200(Dataset):
def __init__(self, root, is_train, transform=None, ori_size=False, input_size=224, center_crop=True):
self.root = root
self.is_train = is_train
self.ori_size = ori_size
        if not ori_size and center_crop:
            image_size = int(256/224*input_size) #TODO check
            crop_size = input_size #TODO check
            shift = (image_size - crop_size) // 2
        elif not ori_size and not center_crop:
            image_size = input_size
            crop_size = input_size
            shift = 0
        else:
            # ori_size=True keeps the raw bounding boxes, so these values are
            # placeholders that _load_data does not use
            image_size = crop_size = input_size
            shift = 0
        self.data = self._load_data(image_size, crop_size, shift, center_crop)
self.transform = transform
def _load_data(self, image_size, crop_size, shift, center_crop=True):
self._labelmap_path = os.path.join(self.root, 'CUB_200_2011', 'classes.txt')
paths = pd.read_csv(
os.path.join(self.root, 'CUB_200_2011', 'images.txt'),
sep=' ', names=['id', 'path'])
labels = pd.read_csv(
os.path.join(self.root, 'CUB_200_2011', 'image_class_labels.txt'),
sep=' ', names=['id', 'label'])
splits = pd.read_csv(
os.path.join(self.root, 'CUB_200_2011', 'train_test_split.txt'),
sep=' ', names=['id', 'is_train'])
orig_image_sizes = pd.read_csv(
os.path.join(self.root, 'CUB_200_2011', 'image_sizes.txt'),
sep=' ', names=['id', 'width', 'height'])
bboxes = pd.read_csv(
os.path.join(self.root, 'CUB_200_2011', 'bounding_boxes.txt'),
sep=' ', names=['id', 'x', 'y', 'w', 'h'])
if self.ori_size:
resized_bboxes = pd.DataFrame({'id': paths.id,
'xmin': bboxes.x,
'ymin': bboxes.y,
'xmax': bboxes.x + bboxes.w,
'ymax': bboxes.y + bboxes.h})
else:
if center_crop:
resized_xmin = np.maximum(
(bboxes.x / orig_image_sizes.width * image_size - shift).astype(int), 0)
resized_ymin = np.maximum(
(bboxes.y / orig_image_sizes.height * image_size - shift).astype(int), 0)
resized_xmax = np.minimum(
((bboxes.x + bboxes.w - 1) / orig_image_sizes.width * image_size - shift).astype(int),
crop_size - 1)
resized_ymax = np.minimum(
((bboxes.y + bboxes.h - 1) / orig_image_sizes.height * image_size - shift).astype(int),
crop_size - 1)
else:
min_length = pd.concat([orig_image_sizes.width, orig_image_sizes.height], axis=1).min(axis=1)
resized_xmin = (bboxes.x / min_length * image_size).astype(int)
resized_ymin = (bboxes.y / min_length * image_size).astype(int)
resized_xmax = ((bboxes.x + bboxes.w - 1) / min_length * image_size).astype(int)
resized_ymax = ((bboxes.y + bboxes.h - 1) / min_length * image_size).astype(int)
resized_bboxes = pd.DataFrame({'id': paths.id,
'xmin': resized_xmin.values,
'ymin': resized_ymin.values,
'xmax': resized_xmax.values,
'ymax': resized_ymax.values})
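        # Hedged numeric example of the center-crop rescaling above (input_size
        # 224 gives image_size 256, crop_size 224, shift 16): an original xmin
        # of 60 in a 480-px-wide image maps to max(int(60/480*256 - 16), 0) = 16.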
data = paths.merge(labels, on='id')\
.merge(splits, on='id')\
.merge(resized_bboxes, on='id')
if self.is_train:
data = data[data.is_train == 1]
else:
data = data[data.is_train == 0]
return data
def __len__(self):
return len(self.data)
# def _preprocess_bbox(self, origin_bbox, orig_image_size, center_crop=True):
# xmin, ymin, xmax, ymax = origin_bbox
# orig_width, orig_height = orig_image_size
# if center_crop:
# resized_xmin = np.maximum(
# (bboxes.x / orig_image_sizes.width * image_size - shift).astype(int), 0)
# resized_ymin = np.maximum(
# (bboxes.y / orig_image_sizes.height * image_size - shift).astype(int), 0)
# resized_xmax = np.minimum(
# ((bboxes.x + bboxes.w - 1) / orig_image_sizes.width * image_size - shift).astype(int),
# crop_size - 1)
# resized_ymax = np.minimum(
# ((bboxes.y + bboxes.h - 1) / orig_image_sizes.height * image_size - shift).astype(int),
# crop_size - 1)
# else:
# print(f'width: {orig_image_sizes.width}, height: {orig_image_sizes.height}')
# min_length = min(orig_image_sizes.width , orig_image_sizes.height)
# resized_xmin = int(bb / min_length * self.image_size)
# resized_ymin = int(ymin / min_length * self.image_size)
# resized_xmax = int(xmax / min_length * self.image_size)
# resized_ymax = int(ymax / min_length * self.image_size)
# resized_bboxes = pd.DataFrame({'id': paths.id,
# 'xmin': resized_xmin.values,
# 'ymin': resized_ymin.values,
# 'xmax': resized_xmax.values,
# 'ymax': resized_ymax.values})
def __getitem__(self, idx):
sample = self.data.iloc[idx]
path = os.path.join(self.root, 'CUB_200_2011/images', sample.path)
image = Image.open(path).convert('RGB')
label = sample.label - 1 # label starts from 1
gt_box = torch.tensor(
[sample.xmin, sample.ymin, sample.xmax, sample.ymax])
if self.transform is not None:
image = self.transform(image)
return (image, label, gt_box)
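    # Hedged usage sketch (not part of the original class); the dataset root
    # path below is an assumption for illustration only:
    #   ds = CUB200(root='data/CUB', is_train=True,
    #               transform=transforms.ToTensor(), input_size=224)
    #   image, label, gt_box = ds[0]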
@property
def class_id_to_name(self):
if hasattr(self, '_class_id_to_name'):
return self._class_id_to_name
        labelmap = pd.read_csv(self._labelmap_path, sep=' ', names=['label', 'name'])
"""
test date_range, bdate_range construction from the convenience range functions
"""
from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
from pytz import timezone
from pandas._libs.tslibs import timezones
from pandas._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mapping
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
import pandas._testing as tm
from pandas.core.arrays.datetimes import generate_range
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestTimestampEquivDateRange:
# Older tests in TestTimeSeries constructed their `stamp` objects
# using `date_range` instead of the `Timestamp` constructor.
# TestTimestampEquivDateRange checks that these are equivalent in the
# pertinent cases.
def test_date_range_timestamp_equiv(self):
rng = date_range("20090415", "20090519", tz="US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_dateutil(self):
rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="dateutil/US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_explicit_pytz(self):
rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"), freq="D")
assert ts == stamp
@td.skip_if_windows_python_3
def test_date_range_timestamp_equiv_explicit_dateutil(self):
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
rng = date_range("20090415", "20090519", tz=gettz("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=gettz("US/Eastern"), freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_from_datetime_instance(self):
datetime_instance = datetime(2014, 3, 4)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]
ts = Timestamp(datetime_instance, freq="D")
assert ts == timestamp_instance
def test_date_range_timestamp_equiv_preserve_frequency(self):
timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
ts = Timestamp("2014-03-05", freq="D")
assert timestamp_instance == ts
class TestDateRanges:
def test_date_range_nat(self):
# GH#11587
msg = "Neither `start` nor `end` can be NaT"
with pytest.raises(ValueError, match=msg):
date_range(start="2016-01-01", end=pd.NaT, freq="D")
with pytest.raises(ValueError, match=msg):
date_range(start=pd.NaT, end="2016-01-01", freq="D")
def test_date_range_multiplication_overflow(self):
# GH#24255
# check that overflows in calculating `addend = periods * stride`
# are caught
with tm.assert_produces_warning(None):
# we should _not_ be seeing an overflow RuntimeWarning
dti = date_range(start="1677-09-22", periods=213503, freq="D")
assert dti[0] == Timestamp("1677-09-22")
assert len(dti) == 213503
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("1969-05-04", periods=200000000, freq="30000D")
def test_date_range_unsigned_overflow_handling(self):
# GH#24255
# case where `addend = periods * stride` overflows int64 bounds
# but not uint64 bounds
dti = date_range(start="1677-09-22", end="2262-04-11", freq="D")
dti2 = date_range(start=dti[0], periods=len(dti), freq="D")
assert dti2.equals(dti)
dti3 = date_range(end=dti[-1], periods=len(dti), freq="D")
assert dti3.equals(dti)
def test_date_range_int64_overflow_non_recoverable(self):
# GH#24255
# case with start later than 1970-01-01, overflow int64 but not uint64
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(start="1970-02-01", periods=106752 * 24, freq="H")
# case with end before 1970-01-01, overflow int64 but not uint64
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1969-11-14", periods=106752 * 24, freq="H")
def test_date_range_int64_overflow_stride_endpoint_different_signs(self):
# cases where stride * periods overflow int64 and stride/endpoint
# have different signs
start = Timestamp("2262-02-23")
end = Timestamp("1969-11-14")
expected = date_range(start=start, end=end, freq="-1H")
assert expected[0] == start
assert expected[-1] == end
dti = date_range(end=end, periods=len(expected), freq="-1H")
tm.assert_index_equal(dti, expected)
start2 = Timestamp("1970-02-01")
end2 = Timestamp("1677-10-22")
expected2 = date_range(start=start2, end=end2, freq="-1H")
assert expected2[0] == start2
assert expected2[-1] == end2
dti2 = date_range(start=start2, periods=len(expected2), freq="-1H")
tm.assert_index_equal(dti2, expected2)
def test_date_range_out_of_bounds(self):
# GH#14187
msg = "Cannot generate range"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("2016-01-01", periods=100000, freq="D")
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1763-10-12", periods=100000, freq="D")
def test_date_range_gen_error(self):
rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min")
assert len(rng) == 4
@pytest.mark.parametrize("freq", ["AS", "YS"])
def test_begin_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],
freq=freq,
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["A", "Y"])
def test_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["BA", "BY"])
def test_business_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq
)
tm.assert_index_equal(rng, exp)
def test_date_range_negative_freq(self):
# GH 11018
rng = date_range("2011-12-31", freq="-2A", periods=3)
exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2A"
rng = date_range("2011-01-31", freq="-2M", periods=3)
exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2M"
def test_date_range_bms_bug(self):
# #1645
rng = date_range("1/1/2000", periods=10, freq="BMS")
ex_first = Timestamp("2000-01-03")
assert rng[0] == ex_first
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq="2D")
offset = timedelta(2)
values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset)
tm.assert_index_equal(rng, values)
rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B")
the_time = time(8, 15)
for val in rng:
assert val.time() == the_time
def test_date_range_fy5252(self):
dr = date_range(
start="2013-01-01",
periods=2,
freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest"),
)
assert dr[0] == Timestamp("2013-01-31")
assert dr[1] == Timestamp("2014-01-30")
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start, end, periods=10, freq="s")
def test_date_range_convenience_periods(self):
# GH 20808
result = date_range("2018-04-24", "2018-04-27", periods=3)
expected = DatetimeIndex(
["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"],
freq=None,
)
tm.assert_index_equal(result, expected)
# Test if spacing remains linear if tz changes to dst in range
result = date_range(
"2018-04-01 01:00:00",
"2018-04-01 04:00:00",
tz="Australia/Sydney",
periods=3,
)
expected = DatetimeIndex(
[
Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"),
Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"),
Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"),
]
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start,end,result_tz",
[
["20180101", "20180103", "US/Eastern"],
[datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"],
[Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"],
[
Timestamp("20180101", tz="US/Eastern"),
| Timestamp("20180103", tz="US/Eastern") | pandas.Timestamp |
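# A minimal sketch of the Timestamp/date_range equivalence exercised by the tests
# above: the first element of a tz-aware date_range equals the matching Timestamp.
import pandas as pd
rng = pd.date_range("20180101", "20180103", tz="US/Eastern")
assert rng[0] == pd.Timestamp("20180101", tz="US/Eastern")
assert len(rng) == 3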
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas._libs.tslibs.period import IncompatibleFrequency
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import PeriodArray, period_array
@pytest.mark.parametrize(
"data, freq, expected",
[
([pd.Period("2017", "D")], None, [17167]),
([pd.Period("2017", "D")], "D", [17167]),
([2017], "D", [17167]),
(["2017"], "D", [17167]),
([pd.Period("2017", "D")], | pd.tseries.offsets.Day() | pandas.tseries.offsets.Day |
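# A minimal sketch of why the daily Period for "2017" corresponds to ordinal 17167
# in the parametrization above: the ordinal counts days elapsed since 1970-01-01.
import pandas as pd
p = pd.Period("2017", freq="D")  # resolves to 2017-01-01 at daily frequency
assert p.ordinal == 17167  # 47 * 365 + 12 leap days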
import parms
import pandas as pd
def do(pd_series, sheet_title, default):
description = str(pd_series[parms.COLUMN_DESCRIPTION()])
if sheet_title == "Example":
return description
elif sheet_title == "Example2":
description = adopt_text(pd_series["Next Location"])
else:
return default
return description
def adopt_text(text):
return str.encode(text).replace(b'\015', b'\n').decode(errors='strict')
if __name__ == "__main__":
d = {parms.COLUMN_TITLE: ["Item"], parms.COLUMN_DESCRIPTION: ["Example Item"]}
print(do( | pd.Series(["Description Example"], index=["Description"]) | pandas.Series |
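# A minimal sketch of the label-indexed Series access that do() above relies on;
# the index label "Description" is a hypothetical stand-in for parms.COLUMN_DESCRIPTION().
import pandas as pd
row = pd.Series(["Description Example"], index=["Description"])
assert row["Description"] == "Description Example"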
import argparse
import logging
import logging.config
import os
from os.path import dirname, exists, join
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, roc_auc_score, f1_score
from sklearn.preprocessing import StandardScaler
from qac.simq import simq_features
from qac.evaluation import evaluation
from qac.experiments import preprocessing
logger = logging.getLogger(__name__) # pylint: disable=locally-disabled, invalid-name
DEBUG = False
THRESHOLDS = np.linspace(0, 1, num=1000)
class ThresholdClassifier():
def __init__(self, T):
self.T = T
def predict(self, X):
return np.where(X >= self.T, 0, 1)
def decision_function(self, X):
y = self.predict(X)
return np.where(y == 0, self.T - X, X - self.T)
def _scaled_scores(scores, feat, train_ids, val_ids, test_ids):
X_train = scores[feat].loc[train_ids].values.reshape(-1, 1)
X_val = scores[feat].loc[val_ids].values.reshape(-1, 1)
X_test = scores[feat].loc[test_ids].values.reshape(-1, 1)
scaler = StandardScaler().fit(X_train)
X_val = scaler.transform(X_val)[:, 0]
X_test = scaler.transform(X_test)[:, 0]
return X_val, X_test
def make_predictions(args):
community = preprocessing.load_community(args.community, preprocess=False, with_dev=True)
train_ids, val_ids, test_ids = community.train_ids, community.val_ids, community.test_ids
y_val = community.y_val
feature_handler = simq_features.FeatureHandler(args.community, run=args.simq_run)
feat = args.feature
scores = feature_handler.read(feature_name=feat)
X_val, X_test = _scaled_scores(scores, args.feature, train_ids, val_ids, test_ids)
best_t = evaluate_thresholds(THRESHOLDS, X_val, y_val)
clf = ThresholdClassifier(best_t)
y_pred = clf.predict(X_test)
y_pred_proba = clf.decision_function(X_test)
return np.stack([test_ids, y_pred, y_pred_proba], axis=1)
def evaluate_thresholds(thresholds, X_val, y_val):
logger.info('Start threshold search...')
scores = []
for T in thresholds:
clf = ThresholdClassifier(T)
y_pred = clf.predict(X_val)
y_pred_proba = clf.decision_function(X_val)
acc_score = accuracy_score(y_val, y_pred)
auc_score = roc_auc_score(y_val, y_pred_proba)
f1 = f1_score(y_val, y_pred)
if DEBUG:
logger.info('Acc - %.4f AUC - %.4f F1 - %.4f', acc_score, auc_score, f1)
scores.append(auc_score)
i = np.argmax(scores)
logger.info('Best threshold = %s', str(thresholds[i]))
logger.info('Best score = %.4f', scores[i])
return thresholds[i]
def save_predictions(pred, out_dir, run_id):
os.makedirs(out_dir, exist_ok=True)
y_pred = pred[:, 1].astype(int)
y_pred = preprocessing.LABEL_ENCODER.inverse_transform(y_pred)
| pd.DataFrame.from_dict({'id': pred[:, 0], 'y_pred': y_pred, 'y_pred_proba': pred[:, 2]}) | pandas.DataFrame.from_dict |
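# A minimal sketch of assembling the predictions frame with DataFrame.from_dict;
# the ids, labels and scores below are made-up stand-ins for the classifier output.
import numpy as np
import pandas as pd
pred = np.array([[101, 0, 0.42], [102, 1, 0.13]])
df = pd.DataFrame.from_dict(
    {'id': pred[:, 0].astype(int), 'y_pred': pred[:, 1].astype(int), 'y_pred_proba': pred[:, 2]})
print(df.to_csv(index=False))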
from __future__ import absolute_import, division, print_function
import matplotlib.pylab as plt
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.python.keras import layers
import numpy as np
import os
tf.VERSION
import cv2
import sys
import json
import pandas as pd
from sklearn.metrics import precision_recall_fscore_support, classification_report
from imageTransformFunctions import recenterImageOnEyes
from createValidationVideoWithNewZZversion import createValidationVideoWithNewZZversion
# import pdb
def detectRolloverFramesWithNewZZversion(videoName, path, medianRollingMean, recenterImageWindow, comparePredictedResultsToManual, validationVideo, pathToInitialVideo):
if (medianRollingMean % 2 == 0):
sys.exit("medianRollingMean must be an odd number")
### Loading the classifier
classifier_url = "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/2"
feature_extractor_url = "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/2"
def classifier(x):
classifier_module = hub.Module(classifier_url)
return classifier_module(x)
IMAGE_SIZE = hub.get_expected_image_size(hub.Module(classifier_url))
classifier_layer = layers.Lambda(classifier, input_shape = IMAGE_SIZE+[3])
classifier_model = tf.keras.Sequential([classifier_layer])
classifier_model.summary()
from tensorflow.python.keras import backend as K
sess = K.get_session()
init = tf.global_variables_initializer()
sess.run(init)
####
def feature_extractor(x):
feature_extractor_module = hub.Module(feature_extractor_url)
return feature_extractor_module(x)
IMAGE_SIZE = hub.get_expected_image_size(hub.Module(feature_extractor_url))
features_extractor_layer = layers.Lambda(feature_extractor, input_shape=IMAGE_SIZE+[3])
features_extractor_layer.trainable = False
model = tf.keras.Sequential([
features_extractor_layer,
layers.Dense(2, activation='softmax')
])
model.summary()
init = tf.global_variables_initializer()
sess.run(init)
###
model.compile(
optimizer=tf.train.AdamOptimizer(),
loss='categorical_crossentropy',
metrics=['accuracy'])
checkpoint_path = "model/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
model.load_weights(checkpoint_path)
### Loading the images and applying the classifier on them
videoPath = os.path.join(os.path.join(path, videoName), 'results_' + videoName + '.txt')
if (os.path.isfile(videoPath)):
# Applying rollover classifier to each frame and saving the results in a txt file
file = open(videoPath, 'r')
j = json.loads(file.read())
wellPoissMouv = j['wellPoissMouv']
wellPositions = j['wellPositions']
nbWell = len(wellPositions)
rolloversAllWells = []
rolloversMedFiltAllWells = []
rolloverPercentageAllWells = []
# going through each well in super structure
for i in range(0,nbWell):
xwell = wellPositions[i]['topLeftX']
ywell = wellPositions[i]['topLeftY']
if xwell < 0:
xwell = 0
if ywell < 0:
ywell = 0
videoPath2 = pathToInitialVideo
if (len(wellPoissMouv[i])):
if (len(wellPoissMouv[i][0])):
cap = cv2.VideoCapture(videoPath2)
videoLength = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
rollovers = np.zeros((videoLength))
rolloverPercentage = np.zeros((videoLength))
frames = []
framesNumber = []
nbMouv = len(wellPoissMouv[i][0])
# going through each movement for the well
for j in range(0,nbMouv):
if (len(wellPoissMouv[i][0][j])):
item = wellPoissMouv[i][0][j]
BoutStart = item['BoutStart']
BoutEnd = item['BoutEnd']
k = BoutStart
cap.set(cv2.CAP_PROP_POS_FRAMES,BoutStart)
while (k <= BoutEnd):
ret, frame = cap.read()
yStart = int(ywell+item['HeadY'][k-BoutStart]-30)
yEnd = int(ywell+item['HeadY'][k-BoutStart]+30)
xStart = int(xwell+item['HeadX'][k-BoutStart]-30)
xEnd = int(xwell+item['HeadX'][k-BoutStart]+30)
frame = frame[yStart:yEnd, xStart:xEnd]
if ret == True:
if recenterImageWindow:
frame = recenterImageOnEyes(frame,recenterImageWindow)
rows = len(frame)
cols = len(frame[0])
scaleD = int(cols/6)
frame = frame[scaleD:(rows-scaleD), scaleD:(rows-scaleD)]
frame = cv2.resize(frame,(224,224))
frame = np.array(frame, dtype=np.float32) / 255.0
frames.append(frame)
framesNumber.append(k)
else:
break
k = k + 1
frames = np.array(frames)
resultRaw = model.predict(frames)
result = np.argmax(resultRaw, axis=-1)
rollovers[framesNumber] = result
rolloverPercentage[framesNumber] = resultRaw[:,1]
rolloversMedFiltSeries = ( | pd.Series(rollovers) | pandas.Series |
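# A minimal sketch of median smoothing on the per-frame rollover predictions above;
# the exact call after pd.Series(rollovers) is not shown, so the rolling-median
# continuation and the window size here are assumptions.
import numpy as np
import pandas as pd
rollovers = np.array([0, 1, 0, 1, 1, 1, 0, 1, 0, 0], dtype=float)
medianRollingMean = 5  # must be odd, as enforced above
smoothed = pd.Series(rollovers).rolling(medianRollingMean, center=True).median()
print(smoothed.fillna(0).astype(int).tolist())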
import csv
from collections import defaultdict, Counter
import hashlib
import tempfile
import os
from os.path import join
import subprocess
import shutil
import logging
import socket
from traceback import format_exc
import sys
import click
import numpy.random
import numpy
import biom
import skbio.io
from pandas import DataFrame, Series
from qiime2 import Artifact
from qiime2.plugins import clawback
from mpiutils import dispatcher, mpi_logging
@click.command()
@click.option('--biom-file', required=True, type=click.Path(exists=True),
help='Sample table with SVs for observation ids (biom)')
@click.option('--missed-sample-file', required=True,
type=click.Path(exists=True),
help='Basenames of the sample files that failed generation')
@click.option('--sv-to-ref-seq-file', required=True,
type=click.Path(exists=True),
help='BLAST mapping from SVs to ref seq labels (tsv)')
@click.option('--ref-taxa', required=True, type=click.Path(exists=True),
help='Greengenes reference taxa (tsv)')
@click.option('--ref-seqs', required=True, type=click.Path(exists=True),
help='Greengenes reference sequences (fasta)')
@click.option('--expected-dir', required=True, type=click.Path(exists=True),
help='Output directory for expected taxa Artifacts')
@click.option('--abundances-dir', required=True, type=click.Path(exists=True),
help='Output directory for expected taxa frequency Artifacts')
@click.option('--sequences-dir', required=True, type=click.Path(exists=True),
help='Output directory for the simulated SV Artifacts')
@click.option('--tmp-dir', type=click.Path(exists=False),
help='Temp dir (gets left behind on simulation exception)')
@click.option('--log-file', type=click.Path(), help='Log file')
@click.option('--log-level',
type=click.Choice('DEBUG INFO WARNING ERROR CRITICAL'.split()),
default='WARNING', help='Log level')
def simulate_missed_samples(biom_file, missed_sample_file, sv_to_ref_seq_file,
ref_taxa, ref_seqs, expected_dir, abundances_dir,
sequences_dir, tmp_dir=None, log_file=None,
log_level='DEBUG'):
setup_logging(log_level, log_file)
if dispatcher.am_dispatcher():
logging.info(locals())
all_samples = biom.load_table(biom_file)
missed_samples = load_missed_samples(missed_sample_file)
def process_sample(basename_sample):
basename, sample = basename_sample
try:
exp_filename = join(expected_dir, basename)
abund_filename = join(abundances_dir, basename)
seqs_filename = join(sequences_dir, basename)
generate_triple(
basename[:-4], sample, sv_to_ref_seq_file, ref_taxa, ref_seqs,
exp_filename, abund_filename, seqs_filename, tmp_dir)
logging.info('Done ' + basename)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
logging.warning('Skipping ' + basename + ':\n' + format_exc())
def sample_generator():
for fold, sample_id in missed_samples:
basename = sample_id + '-fold-' + str(fold) + '.qza'
yield basename, extract_sample([sample_id], all_samples)
result = dispatcher.farm(process_sample, sample_generator())
if result:
list(result)
def load_missed_samples(missed_sample_file):
with open(missed_sample_file) as ms_fh:
missed_samples = []
for line in ms_fh:
line = line.strip()
if line.endswith('.qza'):
line = line[:-4]
fold = int(line.rsplit('-', 1)[-1])
sample_id = line.rsplit('-', 2)[0]
missed_samples.append((fold, sample_id))
return missed_samples
@click.command()
@click.option('--biom-file', required=True, type=click.Path(exists=True),
help='Sample table with SVs for observation ids (biom)')
@click.option('--sv-to-ref-seq-file', required=True,
type=click.Path(exists=True),
help='BLAST mapping from SVs to ref seq labels (tsv)')
@click.option('--sv-to-ref-tax-file', required=True,
type=click.Path(exists=True),
help='Naive Bayes mapping from SVs to ref seq taxa (qza)')
@click.option('--ref-taxa', required=True, type=click.Path(exists=True),
help='Greengenes reference taxa (tsv)')
@click.option('--ref-seqs', required=True, type=click.Path(exists=True),
help='Greengenes reference sequences (fasta)')
@click.option('--weights-dir', required=True, type=click.Path(exists=True),
help='Output directory for fold weights Artifacts')
@click.option('--expected-dir', required=True, type=click.Path(exists=True),
help='Output directory for expected taxa Artifacts')
@click.option('--abundances-dir', required=True, type=click.Path(exists=True),
help='Output directory for expected taxa frequency Artifacts')
@click.option('--sequences-dir', required=True, type=click.Path(exists=True),
help='Output directory for the simulated SV Artifacts')
@click.option('--k', type=int, default=10,
help='Number of folds for cross validation (default 10)')
@click.option('--tmp-dir', type=click.Path(exists=False),
help='Temp dir (gets left behind on simulation exception)')
@click.option('--log-file', type=click.Path(), help='Log file')
@click.option('--log-level',
type=click.Choice('DEBUG INFO WARNING ERROR CRITICAL'.split()),
default='WARNING', help='Log level')
def simulate_all_samples(biom_file, sv_to_ref_seq_file, sv_to_ref_tax_file,
ref_taxa, ref_seqs, weights_dir,
expected_dir, abundances_dir, sequences_dir, k=10,
tmp_dir=None, log_file=None, log_level='DEBUG'):
setup_logging(log_level, log_file)
if dispatcher.am_dispatcher():
logging.info(locals())
all_samples = biom.load_table(biom_file)
# shuffle the sample ids, assign folds, and generate weights
sample_ids = numpy.array(all_samples.ids())
logging.info('Found ' + str(len(sample_ids)) + ' samples')
numpy.random.shuffle(sample_ids)
folds = numpy.array([i % k for i in range(len(sample_ids))])
reference_taxonomy = Artifact.import_data(
'FeatureData[Taxonomy]', ref_taxa,
view_type='HeaderlessTSVTaxonomyFormat')
reference_sequences = Artifact.import_data(
'FeatureData[Sequence]', ref_seqs)
taxonomy_classification = Artifact.load(sv_to_ref_tax_file)
for fold in range(k):
training_set = extract_sample(
sample_ids[folds != fold], all_samples)
table = Artifact.import_data(
'FeatureTable[Frequency]', training_set)
unobserved_weight = 1e-6
normalise = False
weights = clawback.methods.generate_class_weights(
reference_taxonomy, reference_sequences,
table, taxonomy_classification)
weights = weights.class_weight
weights_filename = \
'weights-normalise-%s-unobserved-weight-%g-fold-%d.qza' %\
(normalise, unobserved_weight, fold)
weights.save(join(weights_dir, weights_filename))
def process_sample(basename_sample):
basename, sample = basename_sample
try:
exp_filename = join(expected_dir, basename)
abund_filename = join(abundances_dir, basename)
seqs_filename = join(sequences_dir, basename)
generate_triple(
basename[:-4], sample, sv_to_ref_seq_file, ref_taxa, ref_seqs,
exp_filename, abund_filename, seqs_filename, tmp_dir)
logging.info('Done ' + basename)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
logging.warning('Skipping ' + basename + ':\n' + format_exc())
def sample_generator():
for fold, sample_id in zip(folds, sample_ids):
basename = sample_id + '-fold-' + str(fold) + '.qza'
yield basename, extract_sample([sample_id], all_samples)
result = dispatcher.farm(process_sample, sample_generator())
if result:
list(result)
def setup_logging(log_level=None, log_file=None):
try:
if log_file:
log_dir = os.path.dirname(log_file)
dispatcher.checkmakedirs(log_dir)
handler = mpi_logging.MPIFileHandler(log_file)
else:
handler = logging.StreamHandler()
log_level = getattr(logging, log_level.upper())
handler.setLevel(log_level)
hostpid = ''
if dispatcher.USING_MPI:
hostpid = socket.gethostname()+':'+str(os.getpid())+':'
formatter = logging.Formatter('%(asctime)s:' + hostpid +
'%(levelname)s:%(message)s')
handler.setFormatter(formatter)
logging.root.addHandler(handler)
logging.root.setLevel(log_level)
except Exception:
sys.stderr.write(' Unable to set up logging:\n'+format_exc())
dispatcher.exit(1)
def extract_sample(sample_ids, samples):
subsample = samples.filter(sample_ids, inplace=False)
subsample.filter(
lambda v, _, __: v.sum() > 1e-9, axis='observation', inplace=True)
return subsample
def load_sv_map(sv_to_ref_seq_file):
with open(sv_to_ref_seq_file) as blast_results:
blast_reader = csv.reader(blast_results, csv.excel_tab)
sv_map = {sv: ref_seq for sv, ref_seq in blast_reader}
return sv_map
def load_ref_seqs_map(ref_seqs):
with open(ref_seqs) as ref_fh:
fasta_reader = skbio.io.read(ref_fh, 'fasta')
ref_seqs = {s.metadata['id']: str(s) for s in fasta_reader}
return ref_seqs
def generate_triple(sample_id, sample, sv_to_ref_seq_file, ref_taxa, ref_seqs,
exp_filename, abund_filename, seqs_filename, tmp_dir):
sv_map = load_sv_map(sv_to_ref_seq_file)
ref_seqs_map = load_ref_seqs_map(ref_seqs)
tax_map = load_taxonomy_map(ref_taxa)
with tempfile.TemporaryDirectory() as tmpdir:
try:
dada_in_dirs = simulate(sample, tmpdir, sv_map, ref_seqs_map)
dada_out_dirs, dada_tmp_dirs = denoise(tmpdir, dada_in_dirs)
result = traceback(dada_out_dirs, dada_tmp_dirs, tax_map)
save_result(
sample_id, result, exp_filename, abund_filename, seqs_filename)
except Exception:
if tmp_dir is not None and not os.path.exists(tmp_dir):
shutil.copytree(tmpdir, tmp_dir)
raise
def load_taxonomy_map(ref_taxa):
with open(ref_taxa) as tax_fh:
tax_map = {r[0].encode('utf-8'): r[1]
for r in csv.reader(tax_fh, csv.excel_tab)}
return tax_map
def simulate(sample, tmpdir, sv_map, ref_seqs):
'add some noise to the reference sequences'
# output the amplicon sequences to fasta, labelled by greengenes sequence
# label, with abundance that `vsearch --rereplicate` will understand
abundance_filename = join(tmpdir, 'abundance.fasta')
with open(abundance_filename, 'w') as a_fh:
for row in sample.iter(axis='observation'):
abundance, sv, _ = row
abundance = int(abundance[0])
if sv in sv_map:
label = sv_map[sv]
a_fh.write('>' + label + ';size=' + str(abundance) + '\n')
a_fh.write(ref_seqs[sv_map[sv]] + '\n')
# rereplicate according to abundance and run ART to simulate amplicons
prior_art_filename = join(tmpdir, 'prior_art.fasta')
cmd = ('vsearch', '--rereplicate', abundance_filename, '--output',
prior_art_filename)
subprocess.run(cmd, check=True)
post_art_filename = join(tmpdir, 'post_art')
cmd = ('art_illumina -ss MSv1 -amp -i ' + prior_art_filename +
' -l 250 -o ' + post_art_filename + ' -c 1 -na -p').split()
subprocess.run(cmd, check=True)
dada_in_dirs = []
for i in ('1', '2'):
cmd = 'gzip', post_art_filename + i + '.fq'
subprocess.run(cmd, check=True)
dada_in_dir = join(tmpdir, 'dada_in' + i)
os.mkdir(dada_in_dir)
dst = join(dada_in_dir, 'post_art' + i + '.fastq.gz')
shutil.move(post_art_filename + i + '.fq.gz', dst)
dada_in_dirs.append(dada_in_dir)
return dada_in_dirs
def denoise(tmpdir, dada_in_dirs):
'take the noise away, in a way that horribly mangles the sample provenance'
post_dada_filename = join(tmpdir, 'post_dada.tsv')
dada_tmp_dirs = [join(tmpdir, 'dada_tmp' + i) for i in ('1', '2')]
dada_out_dirs = [join(tmpdir, 'dada_out' + i) for i in ('1', '2')]
list(map(os.mkdir, dada_tmp_dirs + dada_out_dirs))
cmd = 'run_traceable_dada_paired.R'.split() +\
dada_in_dirs + [post_dada_filename] + dada_tmp_dirs +\
'250 250 0 0 Inf 0 none 1 1 1000000'.split() + dada_out_dirs
subprocess.run(cmd, check=True)
return dada_out_dirs, dada_tmp_dirs
def traceback(dada_out_dirs, dada_tmp_dirs, tax_map):
'reconstruct the taxa to which each denoised sequence corresponds'
unique_maps = []
for i, dada_out_dir in enumerate(dada_out_dirs, 1):
with open(join(dada_out_dir, 'post_art%d.merge.map' % i)) as merg_fh:
reader = csv.reader(merg_fh, csv.excel_tab)
merg_map = {dada_sv: merg_sv for dada_sv, merg_sv in reader}
with open(join(dada_out_dir, 'post_art%d.dada.map' % i)) as dada_fh:
reader = csv.reader(dada_fh, csv.excel_tab)
unique_map = defaultdict(list)
for unique, dada_sv in reader:
if dada_sv in merg_map:
unique_map[unique].append(merg_map[dada_sv])
unique_maps.append(unique_map)
# this is where the magic happens: intersect the forward/reverse read maps to trace
# each denoised SV back to the taxa of the simulated reads it came from
filtered_taxa = []
single_maps = []
for i, dada_tmp_dir in enumerate(dada_tmp_dirs, 1):
single_map = defaultdict(set)
taxa = []
fastq_filename = join(dada_tmp_dir, 'post_art%d.fastq.gz' % i)
with skbio.io.open(fastq_filename) as pa_fh:
fastq_reader = skbio.io.read(pa_fh, 'fastq', phred_offset=33)
for j, seq in enumerate(fastq_reader):
taxa.append(tax_map[seq.metadata['id'][:-4].encode('utf-8')])
for sv in unique_maps[i-1][str(seq)]:
single_map[sv].add(j)
single_maps.append(single_map)
filtered_taxa.append(taxa)
assert filtered_taxa[0] == filtered_taxa[1]
filtered_taxa = filtered_taxa[0]
merged_map = {sv: single_maps[0][sv].intersection(single_maps[1][sv])
for sv in single_maps[0]}
merged_map = {sv: Counter(filtered_taxa[i] for i in tlist)
for sv, tlist in merged_map.items()}
result = [(s, t, c) for s in merged_map for t, c in merged_map[s].items()]
return result
def save_result(
sample_id, result, exp_filename, abund_filename, seqs_filename):
'save the results in three Artifacts'
svs, taxa, abundances = zip(*result)
hashes = [hashlib.md5((s+t).encode('utf-8')).hexdigest()
for s, t, c in result]
expected = DataFrame({'Taxon': taxa}, index=hashes, columns=['Taxon'])
expected.index.name = 'Feature ID'
expected = Artifact.import_data('FeatureData[Taxonomy]', expected)
expected.save(exp_filename)
abundanced = DataFrame({h: a for h, a in zip(hashes, abundances)},
index=[sample_id], columns=hashes)
abundanced = Artifact.import_data('FeatureTable[Frequency]', abundanced)
abundanced.save(abund_filename)
sequences = | Series(svs, index=hashes) | pandas.Series |
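# A minimal sketch of the hash-indexed Series built above before the qiime2 Artifact
# import; the sequences and taxa below are fabricated examples.
import hashlib
import pandas as pd
svs = ['ACGT', 'GGCC']
taxa = ['k__Bacteria; p__A', 'k__Bacteria; p__B']
hashes = [hashlib.md5((s + t).encode('utf-8')).hexdigest() for s, t in zip(svs, taxa)]
sequences = pd.Series(svs, index=hashes)
print(sequences)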
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Module houses `TextFileDispatcher` class.
`TextFileDispatcher` contains utils for text-format files, inherits file-handling util functions
from the `FileDispatcher` class, and can be used as a base class for dispatchers of SQL queries.
"""
import warnings
import os
import io
import codecs
from typing import Union, Sequence, Optional, Tuple, Callable
from csv import QUOTE_NONE
import numpy as np
import pandas
import pandas._libs.lib as lib
from pandas.core.dtypes.common import is_list_like
from modin.core.io.file_dispatcher import FileDispatcher, OpenFile
from modin.core.storage_formats.pandas.utils import compute_chunksize
from modin.utils import _inherit_docstrings
from modin.core.io.text.utils import CustomNewlineIterator
from modin.config import NPartitions
from modin.error_message import ErrorMessage
ColumnNamesTypes = Tuple[Union[pandas.Index, pandas.MultiIndex]]
IndexColType = Union[int, str, bool, Sequence[int], Sequence[str], None]
class TextFileDispatcher(FileDispatcher):
"""Class handles utils for reading text formats files."""
# This variable allows setting the function with which one partition will be read;
# Used in dispatchers and parsers
read_callback = None
@classmethod
def get_path_or_buffer(cls, filepath_or_buffer):
"""
Extract path from `filepath_or_buffer`.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
`filepath_or_buffer` parameter of `read_csv` function.
Returns
-------
str or path object
verified `filepath_or_buffer` parameter.
Notes
-----
Given a buffer, try and extract the filepath from it so that we can
use it without having to fall back to pandas and share file objects between
workers. Given a filepath, return it immediately.
"""
if hasattr(filepath_or_buffer, "name"):
buffer_filepath = filepath_or_buffer.name
if cls.file_exists(buffer_filepath):
warnings.warn(
"For performance reasons, the filepath will be "
+ "used in place of the file handle passed in "
+ "to load the data"
)
return cls.get_path(buffer_filepath)
return filepath_or_buffer
@classmethod
def build_partition(cls, partition_ids, row_lengths, column_widths):
"""
Build array with partitions of `cls.frame_partition_cls` class.
Parameters
----------
partition_ids : list
Array with references to the partitions data.
row_lengths : list
Partitions rows lengths.
column_widths : list
Number of columns in each partition.
Returns
-------
np.ndarray
array with shape equals to the shape of `partition_ids` and
filed with partitions objects.
"""
return np.array(
[
[
cls.frame_partition_cls(
partition_ids[i][j],
length=row_lengths[i],
width=column_widths[j],
)
for j in range(len(partition_ids[i]))
]
for i in range(len(partition_ids))
]
)
@classmethod
def pathlib_or_pypath(cls, filepath_or_buffer):
"""
Check if `filepath_or_buffer` is instance of `py.path.local` or `pathlib.Path`.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
`filepath_or_buffer` parameter of `read_csv` function.
Returns
-------
bool
Whether or not `filepath_or_buffer` is instance of `py.path.local`
or `pathlib.Path`.
"""
try:
import py
if isinstance(filepath_or_buffer, py.path.local):
return True
except ImportError: # pragma: no cover
pass
try:
import pathlib
if isinstance(filepath_or_buffer, pathlib.Path):
return True
except ImportError: # pragma: no cover
pass
return False
@classmethod
def offset(
cls,
f,
offset_size: int,
quotechar: bytes = b'"',
is_quoting: bool = True,
encoding: str = None,
newline: bytes = None,
):
"""
Move the file offset at the specified amount of bytes.
Parameters
----------
f : file-like object
File handle that should be used for offset movement.
offset_size : int
Number of bytes to read and ignore.
quotechar : bytes, default: b'"'
Indicate quote in a file.
is_quoting : bool, default: True
Whether or not to consider quotes.
encoding : str, optional
Encoding of `f`.
newline : bytes, optional
Byte or sequence of bytes indicating line endings.
Returns
-------
bool
If file pointer reached the end of the file, but did not find
closing quote returns `False`. `True` in any other case.
"""
if is_quoting:
chunk = f.read(offset_size)
outside_quotes = not chunk.count(quotechar) % 2
else:
f.seek(offset_size, os.SEEK_CUR)
outside_quotes = True
# after we read `offset_size` bytes, we most likely end up in the middle of a line,
# and the modin implementation doesn't handle partially read lines correctly, so we
# must make sure the line is read completely up to the line terminator, which is
# what `_read_rows` does
outside_quotes, _ = cls._read_rows(
f,
nrows=1,
quotechar=quotechar,
is_quoting=is_quoting,
outside_quotes=outside_quotes,
encoding=encoding,
newline=newline,
)
return outside_quotes
@classmethod
def partitioned_file(
cls,
f,
num_partitions: int = None,
nrows: int = None,
skiprows: int = None,
quotechar: bytes = b'"',
is_quoting: bool = True,
encoding: str = None,
newline: bytes = None,
header_size: int = 0,
pre_reading: int = 0,
):
"""
Compute chunk sizes in bytes for every partition.
Parameters
----------
f : file-like object
File handle of file to be partitioned.
num_partitions : int, optional
For what number of partitions split a file.
If not specified grabs the value from `modin.config.NPartitions.get()`.
nrows : int, optional
Number of rows of file to read.
skiprows : int, optional
Specifies rows to skip.
quotechar : bytes, default: b'"'
Indicate quote in a file.
is_quoting : bool, default: True
Whether or not to consider quotes.
encoding : str, optional
Encoding of `f`.
newline : bytes, optional
Byte or sequence of bytes indicating line endings.
header_size : int, default: 0
Number of rows that are occupied by the header.
pre_reading : int, default: 0
Number of rows between the header and the skipped rows that should be read.
Returns
-------
list
List with the next elements:
int : partition start read byte
int : partition end read byte
"""
read_rows_counter = 0
outside_quotes = True
if num_partitions is None:
num_partitions = NPartitions.get() - 1 if pre_reading else NPartitions.get()
rows_skipper = cls.rows_skipper_builder(
f, quotechar, is_quoting=is_quoting, encoding=encoding, newline=newline
)
result = []
file_size = cls.file_size(f)
rows_skipper(header_size)
if pre_reading:
pre_reading_start = f.tell()
outside_quotes, read_rows = cls._read_rows(
f,
nrows=pre_reading,
quotechar=quotechar,
is_quoting=is_quoting,
outside_quotes=outside_quotes,
encoding=encoding,
newline=newline,
)
read_rows_counter += read_rows
result.append((pre_reading_start, f.tell()))
# add outside_quotes
if is_quoting and not outside_quotes:
warnings.warn("File has mismatched quotes")
rows_skipper(skiprows)
start = f.tell()
if nrows:
partition_size = max(1, num_partitions, nrows // num_partitions)
while f.tell() < file_size and read_rows_counter < nrows:
if read_rows_counter + partition_size > nrows:
# it's possible only if is_quoting==True
partition_size = nrows - read_rows_counter
outside_quotes, read_rows = cls._read_rows(
f,
nrows=partition_size,
quotechar=quotechar,
is_quoting=is_quoting,
encoding=encoding,
newline=newline,
)
result.append((start, f.tell()))
start = f.tell()
read_rows_counter += read_rows
# add outside_quotes
if is_quoting and not outside_quotes:
warnings.warn("File has mismatched quotes")
else:
partition_size = max(1, num_partitions, file_size // num_partitions)
while f.tell() < file_size:
outside_quotes = cls.offset(
f,
offset_size=partition_size,
quotechar=quotechar,
is_quoting=is_quoting,
encoding=encoding,
newline=newline,
)
result.append((start, f.tell()))
start = f.tell()
# add outside_quotes
if is_quoting and not outside_quotes:
warnings.warn("File has mismatched quotes")
return result
@classmethod
def _read_rows(
cls,
f,
nrows: int,
quotechar: bytes = b'"',
is_quoting: bool = True,
outside_quotes: bool = True,
encoding: str = None,
newline: bytes = None,
):
"""
Move the file offset forward by the specified number of rows.
Parameters
----------
f : file-like object
File handle that should be used for offset movement.
nrows : int
Number of rows to read.
quotechar : bytes, default: b'"'
Indicate quote in a file.
is_quoting : bool, default: True
Whether or not to consider quotes.
outside_quotes : bool, default: True
Whether the file pointer is within quotes or not at the time this function is called.
encoding : str, optional
Encoding of `f`.
newline : bytes, optional
Byte or sequence of bytes indicating line endings.
Returns
-------
bool
If file pointer reached the end of the file, but did not find closing quote
returns `False`. `True` in any other case.
int
Number of rows that were read.
"""
if nrows is not None and nrows <= 0:
return True, 0
rows_read = 0
if encoding and (
"utf" in encoding
and "8" not in encoding
or encoding == "unicode_escape"
or encoding.replace("-", "_") == "utf_8_sig"
):
iterator = CustomNewlineIterator(f, newline)
else:
iterator = f
for line in iterator:
if is_quoting and line.count(quotechar) % 2:
outside_quotes = not outside_quotes
if outside_quotes:
rows_read += 1
if rows_read >= nrows:
break
if isinstance(iterator, CustomNewlineIterator):
iterator.seek()
# case when EOF
if not outside_quotes:
rows_read += 1
return outside_quotes, rows_read
@classmethod
def compute_newline(cls, file_like, encoding, quotechar):
"""
Compute byte or sequence of bytes indicating line endings.
Parameters
----------
file_like : file-like object
File handle that should be used for line endings computing.
encoding : str
Encoding of `file_like`.
quotechar : str
Quotechar used for parsing `file-like`.
Returns
-------
bytes
line endings
"""
newline = None
if encoding is None:
return newline, quotechar.encode("UTF-8")
quotechar = quotechar.encode(encoding)
encoding = encoding.replace("-", "_")
if (
"utf" in encoding
and "8" not in encoding
or encoding == "unicode_escape"
or encoding == "utf_8_sig"
):
# trigger for computing f.newlines
file_like.readline()
# in bytes
newline = file_like.newlines.encode(encoding)
boms = ()
if encoding == "utf_8_sig":
boms = (codecs.BOM_UTF8,)
elif "16" in encoding:
boms = (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)
elif "32" in encoding:
boms = (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)
for bom in boms:
if newline.startswith(bom):
bom_len = len(bom)
newline = newline[bom_len:]
quotechar = quotechar[bom_len:]
break
return newline, quotechar
# _read helper functions
@classmethod
def rows_skipper_builder(
cls, f, quotechar, is_quoting, encoding=None, newline=None
):
"""
Build object for skipping passed number of lines.
Parameters
----------
f : file-like object
File handle that should be used for offset movement.
quotechar : bytes
Indicate quote in a file.
is_quoting : bool
Whether or not to consider quotes.
encoding : str, optional
Encoding of `f`.
newline : bytes, optional
Byte or sequence of bytes indicating line endings.
Returns
-------
object
skipper object.
"""
def skipper(n):
if n == 0 or n is None:
return 0
else:
return cls._read_rows(
f,
quotechar=quotechar,
is_quoting=is_quoting,
nrows=n,
encoding=encoding,
newline=newline,
)[1]
return skipper
@classmethod
def _define_header_size(
cls,
header: Union[int, Sequence[int], str, None] = "infer",
names: Optional[Sequence] = lib.no_default,
) -> int:
"""
Define the number of rows that are used by header.
Parameters
----------
header : int, list of int or str, default: "infer"
Original `header` parameter of `read_csv` function.
names : array-like, optional
Original names parameter of `read_csv` function.
Returns
-------
header_size : int
The number of rows that are used by header.
"""
header_size = 0
if header == "infer" and names in [lib.no_default, None]:
header_size += 1
elif isinstance(header, int):
header_size += header + 1
elif hasattr(header, "__iter__") and not isinstance(header, str):
header_size += max(header) + 1
return header_size
@classmethod
def _define_metadata(
cls,
df: pandas.DataFrame,
column_names: ColumnNamesTypes,
) -> Tuple[list, int]:
"""
Define partitioning metadata.
Parameters
----------
df : pandas.DataFrame
The DataFrame to split.
column_names : ColumnNamesTypes
Column names of df.
Returns
-------
column_widths : list
Column width to use during new frame creation (number of
columns for each partition).
num_splits : int
The maximum number of splits to separate the DataFrame into.
"""
# This is the number of splits for the columns
num_splits = min(len(column_names) or 1, NPartitions.get())
column_chunksize = compute_chunksize(df.shape[1], num_splits)
if column_chunksize > len(column_names):
column_widths = [len(column_names)]
# This prevents us from unnecessarily serializing a bunch of empty
# objects.
num_splits = 1
else:
# split columns into chunks with maximal size column_chunksize, for example
# if num_splits == 4, len(column_names) == 80 and column_chunksize == 32,
# column_widths will be [32, 32, 16, 0]
column_widths = [
column_chunksize
if len(column_names) > (column_chunksize * (i + 1))
else 0
if len(column_names) < (column_chunksize * i)
else len(column_names) - (column_chunksize * i)
for i in range(num_splits)
]
return column_widths, num_splits
@classmethod
def _launch_tasks(cls, splits: list, **partition_kwargs) -> Tuple[list, list, list]:
"""
Launch tasks to read partitions.
Parameters
----------
splits : list
List of tuples with partitions data, which defines
parser task (start/end read bytes and etc.).
**partition_kwargs : dict
`kwargs` that should be passed to the parser function.
Returns
-------
partition_ids : list
array with references to the partitions data.
index_ids : list
array with references to the partitions index objects.
dtypes_ids : list
array with references to the partitions dtypes objects.
"""
partition_ids = [None] * len(splits)
index_ids = [None] * len(splits)
dtypes_ids = [None] * len(splits)
for idx, (start, end) in enumerate(splits):
partition_kwargs.update({"start": start, "end": end})
*partition_ids[idx], index_ids[idx], dtypes_ids[idx] = cls.deploy(
cls.parse,
num_returns=partition_kwargs.get("num_splits") + 2,
**partition_kwargs,
)
return partition_ids, index_ids, dtypes_ids
@classmethod
def check_parameters_support(
cls,
filepath_or_buffer,
read_kwargs: dict,
skiprows_md: Union[Sequence, callable, int],
header_size: int,
) -> bool:
"""
Check support of only general parameters of `read_*` function.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
`filepath_or_buffer` parameter of `read_*` function.
read_kwargs : dict
Parameters of `read_*` function.
skiprows_md : int, array or callable
`skiprows` parameter modified for easier handling by Modin.
header_size : int
Number of rows that are used by header.
Returns
-------
bool
Whether passed parameters are supported or not.
"""
skiprows = read_kwargs.get("skiprows")
if isinstance(filepath_or_buffer, str):
if not cls.file_exists(filepath_or_buffer):
return False
elif not cls.pathlib_or_pypath(filepath_or_buffer):
return False
if read_kwargs["chunksize"] is not None:
return False
skiprows_supported = True
if is_list_like(skiprows_md) and skiprows_md[0] < header_size:
skiprows_supported = False
elif callable(skiprows):
# check if `skiprows` callable gives True for any of header indices
is_intersection = any(
cls._get_skip_mask(pandas.RangeIndex(header_size), skiprows)
)
if is_intersection:
skiprows_supported = False
if not skiprows_supported:
ErrorMessage.single_warning(
"Values of `header` and `skiprows` parameters have intersections. "
+ "This case is unsupported by Modin, so pandas implementation will be used"
)
return False
return True
@classmethod
@_inherit_docstrings(pandas.io.parsers.base_parser.ParserBase._validate_usecols_arg)
def _validate_usecols_arg(cls, usecols):
msg = (
"'usecols' must either be list-like of all strings, all unicode, "
+ "all integers or a callable."
)
if usecols is not None:
if callable(usecols):
return usecols, None
if not is_list_like(usecols):
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
@classmethod
def _manage_skiprows_parameter(
cls,
skiprows: Union[int, Sequence[int], Callable, None] = None,
header_size: int = 0,
) -> Tuple[Union[int, Sequence, Callable], bool, int]:
"""
Manage `skiprows` parameter of read_csv and read_fwf functions.
Change the `skiprows` parameter so that Modin can process it more optimally.
`csv_dispatcher` and `fwf_dispatcher` have two mechanisms for skipping rows:
1) During file partitioning (setting the file limits that should be read
by each partition), exact rows can be excluded from the partitioning scope,
so they won't be read at all and can be considered skipped. This is
the most effective way of skipping rows (since it doesn't require any
actual data reading or postprocessing), but in this case the `skiprows`
parameter can only be an integer. Whenever possible, Modin uses
this approach by setting the `skiprows_partitioning` return value.
2) Rows to skip can be dropped after the full dataset import. This is
the more expensive way, since it requires extra IO work and postprocessing
afterwards, but the `skiprows` parameter can be of any non-integer type
supported by the pandas read functions. These rows are
specified by setting the `skiprows_md` return value.
In some cases, if `skiprows` is a uniformly distributed array (e.g. [1,2,3]),
`skiprows` can be "squashed" and represented as an integer to enable a fastpath.
If there is a gap between the first row to skip and the last line of
the header (which will be skipped too), this gap is assigned to be read first
(the first partition is assigned to read these rows by setting the `pre_reading`
return value). See the `Examples` section for details.
Parameters
----------
skiprows : int, array or callable, optional
Original `skiprows` parameter of any pandas read function.
header_size : int, default: 0
Number of rows that are used by header.
Returns
-------
skiprows_md : int, array or callable
Updated skiprows parameter. If `skiprows` is an array, this
array will be sorted. The parameter will also be aligned to the
actual data in the `query_compiler` (which, for example,
doesn't contain header rows).
pre_reading : int
The number of rows that should be read before data file
splitting for further reading (the number of rows for
the first partition).
skiprows_partitioning : int
The number of rows that should be skipped virtually (skipped during
data file partitioning).
Examples
--------
Let's consider the case when `header`="infer" and `skiprows`=[3,4,5]. In
this specific case a fastpath can be taken, since `skiprows` is a uniformly
distributed array, so we can "squash" it to an integer and set
`skiprows_partitioning`=3. But if no additional action is taken,
these three rows will be skipped right after the header line, which corresponds
to `skiprows`=[1,2,3]. To avoid this discrepancy, we need to assign
the first partition to read the data between the header line and the first
row to skip, by setting the `pre_reading` return value, so
`pre_reading`=2. During data file partitioning, these lines will be assigned
to the first partition for reading, and then the file position will be set at
the beginning of the rows that should be skipped via `skiprows_partitioning`.
After skipping these rows, the remaining data will be divided between the
rest of the partitions; see the row assignment below:
0 - header line (skip during partitioning)
1 - pre_reading (assign to read by the first partition)
2 - pre_reading (assign to read by the first partition)
3 - skiprows_partitioning (skip during partitioning)
4 - skiprows_partitioning (skip during partitioning)
5 - skiprows_partitioning (skip during partitioning)
6 - data to partition (divide between the rest of partitions)
7 - data to partition (divide between the rest of partitions)
"""
pre_reading = skiprows_partitioning = skiprows_md = 0
if isinstance(skiprows, int):
skiprows_partitioning = skiprows
elif is_list_like(skiprows):
skiprows_md = np.sort(skiprows)
if np.all(np.diff(skiprows_md) == 1):
# `skiprows` is uniformly distributed array.
pre_reading = (
skiprows_md[0] - header_size if skiprows_md[0] > header_size else 0
)
skiprows_partitioning = len(skiprows_md)
skiprows_md = 0
elif skiprows_md[0] > header_size:
skiprows_md = skiprows_md - header_size
elif callable(skiprows):
def skiprows_func(x):
return skiprows(x + header_size)
skiprows_md = skiprows_func
return skiprows_md, pre_reading, skiprows_partitioning
@classmethod
def _define_index(
cls,
index_ids: list,
index_name: str,
) -> Tuple[IndexColType, list]:
"""
Compute the resulting DataFrame index and index lengths for each of partitions.
Parameters
----------
index_ids : list
Array with references to the partitions index objects.
index_name : str
Name that should be assigned to the index if `index_col`
is not provided.
Returns
-------
new_index : IndexColType
Index that should be passed to the new_frame constructor.
row_lengths : list
Partitions rows lengths.
"""
index_objs = cls.materialize(index_ids)
if len(index_objs) == 0 or isinstance(index_objs[0], int):
row_lengths = index_objs
new_index = pandas.RangeIndex(sum(index_objs))
else:
row_lengths = [len(o) for o in index_objs]
new_index = index_objs[0].append(index_objs[1:])
new_index.name = index_name
return new_index, row_lengths
@classmethod
def _get_new_qc(
cls,
partition_ids: list,
index_ids: list,
dtypes_ids: list,
index_col: IndexColType,
index_name: str,
column_widths: list,
column_names: ColumnNamesTypes,
skiprows_md: Union[Sequence, callable, None] = None,
header_size: int = None,
**kwargs,
):
"""
Get new query compiler from data received from workers.
Parameters
----------
partition_ids : list
Array with references to the partitions data.
index_ids : list
Array with references to the partitions index objects.
dtypes_ids : list
Array with references to the partitions dtypes objects.
index_col : IndexColType
`index_col` parameter of `read_csv` function.
index_name : str
Name that should be assigned to the index if `index_col`
is not provided.
column_widths : list
Number of columns in each partition.
column_names : ColumnNamesTypes
Array with columns names.
skiprows_md : array-like or callable, optional
Specifies rows to skip.
header_size : int, default: 0
Number of rows that are occupied by the header.
**kwargs : dict
Parameters of `read_csv` function needed for postprocessing.
Returns
-------
new_query_compiler : BaseQueryCompiler
New query compiler, created from `new_frame`.
"""
new_index, row_lengths = cls._define_index(index_ids, index_name)
# Compute dtypes by collecting and combining all of the partition dtypes. The
# reported dtypes from differing rows can be different based on the inference in
# the limited data seen by each worker. We use pandas to compute the exact dtype
# over the whole column for each column. The index is set below.
dtypes = cls.get_dtypes(dtypes_ids) if len(dtypes_ids) > 0 else None
# Compose modin partitions from `partition_ids`
partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)
# Set the index for the dtypes to the column names
if isinstance(dtypes, pandas.Series):
dtypes.index = column_names
else:
dtypes = pandas.Series(dtypes, index=column_names)
new_frame = cls.frame_cls(
partition_ids,
new_index,
column_names,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_query_compiler = cls.query_compiler_cls(new_frame)
skipfooter = kwargs.get("skipfooter", None)
if skipfooter:
new_query_compiler = new_query_compiler.drop(
new_query_compiler.index[-skipfooter:]
)
if skiprows_md is not None:
# skip rows that passed as array or callable
nrows = kwargs.get("nrows", None)
index_range = pandas.RangeIndex(len(new_query_compiler.index))
if is_list_like(skiprows_md):
new_query_compiler = new_query_compiler.view(
index=index_range.delete(skiprows_md)
)
elif callable(skiprows_md):
skip_mask = cls._get_skip_mask(index_range, skiprows_md)
if not isinstance(skip_mask, np.ndarray):
skip_mask = skip_mask.to_numpy("bool")
view_idx = index_range[~skip_mask]
new_query_compiler = new_query_compiler.view(index=view_idx)
else:
raise TypeError(
f"Not acceptable type of `skiprows` parameter: {type(skiprows_md)}"
)
if not isinstance(new_query_compiler.index, pandas.MultiIndex):
new_query_compiler = new_query_compiler.reset_index(drop=True)
if nrows:
new_query_compiler = new_query_compiler.view(
pandas.RangeIndex(len(new_query_compiler.index))[:nrows]
)
if index_col is None:
new_query_compiler._modin_frame.synchronize_labels(axis=0)
return new_query_compiler
@classmethod
def _read(cls, filepath_or_buffer, **kwargs):
"""
Read data from `filepath_or_buffer` according to `kwargs` parameters.
Used in `read_csv` and `read_fwf` Modin implementations.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
`filepath_or_buffer` parameter of read functions.
**kwargs : dict
Parameters of read functions.
Returns
-------
new_query_compiler : BaseQueryCompiler
Query compiler with imported data for further processing.
"""
filepath_or_buffer_md = (
cls.get_path(filepath_or_buffer)
if isinstance(filepath_or_buffer, str)
else cls.get_path_or_buffer(filepath_or_buffer)
)
compression_infered = cls.infer_compression(
filepath_or_buffer, kwargs["compression"]
)
# Getting frequently used kwargs;
# They should be defined at a higher level
names = kwargs["names"]
index_col = kwargs["index_col"]
encoding = kwargs["encoding"]
skiprows = kwargs["skiprows"]
header = kwargs["header"]
# Define header size for further skipping (the header can be skipped because header
# information will be obtained later from the one-row metadata read `pd_df_metadata`,
# so there is no need to handle it in the workers)
header_size = cls._define_header_size(
header,
names,
)
(
skiprows_md,
pre_reading,
skiprows_partitioning,
) = cls._manage_skiprows_parameter(skiprows, header_size)
should_handle_skiprows = skiprows_md is not None and not isinstance(
skiprows_md, int
)
use_modin_impl = cls.check_parameters_support(
filepath_or_buffer,
kwargs,
skiprows_md,
header_size,
)
if not use_modin_impl:
return cls.single_worker_read(
filepath_or_buffer, callback=cls.read_callback, **kwargs
)
is_quoting = kwargs["quoting"] != QUOTE_NONE
# In these cases we should pass additional metadata
# to the workers to match pandas output
pass_names = names in [None, lib.no_default] and (
skiprows is not None or kwargs["skipfooter"] != 0
)
pd_df_metadata = cls.read_callback(
filepath_or_buffer,
**dict(kwargs, nrows=1, skipfooter=0, index_col=index_col),
)
column_names = pd_df_metadata.columns
column_widths, num_splits = cls._define_metadata(pd_df_metadata, column_names)
# kwargs that will be passed to the workers
partition_kwargs = dict(
kwargs,
fname=filepath_or_buffer_md,
num_splits=num_splits,
header_size=header_size if not pass_names else 0,
names=names if not pass_names else column_names,
header=header if not pass_names else "infer",
skipfooter=0,
skiprows=None,
nrows=None,
compression=compression_infered,
)
with OpenFile(filepath_or_buffer_md, "rb", compression_infered) as f:
old_pos = f.tell()
fio = io.TextIOWrapper(f, encoding=encoding, newline="")
newline, quotechar = cls.compute_newline(
fio, encoding, kwargs.get("quotechar", '"')
)
f.seek(old_pos)
splits = cls.partitioned_file(
f,
num_partitions=NPartitions.get(),
nrows=kwargs["nrows"] if not should_handle_skiprows else None,
skiprows=skiprows_partitioning,
quotechar=quotechar,
is_quoting=is_quoting,
encoding=encoding,
newline=newline,
header_size=header_size,
pre_reading=pre_reading,
)
partition_ids, index_ids, dtypes_ids = cls._launch_tasks(
splits, callback=cls.read_callback, **partition_kwargs
)
new_query_compiler = cls._get_new_qc(
partition_ids=partition_ids,
index_ids=index_ids,
dtypes_ids=dtypes_ids,
index_col=index_col,
index_name=pd_df_metadata.index.name,
column_widths=column_widths,
column_names=column_names,
skiprows_md=skiprows_md if should_handle_skiprows else None,
header_size=header_size,
skipfooter=kwargs["skipfooter"],
parse_dates=kwargs["parse_dates"],
nrows=kwargs["nrows"] if should_handle_skiprows else None,
)
return new_query_compiler
@classmethod
def _get_skip_mask(cls, rows_index: pandas.Index, skiprows: Callable):
"""
Get mask of skipped by callable `skiprows` rows.
Parameters
----------
rows_index : pandas.Index
Rows index to get mask for.
skiprows : Callable
Callable to check whether row index should be skipped.
Returns
-------
pandas.Index
"""
try:
# a direct `skiprows` call is more efficient than using the
# map method, but in some cases it can work incorrectly, e.g.
# when `skiprows` contains the `in` operator
mask = skiprows(rows_index)
assert | is_list_like(mask) | pandas.core.dtypes.common.is_list_like |
#!/usr/bin/env python
# coding: utf-8
from numbers import Number
from typing import Dict
from typing import Callable
from typing import Optional
from typing import Union
from dataclasses import dataclass, fields
import numpy as np
import pandas as pd
from scipy.stats import chi2_contingency
from evidently import ColumnMapping
from evidently.analyzers.base_analyzer import Analyzer
from evidently.analyzers.base_analyzer import BaseAnalyzerResult
from evidently.analyzers.utils import DatasetColumns
from evidently.analyzers.utils import process_columns
@dataclass
class FeatureQualityStats:
"""Class for all features data quality metrics store.
A type of the feature is stored in `feature_type` field.
Concrete stat kit depends on the feature type. Is a metric is not applicable - leave `None` value for it.
Metrics for all feature types:
- feature type - cat for category, num for numeric, datetime for datetime features
- count - quantity of meaningful values (NaN values are not taken into account)
- missing_count - quantity of meaningless (NaN) values
- missing_percentage - the percentage of missing values
- unique_count - quantity of unique values
- unique_percentage - the percentage of the unique values
- max - maximum value (not applicable for category features)
- min - minimum value (not applicable for category features)
- most_common_value - the most common value in the feature values
- most_common_value_percentage - the percentage of the most common value
- most_common_not_null_value - if `most_common_value` equals NaN - the next most common value. Otherwise - None
- most_common_not_null_value_percentage - the percentage of `most_common_not_null_value` if it is defined.
If `most_common_not_null_value` is not defined, equals None too.
Metrics for numeric features only:
- infinite_count - quantity of infinite values (for numeric features only)
- infinite_percentage - the percentage of infinite values (for numeric features only)
- percentile_25 - 25% percentile for meaningful values
- percentile_50 - 50% percentile for meaningful values
- percentile_75 - 75% percentile for meaningful values
- mean - the sum of the meaningful values divided by the number of the meaningful values
- std - standard deviation of the values
Metrics for category features only:
- new_in_current_values_count - quantity of new values in the current dataset after the reference
Defined for reference dataset only.
- unused_in_current_values_count - quantity of values in the reference dataset that are not present in the current one
Defined for reference dataset only.
"""
# feature type - cat for category, num for numeric, datetime for datetime features
feature_type: str
# quantity of meaningful (non-NaN) values
count: int = 0
infinite_count: Optional[int] = None
infinite_percentage: Optional[float] = None
missing_count: Optional[int] = None
missing_percentage: Optional[float] = None
unique_count: Optional[int] = None
unique_percentage: Optional[float] = None
percentile_25: Optional[float] = None
percentile_50: Optional[float] = None
percentile_75: Optional[float] = None
max: Optional[Union[Number, str]] = None
min: Optional[Union[Number, str]] = None
mean: Optional[float] = None
most_common_value: Optional[Union[Number, str]] = None
most_common_value_percentage: Optional[float] = None
std: Optional[float] = None
most_common_not_null_value: Optional[Union[Number, str]] = None
most_common_not_null_value_percentage: Optional[float] = None
new_in_current_values_count: Optional[int] = None
unused_in_current_values_count: Optional[int] = None
def is_datetime(self):
"""Checks that the object stores stats for a datetime feature"""
return self.feature_type == "datetime"
def is_numeric(self):
"""Checks that the object stores stats for a numeric feature"""
return self.feature_type == "num"
def is_category(self):
"""Checks that the object stores stats for a category feature"""
return self.feature_type == "cat"
def as_dict(self):
return {field.name: getattr(self, field.name) for field in fields(FeatureQualityStats)}
def __eq__(self, other):
for field in fields(FeatureQualityStats):
other_field_value = getattr(other, field.name)
self_field_value = getattr(self, field.name)
if pd.isnull(other_field_value) and pd.isnull(self_field_value):
continue
if not other_field_value == self_field_value:
return False
return True
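# Hedged usage sketch added for illustration; it is not part of the original
# module. The field values below are assumptions, not real computed statistics.
def _example_feature_quality_stats_usage() -> dict:
    example = FeatureQualityStats(feature_type="num", count=100, mean=3.5, std=1.2)
    # the predicate helpers above dispatch on `feature_type`
    assert example.is_numeric() and not example.is_category()
    return example.as_dict()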
@dataclass
class DataQualityStats:
num_features_stats: Optional[Dict[str, FeatureQualityStats]] = None
cat_features_stats: Optional[Dict[str, FeatureQualityStats]] = None
datetime_features_stats: Optional[Dict[str, FeatureQualityStats]] = None
target_stats: Optional[Dict[str, FeatureQualityStats]] = None
def get_all_features(self) -> Dict[str, FeatureQualityStats]:
result = {}
for features in (
self.target_stats,
self.datetime_features_stats,
self.cat_features_stats,
self.num_features_stats,
):
if features is not None:
result.update(features)
return result
def __getitem__(self, item) -> FeatureQualityStats:
for features in (
self.target_stats,
self.datetime_features_stats,
self.cat_features_stats,
self.num_features_stats,
):
if features is not None and item in features:
return features[item]
raise KeyError(item)
@dataclass
class DataQualityAnalyzerResults(BaseAnalyzerResult):
"""Class for all results of data quality calculations"""
reference_features_stats: DataQualityStats
reference_correlations: Dict[str, pd.DataFrame]
current_features_stats: Optional[DataQualityStats] = None
current_correlations: Optional[Dict[str, pd.DataFrame]] = None
class DataQualityAnalyzer(Analyzer):
"""Data quality analyzer
provides detailed feature statistics and feature behavior overview
"""
@staticmethod
def get_results(analyzer_results) -> DataQualityAnalyzerResults:
return analyzer_results[DataQualityAnalyzer]
def _calculate_stats(self, dataset: pd.DataFrame, columns: DatasetColumns, task: Optional[str]) -> DataQualityStats:
result = DataQualityStats()
result.num_features_stats = {
feature_name: self._get_features_stats(dataset[feature_name], feature_type="num")
for feature_name in columns.num_feature_names
}
result.cat_features_stats = {
feature_name: self._get_features_stats(dataset[feature_name], feature_type="cat")
for feature_name in columns.cat_feature_names
}
if columns.utility_columns.date:
date_list = columns.datetime_feature_names + [columns.utility_columns.date]
else:
date_list = columns.datetime_feature_names
result.datetime_features_stats = {
feature_name: self._get_features_stats(dataset[feature_name], feature_type="datetime")
for feature_name in date_list
}
target_name = columns.utility_columns.target
if target_name is not None and target_name in dataset:
result.target_stats = {}
if task == "classification":
result.target_stats[target_name] = self._get_features_stats(dataset[target_name], feature_type="cat")
else:
result.target_stats[target_name] = self._get_features_stats(dataset[target_name], feature_type="num")
return result
@staticmethod
def _recognize_task(target_name: str, reference_data: pd.DataFrame) -> str:
"""Try to guess about the target type:
if the target has a numeric type and number of unique values > 5: task == ‘regression’
in all other cases task == ‘classification’.
Args:
target_name: name of target column.
reference_data: usually the data which you used in training.
Returns:
Task parameter.
"""
if | pd.api.types.is_numeric_dtype(reference_data[target_name]) | pandas.api.types.is_numeric_dtype |
import os
import pandas as pd
from datetime import datetime, timedelta
from embrace import get_date_from_garmin
import collections
folders = ['01-09-TR1', '10-20-TR2', '21-30-TR3']
def timestamp2datetime2minutes(file_path):
df = | pd.read_csv(file_path, header=1) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : ML Studio #
# Version : 0.1.14 #
# File : test_objectives.py #
# Python : 3.8.3 #
# -------------------------------------------------------------------------- #
# Author : <NAME> #
# Company : DecisionScients #
# Email : <EMAIL> #
# URL : https://github.com/decisionscients/MLStudio #
# -------------------------------------------------------------------------- #
# Created : Monday, June 15th 2020, 3:45:31 pm #
# Last Modified : Monday, June 15th 2020, 3:45:31 pm #
# Modified By : <NAME> (<EMAIL>) #
# -------------------------------------------------------------------------- #
# License : BSD #
# Copyright (c) 2020 DecisionScients #
# =========================================================================== #
#%%
import math
import os
from pathlib import Path
import sys
import glob
import numpy as np
import pandas as pd
import pytest
from pytest import mark
from scipy.special import softmax
from sklearn.metrics import mean_squared_error
from sklearn.datasets import make_regression, make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
homedir = str(Path(__file__).parents[2])
datadir = os.path.join(homedir, "tests\\test_data")
sys.path.append(homedir)
sys.path.append(datadir)
from mlstudio.utils.data_manager import StandardScaler
from mlstudio.supervised.algorithms.optimization.services.loss import Quadratic, CrossEntropy
from mlstudio.supervised.algorithms.optimization.services.loss import CategoricalCrossEntropy
from mlstudio.supervised.algorithms.optimization.services.regularizers import L1, L2, L1_L2
# -------------------------------------------------------------------------- #
def create_regression_prediction(y=None, X=None, theta=None):
"""Creates vector of predictions based upon target plus random noise."""
noise = np.random.normal(0,1, size=y.shape)
y_pred = np.add(y,noise)
return y_pred
def create_classification_prediction(y=None, X=None, theta=None):
"""Creates classification prediction as probability [0,1]"""
return np.random.uniform(0,1, size=y.shape)
def create_multiclass_prediction(y=None, X=None, theta=None):
"""Creates multiclassification prediction."""
z = X.dot(theta)
return softmax(z, axis=1)
def make_regression_data():
X, y = make_regression(n_samples=100, n_features=5, random_state=5)
scaler = StandardScaler()
X = scaler.fit_transform(X)
return X, y
def make_classification_data():
X, y, = make_classification(n_samples=100, n_features=5, random_state=5)
scaler = StandardScaler()
X = scaler.fit_transform(X)
return X, y
def make_multiclass_data():
X, y, = make_classification(n_samples=100, n_features=5, n_classes=4,
n_informative=3, random_state=5)
enc = LabelBinarizer()
y = enc.fit_transform(y)
scaler = StandardScaler()
X = scaler.fit_transform(X)
print(X.shape)
print(y.shape)
return X, y
def create_data():
# Designate filenames and create filepaths
mse_filename = "test_objective_cost_functions_mse.xlsx"
xe_filename = "test_objective_cost_functions_xe.xlsx"
cxe_filename = "test_objective_cost_functions_cxe.xlsx"
mse_filepath = os.path.join(datadir, mse_filename)
xe_filepath = os.path.join(datadir, xe_filename)
cxe_filepath = os.path.join(datadir, cxe_filename)
# Obtain data
X_reg, y_reg = make_regression_data()
X_bin, y_bin = make_classification_data()
X_multi, y_multi = make_multiclass_data()
# Create parameters
regression_theta = np.random.default_rng().uniform(low=0, high=1, size=X_reg.shape[1])
classification_theta = np.random.default_rng().uniform(low=0, high=1, size=X_bin.shape[1])
multiclass_theta = np.random.default_rng().uniform(low=0, high=1, size=(X_multi.shape[1],y_multi.shape[1]))
# Create packages
regression_pack = {'locked': True, 'filepath': mse_filepath, 'X':X_reg,
'y': y_reg, 'theta': regression_theta,
'predict': create_regression_prediction}
classification_pack = {'locked': True, 'filepath': xe_filepath,
'X':X_bin, 'y': y_bin, 'theta': classification_theta,
'predict': create_classification_prediction}
multiclass_pack = {'locked': True, 'filepath': cxe_filepath, 'X':X_multi,
'y': y_multi, 'theta': multiclass_theta,
'predict': create_multiclass_prediction}
data_packs = [regression_pack, classification_pack, multiclass_pack]
# Write to files
for data in data_packs:
if data['locked']:
pass
else:
X = | pd.DataFrame(data=data['X']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@author: oustry
"""
from FDFDRadiowaveSimulator import FDFDRadiowaveSimulator
from pandas import DataFrame,read_csv
import time
def FirstExample():
"""
First example of use of the FDFDRadiowaveSimulator class. Generate a .png file
Returns
-------
None.
"""
mapname = "MAP4"
sim = FDFDRadiowaveSimulator(mapname)
#Reading map parameters
param = read_csv("sources/"+mapname+".csv")
sim.set_parameters(float(param["dx"]),0.5*float(param["lambda"]),float(param["opt_ind_walls"]),float(param["alpha_walls"]))
#Generates a bitmap in the output folder describing the field
Psi = sim.solve(350,125,True)
def Generate_Gain_Matrix(mapname,nodes_index,wavelength,wlindex):
"""
Generates the gain matrix associated to a map and a set of sources
Parameters
----------
mapname : string
The map name.
nodes_index : int
File index of the nodes positions.
wavelength: float, in meters
Returns
-------
None.
"""
#Simulator
sim = FDFDRadiowaveSimulator(mapname)
#Reading map parameters in the corresponding files (in the "sources" folder)
param = read_csv("sources/"+mapname+".csv")
sim.set_parameters(float(param["dx"]),wavelength,float(param["opt_ind_walls"]),float(param["alpha_walls"]))
#Reading clients and candidates positions in the corresponding files (in the "sources" folder)
dataframe_candidates,dataframe_clients = read_csv("sources/"+mapname+"_CA_"+str(nodes_index)+".csv"),read_csv("sources/"+mapname+"_CL_"+str(nodes_index)+".csv")
list_candidates = [(dataframe_candidates['X'][i],dataframe_candidates['Y'][i]) for i in range(len(dataframe_candidates))]
list_clients = [(dataframe_clients['X'][i],dataframe_clients['Y'][i]) for i in range(len(dataframe_clients))]
floor = [dataframe_clients['Floor'][i] for i in range(len(dataframe_clients))] + [dataframe_candidates['Floor'][i] for i in range(len(dataframe_candidates))]
#Computing 2D gain matrix
t0 = time.time()
gain = sim.gain_matrix(list_clients + list_candidates)
total_sim_time = time.time()-t0
print("\n Total simulation time = {0}s".format(total_sim_time))
sim.export_stats()
#Applying floor attenuation factor (2.5D method)
t0 = time.time()
att_gain = float(param["g"])
for i in range(len(gain)):
for j in range(len(gain)):
d=abs(floor[i]-floor[j])
gain[i,j] = gain[i,j]*(att_gain**d)
total_proj_time = time.time()-t0
print("\n Total projection time = {0}s".format(total_proj_time))
#Storing the gain matrix
G = | DataFrame(gain) | pandas.DataFrame |
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# todo: Clean this up! Make it into a real module
import os, sys, itertools
import networkx as nx
import pandas as pd
from statsmodels.tsa.stattools import ccf
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['font.sans-serif'] = 'Arial'
from scipy.stats import pearsonr
from sklearn.metrics.cluster import entropy
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.utils.validation import check_array
from math import log
from Swing.util.Evaluator import Evaluator
def get_experiment_list(filename, timepoints=None, perturbs=None, time_col='Time'):
# load files
timecourse = pd.read_csv(filename, sep="\t")
if timepoints is None:
timepoints = len(set(timecourse[time_col]))
if perturbs is None:
perturbs = len(timecourse)/timepoints
if perturbs.is_integer():
perturbs = int(perturbs)
else:
raise ValueError("Uneven number of timepoints between perturbation experiments")
# divide into list of dataframes
experiments = []
for i in range(0, timepoints * perturbs - timepoints + 1, timepoints):
experiments.append(timecourse.loc[i:i + timepoints - 1])
# reformat
for idx, exp in enumerate(experiments):
exp = exp.set_index(time_col)
experiments[idx] = exp
return experiments
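# Hedged usage sketch (not in the original module): split a hypothetical
# tab-separated timecourse with 6 timepoints per perturbation experiment.
# 'timecourse.tsv' and the 'Time' column are assumptions used only for illustration.
def _example_get_experiment_list():
    experiments = get_experiment_list('timecourse.tsv', timepoints=6, time_col='Time')
    # each element is one perturbation experiment, indexed by its timepoints
    return [exp.shape for exp in experiments]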
def xcorr_experiments(experiments, gene_axis=1):
"""
Cross correlate the gene expression profiles in each experiment.
:param experiments: list
list of dataframes
:param gene_axis: int
axis corresponding to each gene. 0 for rows, 1 for columns
:return:
"""
return np.array([cc_experiment(experiment.values.T) if gene_axis == 1 else cc_experiment(experiment.values)
for experiment in experiments])
def cc_experiment(experiment):
"""
For one experiment.
`experiment` should be n rows (genes) by m columns (timepoints)
:param experiment:
:return:
"""
ccf_array = np.zeros((experiment.shape[0], experiment.shape[0], experiment.shape[1]))
for ii, static in enumerate(experiment):
for jj, moving in enumerate(experiment):
if ii == jj:
unbiased = True
else:
unbiased = False
ccf_array[ii][jj] = ccf(static, moving, unbiased=unbiased)
return ccf_array
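# Hedged sketch (illustrative only): cc_experiment expects genes on rows and
# timepoints on columns and returns a (genes x genes x timepoints) array of
# cross-correlation functions. The random data below is a stand-in, not real input.
def _example_cc_experiment():
    toy_experiment = np.random.rand(3, 10)   # 3 genes, 10 timepoints
    ccf_array = cc_experiment(toy_experiment)
    return ccf_array.shape                   # (3, 3, 10)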
def get_xcorr_indices(diff_ts, lag, tolerance):
pair_list = []
# get all pairs
targets = np.array(np.where((diff_ts >= lag-tolerance ) & (diff_ts <= lag+tolerance)))
n_ind = targets.shape[1]
pair_list = [tuple(targets[:,x]) for x in range(n_ind)]
# only keep tuples where the parent index is greater than the child
if lag != 0:
pair_list = [ x for x in pair_list if x[1] < x[2]]
p_pair_list = [(x[0],x[1]) for x in pair_list]
c_pair_list = [(x[0],x[2]) for x in pair_list]
return(p_pair_list,c_pair_list)
def get_pairwise_xcorr(parent,child,experiment,time_map,lag,tolerance,rc):
ts_shape = time_map.shape[1]-1
ts = time_map.iloc[:,:ts_shape]
ts = ts.values
all_ps_values = np.zeros(rc)
all_cs_values = np.zeros(rc)
# make an array of differences
diff_ts = np.abs(ts[:,:,None] - ts[:,None,:])
# get all indices with the same difference
ps_values = np.zeros(rc)
cs_values = np.zeros(rc)
ps = [x[parent].values for x in experiment]
cs = [x[child].values for x in experiment]
all_ps_values = np.vstack(ps)
all_cs_values = np.vstack(cs)
p_idx,c_idx = get_xcorr_indices(diff_ts, lag, tolerance)
ps_values = [all_ps_values[x] for x in p_idx]
cs_values = [all_cs_values[x] for x in c_idx]
rsq, pval = pearsonr(ps_values,cs_values)
#c_xy = np.histogram2d(ps_values, cs_values, 10)[0]
#n_samples = len(ps_values)
#mi = my_mutual_info_score(n_samples, x_val = ps_values, y_val = cs_values, labels_true=None, labels_pred=None, contingency=c_xy)
#print(mi, parent, child, lag)
return(rsq,pval)
def my_mutual_info_score(n_samples, x_val, y_val, labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings.
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency : {None, array, sparse matrix},
shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi : float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
else:
contingency = check_array(contingency,
accept_sparse=['csr', 'csc', 'coo'],
dtype=[int, np.int32, np.int64])
if isinstance(contingency, np.ndarray):
# For an array
nzx, nzy = np.nonzero(contingency)
nz_val = contingency[nzx, nzy]
elif sp.issparse(contingency):
# For a sparse matrix
nzx, nzy, nz_val = sp.find(contingency)
else:
raise ValueError("Unsupported type for 'contingency': %s" %
type(contingency))
contingency_sum = contingency.sum()
pi = np.ravel(contingency.sum(axis=1))
pj = np.ravel(contingency.sum(axis=0))
log_contingency_nm = np.log(nz_val)
contingency_nm = nz_val / contingency_sum
# Don't need to calculate the full outer product, just for non-zeroes
outer = pi.take(nzx) * pj.take(nzy)
log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
contingency_nm * log_outer)
mi = mi.sum()
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(x_val), entropy(y_val)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
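# Hedged sketch mirroring the commented-out usage in get_pairwise_xcorr above;
# the toy signals are assumptions used only to show the expected call pattern.
def _example_my_mutual_info_score():
    x = np.random.rand(100)
    y = x + 0.1 * np.random.rand(100)
    c_xy = np.histogram2d(x, y, 10)[0]
    return my_mutual_info_score(100, x_val=x, y_val=y,
                                labels_true=None, labels_pred=None,
                                contingency=c_xy)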
def calc_edge_lag2(experiments,genes, signed_edge_list=None, tolerance = 8, rc = (23,6), mode=None):
# load the interval file
edges = signed_edge_list['regulator-target']
#initialize dataframe to return
col, row = np.meshgrid(range(len(genes)), range(len(genes)))
edge_lag = pd.DataFrame()
edge_lag['parent'] = np.array(genes)[row.flatten()]
edge_lag['child'] = np.array(genes)[col.flatten()]
edge_lag['Edge'] = list(zip(edge_lag['parent'], edge_lag['child']))
lag_results = []
if mode == 'marbach':
time_map = pd.read_csv('../../data/invitro/marbach_timesteps.tsv', sep='\t')
rc = (23,6)
lags = [0,5,10,20,30,40]
tolerance = 3
else:
time_map = pd.read_csv('../../data/invitro/omranian_timesteps.tsv', sep='\t')
lags = [0,10,20,30,60,90]
time_steps = time_map['Timestep'].tolist()
for edge in edges:
# Ignore self edges
if edge[0] == edge[1]:
continue
tolerance = 8
c_list = []
for lag in lags:
r,p = get_pairwise_xcorr(edge[0],edge[1],experiments,time_map,lag,tolerance,rc)
c_list.append((lag,r,p))
sign = signed_edge_list[signed_edge_list['regulator-target'] == edge]['signs'].tolist()
best_lag = min(c_list, key = lambda x: x[2])
if best_lag[2] > 0.05/len(edges):
true_lag = np.nan
else:
true_lag = best_lag[0]
lag_results.append({'Edge':edge, 'Lag':true_lag, 'Sign': sign, 'Lag_list': c_list})
lag_results = pd.DataFrame(lag_results)
edge_lag = pd.merge(edge_lag, lag_results, how='outer', on='Edge')
lag_results['parent'] = [x[0] for x in lag_results['Edge'].tolist()]
lag_results['child'] = [x[1] for x in lag_results['Edge'].tolist()]
return(lag_results, edge_lag)
def calc_edge_lag(xcorr, genes, sc_frac=0.1, min_ccf=0.5, timestep=1, signed_edge_list=None, flat=True, return_raw=False):
"""
:param xcorr: 4d array
4 axes in order: experiments, parent, child, time
:param genes: list
:param sc_frac: float
related to filtering; see filter_ccfs
:param min_ccf: float
minimum cross correlation needed to call a lag
:param timestep: int
:param signed_edge_list: dataframe
can be a list of signed edges or none (default)
maximize either negative or positive correlation depending on prior information
:param flat: boolean
true: return the mean lag for each edge
false: return the list of all lags (for each exp) for each edge
:return:
"""
e, p, c, t = xcorr.shape
if signed_edge_list is not None:
edges = signed_edge_list['regulator-target']
else:
edges = itertools.product(genes, genes)
lag_estimate = np.zeros((p,c))
sc_thresh = sc_frac * t
#initialize dataframe to return
col, row = np.meshgrid(range(len(genes)), range(len(genes)))
edge_lag = pd.DataFrame()
edge_lag['Parent'] = np.array(genes)[row.flatten()]
edge_lag['Child'] = np.array(genes)[col.flatten()]
edge_lag['Edge'] = list(zip(edge_lag['Parent'], edge_lag['Child']))
lag_results = []
for edge in edges:
# Ignore self edges
if edge[0] == edge[1]:
continue
p_idx = genes.index(edge[0])
c_idx = genes.index(edge[1])
if signed_edge_list is not None:
sign = signed_edge_list[signed_edge_list['regulator-target'] == edge]['signs'].tolist()[0]
# The ccf keeps the parent static and moves the child. Therefore the reversed xcorr would show the true lag
reverse = xcorr[:, c_idx, p_idx]
filtered = filter_ccfs(reverse, sc_thresh, min_ccf)
if filtered.shape[0] > 0:
# f, axarr = plt.subplots(1,2)
# axarr[0].plot(reverse.T)
# axarr[1].plot(filtered.T)
# plt.show()
# default setting
if flat:
if signed_edge_list is None:
lag_estimate[p_idx, c_idx] = float(np.mean(np.argmax(np.abs(filtered), axis=1)))*timestep
elif sign == '+':
lag_estimate[p_idx, c_idx] = float(np.mean(np.argmax(filtered, axis=1)))*timestep
elif sign == '-':
lag_estimate[p_idx, c_idx] = float(np.mean(np.argmin(filtered, axis=1)))*timestep
elif sign == '+-':
lag_estimate[p_idx, c_idx] = float(np.mean(np.argmax(np.abs(filtered), axis=1)))*timestep
edge_lag['Lag'] = lag_estimate.flatten()
elif not flat:
if sign == '+':
lag = [float(x) for x in np.argmax(filtered, axis=1)]*timestep
elif sign == '-':
lag = [float(x) for x in np.argmin(filtered, axis=1)]*timestep
elif sign == '+-':
lag = [float(x) for x in np.argmax(np.abs(filtered), axis=1)]*timestep
lag_results.append({'Edge':edge, 'Lag':lag, 'Raw_CCF': lag})
if not flat:
lag_results = pd.DataFrame(lag_results)
edge_lag = | pd.merge(edge_lag, lag_results, how='outer', on='Edge') | pandas.merge |
# coding: utf-8
# In[1]:
import pandas as pd
import os
import matplotlib.pyplot as plt
import re
import numpy as np
from scipy.stats import mode
from nltk import skipgrams
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import itertools
import lightgbm as lgb
from lightgbm import LGBMClassifier
from sklearn.model_selection import cross_val_score, RandomizedSearchCV
from sklearn.metrics import accuracy_score
from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn import decomposition, ensemble
from sklearn.pipeline import Pipeline
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from xgboost import XGBClassifier
from rgf.sklearn import FastRGFClassifier
from sklearn.model_selection import GridSearchCV
stop_words = set(stopwords.words('english'))
SEED = 42
join = os.path.join
# In[68]:
data = | pd.read_csv('Devex_train.csv', encoding="latin-1") | pandas.read_csv |
"""Estimate direct damages to physical assets exposed to hazards
"""
import sys
import os
import pandas as pd
import geopandas as gpd
from shapely import wkb
import numpy as np
from analysis_utils import *
from tqdm import tqdm
tqdm.pandas()
def main(config):
incoming_data_path = config['paths']['incoming_data']
processed_data_path = config['paths']['data']
results_data_path = config['paths']['results']
country_attributes = [
{
'country': 'kenya',
},
{
'country': 'tanzania',
},
{
'country': 'uganda',
},
{
'country': 'zambia',
},
]
damage_data_path = os.path.join(processed_data_path,
"damage_estimation_processing")
asset_data_details = pd.read_csv(os.path.join(damage_data_path,
"network_layers_hazard_intersections_details.csv"))
for country in country_attributes:
param_values = pd.read_csv(os.path.join(damage_data_path, f"{country['country']}_parameter_combinations.txt"), sep=" ")
direct_damages_results = os.path.join(results_data_path,
country["country"],
"direct_damages")
for asset_info in asset_data_details.itertuples():
asset_damages_results = os.path.join(
results_data_path,
country["country"],
"direct_damages",
f"{asset_info.asset_gpkg}_{asset_info.asset_layer}"
)
for param in param_values.itertuples():
damage_file = os.path.join(
asset_damages_results,
f"{asset_info.asset_gpkg}_{asset_info.asset_layer}_direct_damages_parameter_set_{param.parameter_set}.csv"
)
if os.path.isfile(damage_file) is True:
expected_damages = []
df = | pd.read_csv(damage_file) | pandas.read_csv |
"""Compare different GNSS SPV Where datasets
Description:
------------
A dictionary with datasets is used as input for this writer. The keys of the dictionary are station names.
Example:
--------
from where import data
from where import writers
# Read a dataset
dset = data.Dataset(rundate=rundate, tech=tech, stage=stage, dataset_name=name, dataset_id=dataset_id)
# Write dataset
writers.write_one('gnss_spv_comparison_report', dset=dset, do_report=False)
"""
# Standard library imports
from typing import Any, Dict
# External library imports
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Midgard imports
from midgard.dev import plugins
from midgard.plot.matplotlib_extension import plot
# Where imports
import where
from where.lib import config
from where.lib import log
from where.writers._report import Report
FIGURE_FORMAT = "png"
FILE_NAME = __name__.split(".")[-1]
@plugins.register
def gnss_spv_comparison_report(dset: Dict[str, "Dataset"]) -> None:
"""Compare GNSS SPV datasets
Args:
dset: Dictionary with station name as keys and the belonging Dataset as value
"""
dset_first = dset[list(dset.keys())[0]]
dset_first.vars["solution"] = config.tech.gnss_spv_comparison_report.solution.str.lower()
# Generate figure directory to save figures generated for GNSS report
figure_dir = config.files.path(
"output_gnss_spv_comparison_report_figure", file_vars={**dset_first.vars, **dset_first.analysis}
)
figure_dir.mkdir(parents=True, exist_ok=True)
# Generate plots
_, dfs_day, dfs_month = _generate_dataframes(dset)
_plot_velocity_error(dfs_day, dfs_month, figure_dir, dset_first.vars)
# Generate GNSS comparison report
path = config.files.path("output_gnss_spv_comparison_report", file_vars={**dset_first.vars, **dset_first.analysis})
with config.files.open_path(path, create_dirs=True, mode="wt") as fid:
rpt = Report(
fid, rundate=dset_first.analysis["rundate"], path=path, description="Comparison of GNSS SPV analyses"
)
rpt.title_page()
_add_to_report(rpt, figure_dir, dfs_day, dfs_month, dset_first.vars)
rpt.markdown_to_pdf()
def _add_to_report(
rpt: "Report",
figure_dir: "pathlib.PosixPath",
dfs_day: Dict[str, pd.core.frame.DataFrame],
dfs_month: Dict[str, pd.core.frame.DataFrame],
file_vars: Dict[str, Any],
) -> None:
"""Add figures and tables to report
Args:
rpt: Report object.
figure_dir: Figure directory.
dfs_day: Dictionary with fields as keys (e.g. 2d_vel, 3d_vel) and the belonging dataframe as value with DAILY
samples of 95th percentile and stations as columns.
dfs_month: Dictionary with fields as keys (e.g. 2d_vel, 3d_vel) and the belonging dataframe as value with MONTHLY
samples of 95th percentile and stations as columns.
file_vars: File variables used for file and plot title naming.
"""
for sample_name in ["Daily", "Monthly"]:
rpt.add_text(f"\n# {sample_name} 95th percentile 2D and 3D solutions\n\n")
if sample_name == "Daily":
for field in dfs_day.keys():
dfs_day[field].index = dfs_day[field].index.strftime("%d-%m-%Y")
rpt.add_text("Daily 95th percentile 2D velocity results in meter/second:")
rpt.write_dataframe_to_markdown(dfs_day["2d_vel"], format="6.3f", statistic=True)
rpt.add_text("Daily 95th percentile 3D velocity in meter/second:")
rpt.write_dataframe_to_markdown(dfs_day["3d_vel"], format="6.3f", statistic=True)
elif sample_name == "Monthly":
rpt.add_text("Monthly 95th percentile 2D velocity results in meter/second:")
rpt.write_dataframe_to_markdown(dfs_month["2d_vel"], format="6.3f")
rpt.add_text("Monthly 95th percentile 3D velocity results in meter/second:")
rpt.write_dataframe_to_markdown(dfs_month["3d_vel"], format="6.3f")
# Add 2D and 3D velocity plots
rpt.add_figure(
f"{figure_dir}/plot_2d_vel_{sample_name.lower()}_{file_vars['date']}_{file_vars['solution'].lower()}.{FIGURE_FORMAT}",
caption="95th percentile for 2D velocity.",
clearpage=True,
)
rpt.add_figure(
f"{figure_dir}/plot_3d_vel_{sample_name.lower()}_{file_vars['date']}_{file_vars['solution'].lower()}.{FIGURE_FORMAT}",
caption="95th percentile for 3D velocity.",
clearpage=True,
)
def _generate_dataframes(dset: Dict[str, "Dataset"]) -> Dict[str, pd.core.frame.DataFrame]:
"""Generate dataframe based on station datasets
The dataframe for each station in dictionary "dfs" has following columns:
east: East-coordinate in topocentric system
north: North-coordinate in topocentric system
up: Up-coordinate in topocentric system
hpe: horizontal position error
vpe: vertical position error
Example for "dfs" dictionary:
'hons': time.gps 2d_vel 3d_vel
0 2019-03-01 00:00:00 0.301738 0.057244
1 2019-03-01 00:00:00 0.301738 0.057244
'krss': time.gps 2d_vel 3d_vel
0 2019-03-01 00:00:00 0.710014 0.186791
1 2019-03-01 00:00:00 0.710014 0.186791
Example for "dfs_day" dictionary:
'2d_vel': nabf vegs hons krss
time.gps
2019-03-01 1.368875 0.935687 1.136763 0.828754
2019-03-02 0.924839 0.728280 0.911677 0.854832
'3d_vel': nabf vegs hons krss
time.gps
2019-03-01 1.715893 1.147265 1.600330 0.976541
2019-03-02 1.533437 1.307373 1.476295 1.136991
Example for "dfs_month" dictionary:
'2d_vel': nabf vegs hons krss
Mar-2019 1.186240 0.861718 1.095827 1.021354
Apr-2019 0.891947 0.850343 0.977908 0.971099
'3d_vel': nabf vegs hons krss
Mar-2019 1.854684 1.291406 1.450466 1.225467
Apr-2019 1.964404 1.706507 1.687994 1.500742
Args:
dset: Dictionary with station name as keys and the belonging Dataset as value
Returns:
Tuple with following entries:
| Element | Description |
|----------------------|--------------------------------------------------------------------------------------|
| dfs | Dictionary with station name as keys and the belonging dataframe as value with |
| | following dataframe columns: 2d_vel, 3d_vel |
| dfs_day | Dictionary with fields as keys (e.g. 2d_vel, 3d_vel) and the belonging dataframe as |
| | value with DAILY samples of 95th percentile and stations as columns. |
| dfs_month | Dictionary with fields as keys (e.g. 2d_vel, 3d_vel) and the belonging dataframe as |
| | value with MONTHLY samples of 95th percentile and stations as columns. |
"""
dsets = dset
dfs = {}
dfs_day = {"2d_vel": pd.DataFrame(), "3d_vel": pd.DataFrame()}
dfs_month = {"2d_vel": pd.DataFrame(), "3d_vel": pd.DataFrame()}
for station, dset in dsets.items():
if dset.num_obs == 0:
log.warn(f"Dataset '{station}' is empty.")
continue
# Determine dataframe with 2d_vel and 3d_vel columns
# TODO: How to ensure that GPS time scale is used? fields=["time.gps", ...] no longer works.
df = dset.as_dataframe(fields=["time", "2d_vel", "3d_vel"])
if df.empty:
continue
else:
# Save data in dictionaries
dfs.update({station: df})
# TODO This does not work anymore for Pandas version 1.0: df_day = df.set_index("time").resample("D", how=lambda x: np.nanpercentile(x, q=95))
df_day = df.set_index("time").resample("D").apply(lambda x: np.nanpercentile(x, q=95))
for field in dfs_day.keys():
if dfs_day[field].empty:
dfs_day[field][station] = df_day[field]
else:
dfs_day[field] = | pd.concat([dfs_day[field], df_day[field]], axis=1) | pandas.concat |
import os
import json
import pickle
import sys
import traceback
import datetime as dt
import numpy as np
import pandas as pd
import mlflow
import mlflow.pytorch
import torch
from torch.utils.data import Dataset
from MultVAE_Dataset import BasicHotelDataset
from scipy import sparse
import src.modules.letor_metrics as lm
import argparse
parser = argparse.ArgumentParser(description='Use MultVAE model to predict on validation set.')
parser.add_argument('-m',
'--model_folder',
type = str,
required=True,
help='model_folder. should be a dir. Needs to be MultVAE class.',
)
parser.add_argument('-n',
'--model_run_id',
type = str,
required=True,
help='model_run_id. should be the run_id of all the models in the model_folder',
)
parser.add_argument('-e',
'--max_epoch',
type = int,
required=True,
help='max epoch, the last epoch that you want to validate towards',
)
parser.add_argument('-d',
'--dataset_pkl',
nargs = '?',
type = str,
help='dataset pkl. Should be a user_to_queries.pkl. Check preprocessing.py for info on that structure',
default ='/scratch/work/js11133/sad_data/processed/full/val/user_to_queries.pkl' )
parser.add_argument('-i',
'--hotel_hash',
nargs = '?',
type = str,
help='hotel_hash.json. Check make_hashes.py for info on the hash',
default ='/scratch/work/js11133/sad_data/processed/hotel_hash.json')
parser.add_argument('-u',
'--user_hash',
nargs = '?',
type = str,
help='user_hash.json. Check make_hashes.py for info on the hash',
default ='/scratch/work/js11133/sad_data/processed/user_hash.json')
parser.add_argument('-o',
'--output_dir',
nargs = '?',
type = str,
help='output directory where predictions will go',
)
args = parser.parse_args()
def get_single_query_interaction_vec(user_id_to_query_struct_dict,user_id,sr_id):
return user_id_to_query_struct_dict[user_id][0][sr_id]
def get_user_entire_interaction_vec(user_id_to_query_struct_dict,user_id):
return user_id_to_query_struct_dict[user_id][1]
def densify_sparse_vec(user_interaction_dict, hotel_length):
sparse_dok = sparse.dok_matrix((1,hotel_length),dtype=np.float32)
sparse_obs = sparse.dok_matrix((1,hotel_length),dtype=np.float32)
for j in user_interaction_dict.keys():
sparse_dok[0,j] = user_interaction_dict[j]
sparse_obs[0,j] = 1
return torch.tensor(sparse_dok.toarray()),torch.tensor(sparse_obs.toarray())
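# Hedged sketch (illustrative only): a sparse interaction dict keyed by hotel
# index is densified into a (1, hotel_length) tensor plus an observation mask.
# The indices and hotel_length below are assumptions, not real data.
def _example_densify_sparse_vec():
    interactions = {0: 1.0, 3: 0.0}
    dense, observed = densify_sparse_vec(interactions, hotel_length=5)
    return dense.shape, int(observed.sum().item())   # (torch.Size([1, 5]), 2)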
def val_metrics(model_folder,
model_run_id,
max_epoch,
dataset_pkl_path ,
hotel_hash ,
user_hash ,
output_dir
):
print('IN MAIN')
#Check for CUDA
if torch.cuda.is_available():
# Tell PyTorch to use the GPU.
device = torch.device("cuda")
print('There are %d GPU(s) available.' % torch.cuda.device_count())
print('We will use the GPU:', torch.cuda.get_device_name(0))
else:
print('No GPU available, using the CPU instead.')
device = torch.device("cpu")
# Load user to query_struct
with open(dataset_pkl_path,'rb') as f:
user_to_query_struct = pickle.load(f)
#Put the dataset into the dataloader
dataset = BasicHotelDataset(data_path = dataset_pkl_path, dict_path = hotel_hash)
#Create sr_id_to_user_id dictionary
sr_id_to_user_id_hashed = {}
for user_id_hashed in user_to_query_struct.keys():
sr_ids = user_to_query_struct[user_id_hashed][0].keys()
for sr_id in sr_ids:
sr_id_to_user_id_hashed[sr_id] = user_id_hashed
# Load hotel_id to index dictionary
with open(hotel_hash, 'r') as fp:
hotel_id_indexed = json.load(fp)
# Load user_id to index dictionary
with open(user_hash, 'r') as fp:
user_id_indexed = json.load(fp)
#invert the maps so we can go back to hotel_id and user_id
user_idx_to_user_id = {v: k for k, v in user_id_indexed.items()}
hotel_idx_to_hotel_id = {v: k for k, v in hotel_id_indexed.items()}
# Get user_idx to/from user_id mappings
dlkeys_to_user_id = dataset.idx_to_dataset_keys_dict
user_id_to_dlkeys = {v: k for k, v in dlkeys_to_user_id.items()}
#Make model_path_list
model_path_list =[]
for epoch in range(max_epoch):
model_name = 'multvae_{0}_epoch_{1}.uri'.format(model_run_id,str(int(640+epoch)))
model_path = os.path.join(model_folder,model_name)
model_path_list.append(model_path)
#Now loop over each model we need to eval on
ndcg_list = []
for model_path in model_path_list:
# Load our multVAE model
model = mlflow.pytorch.load_model(model_path)
print('Loaded model from ',model_path)
model.to(device)
print('loading done')
# generate predictions
df_list = []
for sr_id in sr_id_to_user_id_hashed.keys():
user_id = sr_id_to_user_id_hashed[sr_id]
user_id_unhashed = user_idx_to_user_id[user_id]
# GET SINGLE QUERY, OR ENTIRE interaction?
user_interaction_vec = get_single_query_interaction_vec(user_to_query_struct,user_id,sr_id)
x, observed_vec = densify_sparse_vec(user_interaction_vec,dataset.hotel_length)
label = x
#print(x.shape)
x = x.to(device)
x_preds, mu, logvar = model(x.unsqueeze(dim=0))
model.eval()
x_preds = pd.DataFrame({'score':x_preds.cpu().detach().squeeze().numpy(),
'observed':observed_vec.cpu().detach().squeeze().numpy(),
'label':label.cpu().detach().squeeze().numpy()}
)
x_preds = x_preds[x_preds['observed']==1]
x_preds['hotel_id'] = x_preds.index.map(hotel_idx_to_hotel_id.get)
x_preds['search_request_id'] = sr_id
x_preds['user_id'] = user_id_unhashed
df_list.append(x_preds)
print('end for loop')
pred_array = pd.concat(df_list)
print('concat ended')
pred_array['rank'] = pred_array\
.groupby('search_request_id')\
['score']\
.rank(ascending=False)
#Now that we have preds, calculate NDCG
ndcg_model = lm.ndcg(
pred_array,
groupby='search_request_id',
ranker='score')
ndcg_score = ndcg_model.mean()
print(ndcg_score)
ndcg_list.append(ndcg_score)
print(ndcg_list)
ndcg_df = | pd.DataFrame({'ndcg_score':ndcg_list}) | pandas.DataFrame |
import operator
from enum import Enum
from typing import Union, Any, Optional, Hashable
import numpy as np
import pandas as pd
import pandas_flavor as pf
from pandas.core.construction import extract_array
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_numeric_dtype,
is_string_dtype,
)
from pandas.core.reshape.merge import _MergeOperation
from janitor.utils import check, check_column
@pf.register_dataframe_method
def conditional_join(
df: pd.DataFrame,
right: Union[pd.DataFrame, pd.Series],
*conditions,
how: str = "inner",
sort_by_appearance: bool = False,
df_columns: Optional[Any] = None,
right_columns: Optional[Any] = None,
) -> pd.DataFrame:
"""
This is a convenience function that operates similarly to `pd.merge`,
but allows joins on inequality operators,
or a combination of equi and non-equi joins.
Join solely on equality are not supported.
If the join is solely on equality, `pd.merge` function
covers that; if you are interested in nearest joins, or rolling joins,
or the first match (lowest or highest) - `pd.merge_asof` covers that.
There is also the IntervalIndex, which is usually more efficient
for range joins, especially if the intervals do not overlap.
Column selection in `df_columns` and `right_columns` is possible using the
[`select_columns`][janitor.functions.select_columns.select_columns] syntax.
This function returns rows, if any, where values from `df` meet the
condition(s) for values from `right`. The conditions are passed in
as a variable argument of tuples, where the tuple is of
the form `(left_on, right_on, op)`; `left_on` is the column
label from `df`, `right_on` is the column label from `right`,
while `op` is the operator. For multiple conditions, the and(`&`)
operator is used to combine the results of the individual conditions.
The operator can be any of `==`, `!=`, `<=`, `<`, `>=`, `>`.
A binary search is used to get the relevant rows for non-equi joins;
this avoids a cartesian join, and makes the process less memory intensive.
For equi-joins, Pandas internal merge function is used.
The join is done only on the columns.
MultiIndex columns are not supported.
For non-equi joins, only numeric and date columns are supported.
Only `inner`, `left`, and `right` joins are supported.
If the columns from `df` and `right` have nothing in common,
a single index column is returned; else, a MultiIndex column
is returned.
Example:
>>> import pandas as pd
>>> import janitor
>>> df1 = pd.DataFrame({"value_1": [2, 5, 7, 1, 3, 4]})
>>> df2 = pd.DataFrame({"value_2A": [0, 3, 7, 12, 0, 2, 3, 1],
... "value_2B": [1, 5, 9, 15, 1, 4, 6, 3],
... })
>>> df1
value_1
0 2
1 5
2 7
3 1
4 3
5 4
>>> df2
value_2A value_2B
0 0 1
1 3 5
2 7 9
3 12 15
4 0 1
5 2 4
6 3 6
7 1 3
>>> df1.conditional_join(
... df2,
... ("value_1", "value_2A", ">="),
... ("value_1", "value_2B", "<=")
... )
value_1 value_2A value_2B
0 2 1 3
1 2 2 4
2 5 3 5
3 5 3 6
4 7 7 9
5 1 0 1
6 1 0 1
7 1 1 3
8 3 1 3
9 3 2 4
10 3 3 5
11 3 3 6
12 4 2 4
13 4 3 5
14 4 3 6
:param df: A pandas DataFrame.
:param right: Named Series or DataFrame to join to.
:param conditions: Variable argument of tuple(s) of the form
`(left_on, right_on, op)`, where `left_on` is the column
label from `df`, `right_on` is the column label from `right`,
while `op` is the operator. The operator can be any of
`==`, `!=`, `<=`, `<`, `>=`, `>`. For multiple conditions,
the and(`&`) operator is used to combine the results
of the individual conditions.
:param how: Indicates the type of join to be performed.
It can be one of `inner`, `left`, `right`.
Full join is not supported. Defaults to `inner`.
:param sort_by_appearance: Default is `False`.
This is useful for strictly non-equi joins,
where the user wants the original order maintained.
If True, values from `df` and `right`
that meet the join condition will be returned
in the final dataframe in the same order
that they were before the join.
:param df_columns: Columns to select from `df`.
It can be a single column or a list of columns.
It is also possible to rename the output columns via a dictionary.
:param right_columns: Columns to select from `right`.
It can be a single column or a list of columns.
It is also possible to rename the output columns via a dictionary.
:returns: A pandas DataFrame of the two merged Pandas objects.
"""
return _conditional_join_compute(
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
)
class _JoinOperator(Enum):
"""
List of operators used in conditional_join.
"""
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUAL = ">="
LESS_THAN_OR_EQUAL = "<="
STRICTLY_EQUAL = "=="
NOT_EQUAL = "!="
class _JoinTypes(Enum):
"""
List of join types for conditional_join.
"""
INNER = "inner"
LEFT = "left"
RIGHT = "right"
operator_map = {
_JoinOperator.STRICTLY_EQUAL.value: operator.eq,
_JoinOperator.LESS_THAN.value: operator.lt,
_JoinOperator.LESS_THAN_OR_EQUAL.value: operator.le,
_JoinOperator.GREATER_THAN.value: operator.gt,
_JoinOperator.GREATER_THAN_OR_EQUAL.value: operator.ge,
_JoinOperator.NOT_EQUAL.value: operator.ne,
}
less_than_join_types = {
_JoinOperator.LESS_THAN.value,
_JoinOperator.LESS_THAN_OR_EQUAL.value,
}
greater_than_join_types = {
_JoinOperator.GREATER_THAN.value,
_JoinOperator.GREATER_THAN_OR_EQUAL.value,
}
def _check_operator(op: str):
"""
Check that operator is one of
`>`, `>=`, `==`, `!=`, `<`, `<=`.
Used in `conditional_join`.
"""
sequence_of_operators = {op.value for op in _JoinOperator}
if op not in sequence_of_operators:
raise ValueError(
"The conditional join operator "
f"should be one of {sequence_of_operators}"
)
def _conditional_join_preliminary_checks(
df: pd.DataFrame,
right: Union[pd.DataFrame, pd.Series],
conditions: tuple,
how: str,
sort_by_appearance: bool,
df_columns: Any,
right_columns: Any,
) -> tuple:
"""
Preliminary checks for conditional_join are conducted here.
Checks include differences in number of column levels,
length of conditions, existence of columns in dataframe, etc.
"""
check("right", right, [pd.DataFrame, pd.Series])
df = df.copy()
right = right.copy()
if isinstance(right, pd.Series):
if not right.name:
raise ValueError(
"Unnamed Series are not supported for conditional_join."
)
right = right.to_frame()
if df.columns.nlevels != right.columns.nlevels:
raise ValueError(
"The number of column levels "
"from the left and right frames must match. "
"The number of column levels from the left dataframe "
f"is {df.columns.nlevels}, while the number of column levels "
f"from the right dataframe is {right.columns.nlevels}."
)
if not conditions:
raise ValueError("Kindly provide at least one join condition.")
for condition in conditions:
check("condition", condition, [tuple])
len_condition = len(condition)
if len_condition != 3:
raise ValueError(
"condition should have only three elements; "
f"{condition} however is of length {len_condition}."
)
for left_on, right_on, op in conditions:
check("left_on", left_on, [Hashable])
check("right_on", right_on, [Hashable])
check("operator", op, [str])
check_column(df, [left_on])
check_column(right, [right_on])
_check_operator(op)
if all(
(op == _JoinOperator.STRICTLY_EQUAL.value for *_, op in conditions)
):
raise ValueError("Equality only joins are not supported.")
check("how", how, [str])
checker = {jointype.value for jointype in _JoinTypes}
if how not in checker:
raise ValueError(f"'how' should be one of {checker}.")
check("sort_by_appearance", sort_by_appearance, [bool])
if (df.columns.nlevels > 1) and (
isinstance(df_columns, dict) or isinstance(right_columns, dict)
):
raise ValueError(
"Column renaming with a dictionary is not supported "
"for MultiIndex columns."
)
return (
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
)
def _conditional_join_type_check(
left_column: pd.Series, right_column: pd.Series, op: str
) -> None:
"""
Raise error if column type is not any of numeric or datetime or string.
"""
permitted_types = {
is_datetime64_dtype,
is_numeric_dtype,
is_string_dtype,
is_categorical_dtype,
}
for func in permitted_types:
if func(left_column):
break
else:
raise ValueError(
"conditional_join only supports "
"string, category, numeric, or date dtypes (without timezone) - "
f"'{left_column.name} is of type {left_column.dtype}."
)
lk_is_cat = is_categorical_dtype(left_column)
rk_is_cat = is_categorical_dtype(right_column)
if lk_is_cat & rk_is_cat:
if not left_column.array._categories_match_up_to_permutation(
right_column.array
):
raise ValueError(
f"'{left_column.name}' and '{right_column.name}' "
"should have the same categories, and the same order."
)
elif not is_dtype_equal(left_column, right_column):
raise ValueError(
f"Both columns should have the same type - "
f"'{left_column.name}' has {left_column.dtype} type;"
f"'{right_column.name}' has {right_column.dtype} type."
)
if (op in less_than_join_types.union(greater_than_join_types)) & (
(is_string_dtype(left_column) | is_categorical_dtype(left_column))
):
raise ValueError(
"non-equi joins are supported "
"only for datetime and numeric dtypes. "
f"{left_column.name} in condition "
f"({left_column.name}, {right_column.name}, {op}) "
f"has a dtype {left_column.dtype}."
)
return None
def _conditional_join_compute(
df: pd.DataFrame,
right: pd.DataFrame,
conditions: list,
how: str,
sort_by_appearance: bool,
df_columns: Any,
right_columns: Any,
) -> pd.DataFrame:
"""
This is where the actual computation
for the conditional join takes place.
A pandas DataFrame is returned.
"""
(
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
) = _conditional_join_preliminary_checks(
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
)
eq_check = False
le_lt_check = False
for condition in conditions:
left_on, right_on, op = condition
_conditional_join_type_check(df[left_on], right[right_on], op)
if op == _JoinOperator.STRICTLY_EQUAL.value:
eq_check = True
elif op in less_than_join_types.union(greater_than_join_types):
le_lt_check = True
df.index = range(len(df))
right.index = range(len(right))
multiple_conditions = len(conditions) > 1
if not multiple_conditions:
left_on, right_on, op = conditions[0]
result = _generic_func_cond_join(
df[left_on], right[right_on], op, multiple_conditions
)
if result is None:
return _create_conditional_join_empty_frame(
df, right, how, df_columns, right_columns
)
return _create_conditional_join_frame(
df,
right,
*result,
how,
sort_by_appearance,
df_columns,
right_columns,
)
if eq_check:
result = _multiple_conditional_join_eq(df, right, conditions)
elif le_lt_check:
result = _multiple_conditional_join_le_lt(df, right, conditions)
else:
result = _multiple_conditional_join_ne(df, right, conditions)
if result is None:
return _create_conditional_join_empty_frame(
df, right, how, df_columns, right_columns
)
return _create_conditional_join_frame(
df, right, *result, how, sort_by_appearance, df_columns, right_columns
)
def _less_than_indices(
left_c: pd.Series,
right_c: pd.Series,
strict: bool,
) -> tuple:
"""
Use binary search to get indices where left_c
is less than or equal to right_c.
If strict is True, then only indices
where `left_c` is less than
(but not equal to) `right_c` are returned.
A tuple of integer indexes
for left_c and right_c is returned.
"""
# no point going through all the hassle
if left_c.min() > right_c.max():
return None
any_nulls = pd.isna(right_c)
if any_nulls.any():
right_c = right_c[~any_nulls]
if right_c.empty:
return None
any_nulls = pd.isna(left_c)
if any_nulls.any():
left_c = left_c[~any_nulls]
if left_c.empty:
return None
any_nulls = None
if not right_c.is_monotonic_increasing:
right_c = right_c.sort_values(kind="stable")
left_index = left_c.index.to_numpy(dtype=int, copy=False)
left_c = extract_array(left_c, extract_numpy=True)
right_index = right_c.index.to_numpy(dtype=int, copy=False)
right_c = extract_array(right_c, extract_numpy=True)
search_indices = right_c.searchsorted(left_c, side="left")
# if any of the positions in `search_indices`
# is equal to the length of `right_keys`
# that means the respective position in `left_c`
# has no values from `right_c` that are less than
# or equal, and should therefore be discarded
len_right = right_c.size
rows_equal = search_indices == len_right
if rows_equal.any():
left_c = left_c[~rows_equal]
left_index = left_index[~rows_equal]
search_indices = search_indices[~rows_equal]
# the idea here is that if there are any equal values
# shift to the right to the immediate next position
# that is not equal
if strict:
rows_equal = right_c[search_indices]
rows_equal = left_c == rows_equal
# replace positions where rows are equal
# with positions from searchsorted('right')
# positions from searchsorted('right') will never
# be equal and will be the furthermost in terms of position
# example : right_c -> [2, 2, 2, 3], and we need
# positions where values are not equal for 2;
# the furthermost will be 3, and searchsorted('right')
# will return position 3.
if rows_equal.any():
replacements = right_c.searchsorted(left_c, side="right")
# now we can safely replace values
# with strictly less than positions
search_indices = np.where(rows_equal, replacements, search_indices)
# check again if any of the values
# have become equal to length of right_c
# and get rid of them
rows_equal = search_indices == len_right
if rows_equal.any():
left_c = left_c[~rows_equal]
left_index = left_index[~rows_equal]
search_indices = search_indices[~rows_equal]
if not search_indices.size:
return None
right_c = [right_index[ind:len_right] for ind in search_indices]
right_c = np.concatenate(right_c)
left_c = np.repeat(left_index, len_right - search_indices)
return left_c, right_c
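# Hedged sketch (not part of the library): for each left value the binary search
# above returns the right-frame positions whose values are strictly greater,
# since strict=True. The toy series below are illustrative assumptions.
def _example_less_than_indices():
    left = pd.Series([2, 5], index=[10, 11])
    right = pd.Series([1, 3, 4], index=[0, 1, 2])
    # value 2 pairs with right values 3 and 4; value 5 has no match and is dropped
    return _less_than_indices(left, right, strict=True)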
def _greater_than_indices(
left_c: pd.Series,
right_c: pd.Series,
strict: bool,
multiple_conditions: bool,
) -> tuple:
"""
Use binary search to get indices where left_c
is greater than or equal to right_c.
If strict is True, then only indices
where `left_c` is greater than
(but not equal to) `right_c` are returned.
if multiple_conditions is False, a tuple of integer indexes
for left_c and right_c is returned;
else a tuple of the index for left_c, right_c, as well
as the positions of left_c in right_c is returned.
"""
# quick break, avoiding the hassle
if left_c.max() < right_c.min():
return None
any_nulls = pd.isna(right_c)
if any_nulls.any():
right_c = right_c[~any_nulls]
if right_c.empty:
return None
any_nulls = pd.isna(left_c)
if any_nulls.any():
left_c = left_c[~any_nulls]
if left_c.empty:
return None
any_nulls = None
if not right_c.is_monotonic_increasing:
right_c = right_c.sort_values(kind="stable")
left_index = left_c.index.to_numpy(dtype=int, copy=False)
left_c = | extract_array(left_c, extract_numpy=True) | pandas.core.construction.extract_array |
import numpy as np
arr = np.arange(0,11)
print(arr)
print("----------------------------")
import pandas
df = | pandas.DataFrame([[1,2,3,4]], columns = ["A", "B", "C", "D"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 11:41:44 2018
@author: MichaelEK
"""
import os
import argparse
import types
import pandas as pd
import numpy as np
from pdsql import mssql
from datetime import datetime
import yaml
import itertools
import lowflows as lf
import util
pd.options.display.max_columns = 10
run_time_start = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
print(run_time_start)
try:
#####################################
### Read parameters file
base_dir = os.path.realpath(os.path.dirname(__file__))
with open(os.path.join(base_dir, 'parameters-test.yml')) as param:
param = yaml.safe_load(param)
# parser = argparse.ArgumentParser()
# parser.add_argument('yaml_path')
# args = parser.parse_args()
#
# with open(args.yaml_path) as param:
# param = yaml.safe_load(param)
    ## Integrity checks
use_types_check = np.in1d(list(param['misc']['use_types_codes'].keys()), param['misc']['use_types_priorities']).all()
if not use_types_check:
raise ValueError('use_type_priorities parameter does not encompass all of the use type categories. Please fix the parameters file.')
#####################################
### Read the hydro log
# max_date_stmt = "select max(RunTimeStart) from " + param.log_table + " where HydroTable='" + param.process_name + "' and RunResult='pass' and ExtSystem='" + param.ext_system + "'"
#
# last_date1 = mssql.rd_sql(server=param.hydro_server, database=param.hydro_database, stmt=max_date_stmt).loc[0][0]
#
# if last_date1 is None:
# last_date1 = '1900-01-01'
# else:
# last_date1 = str(last_date1.date())
#
    # print('Last successful date is ' + last_date1)
#######################################
### Read in source data and update accela tables in ConsentsReporting db
print('--Reading in source data...')
## Make object to contain the source data
db = types.SimpleNamespace()
for i, p in param['source data'].items():
setattr(db, i, mssql.rd_sql(p['server'], p['database'], p['table'], p['col_names'], rename_cols=p['rename_cols'], username=p['username'], password=p['password']))
if (p['database'] == 'Accela') & (not (p['table'] in ['Ecan.vAct_Water_AssociatedPermits', 'Ecan.vQA_Relationship_Actuals'])):
table1 = 'Accela.' + p['table'].split('Ecan.')[1]
print(table1)
t1 = getattr(db, i).copy().dropna(subset=p['pk'])
t1.drop_duplicates(p['pk'], inplace=True)
print('update in db')
new_ones, _ = mssql.update_from_difference(t1, param['output']['server'], param['output']['database'], table1, on=p['pk'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
######################################
### Populate base tables
print('--Update base tables')
## HydroGroup
hf1 = pd.DataFrame(param['misc']['HydroGroup'])
hf1['ModifiedDate'] = run_time_start
hf0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])
hf_diff1 = hf1[~hf1.HydroGroup.isin(hf0.HydroGroup)]
if not hf_diff1.empty:
mssql.to_mssql(hf_diff1, param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])
hf0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])
## Activity
act1 = param['misc']['Activities']['ActivityType']
act2 = pd.DataFrame(list(itertools.product(act1, hf0.HydroGroupID.tolist())), columns=['ActivityType', 'HydroGroupID'])
act2['ModifiedDate'] = run_time_start
act0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])
act_diff1 = act2[~act2[['ActivityType', 'HydroGroupID']].isin(act0[['ActivityType', 'HydroGroupID']]).any(axis=1)]
if not act_diff1.empty:
mssql.to_mssql(act_diff1, param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])
act0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])
# Combine activity and hydro features
act_types1 = pd.merge(act0[['ActivityID', 'ActivityType', 'HydroGroupID']], hf0[['HydroGroupID', 'HydroGroup']], on='HydroGroupID')
act_types1['ActivityName'] = act_types1['ActivityType'] + ' ' + act_types1['HydroGroup']
## AlloBlock
ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
sw_blocks1 = pd.Series(db.wap_allo['sw_allo_block'].unique())
gw_blocks1 = pd.Series(db.allocated_volume['allo_block'].unique())
# Fixes
wap_allo1 = db.wap_allo.copy()
wap_allo1['sw_allo_block'] = wap_allo1['sw_allo_block'].str.strip()
wap_allo1.loc[wap_allo1.sw_allo_block == 'Migration: Not Classified', 'sw_allo_block'] = 'A'
allo_vol1 = db.allocated_volume.copy()
allo_vol1['allo_block'] = allo_vol1['allo_block'].str.strip()
allo_vol1.loc[allo_vol1.allo_block == 'Migration: Not Classified', 'allo_block'] = 'A'
# Determine blocks and what needs to be added
sw_blocks1 = set(wap_allo1['sw_allo_block'].unique())
gw_blocks1 = set(allo_vol1['allo_block'].unique())
blocks1 = sw_blocks1.union(gw_blocks1)
ab1 = pd.DataFrame(list(itertools.product(blocks1, hf0.HydroGroupID.tolist())), columns=['AllocationBlock', 'HydroGroupID'])
ab1['ModifiedDate'] = run_time_start
ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
ab_diff1 = ab1[~ab1[['AllocationBlock', 'HydroGroupID']].isin(ab0[['AllocationBlock', 'HydroGroupID']]).any(axis=1)]
if not ab_diff1.empty:
mssql.to_mssql(ab_diff1, param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
# Combine alloblock and hydro features
ab_types1 = pd.merge(ab0[['AlloBlockID', 'AllocationBlock', 'HydroGroupID']], hf0[['HydroGroupID', 'HydroGroup']], on='HydroGroupID').drop('HydroGroupID', axis=1)
## Attributes
att1 = pd.DataFrame(param['misc']['Attributes'])
att1['ModifiedDate'] = run_time_start
att0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])
att_diff1 = att1[~att1.Attribute.isin(att0.Attribute)]
if not att_diff1.empty:
mssql.to_mssql(att_diff1, param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])
att0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])
##################################################
### Sites and streamdepletion
print('--Update sites tables')
## takes
wap_allo1['WAP'] = wap_allo1['WAP'].str.strip().str.upper()
    wap_allo1.loc[~wap_allo1.WAP.str.contains(r'[A-Z]+\d\d/\d\d\d\d'), 'WAP'] = np.nan
wap1 = wap_allo1['WAP'].unique()
wap1 = wap1[~pd.isnull(wap1)]
## Diverts
div1 = db.divert.copy()
div1['WAP'] = div1['WAP'].str.strip().str.upper()
    div1.loc[~div1.WAP.str.contains(r'[A-Z]+\d\d/\d\d\d\d'), 'WAP'] = np.nan
wap2 = div1['WAP'].unique()
wap2 = wap2[~pd.isnull(wap2)]
## Combo
waps = np.concatenate((wap1, wap2), axis=None)
## Check that all WAPs exist in the USM sites table
usm_waps1 = db.sites[db.sites.ExtSiteID.isin(waps)].copy()
usm_waps1[['NZTMX', 'NZTMY']] = usm_waps1[['NZTMX', 'NZTMY']].astype(int)
if len(wap1) != len(usm_waps1):
miss_waps = set(wap1).difference(set(usm_waps1.ExtSiteID))
print('Missing {} WAPs in USM'.format(len(miss_waps)))
wap_allo1 = wap_allo1[~wap_allo1.WAP.isin(miss_waps)].copy()
## Update ConsentsSites table
cs1 = usm_waps1[['ExtSiteID', 'SiteName']].copy()
# cs1['SiteType'] = 'WAP'
new_sites, _ = mssql.update_from_difference(cs1, param['output']['server'], param['output']['database'], 'ConsentsSites', on='ExtSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentsSites', 'pass', '{} sites updated'.format(len(new_sites)), username=param['output']['username'], password=param['output']['password'])
cs0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'ConsentsSites', ['SiteID', 'ExtSiteID'], username=param['output']['username'], password=param['output']['password'])
cs_waps2 = pd.merge(cs0, usm_waps1.drop('SiteName', axis=1), on='ExtSiteID')
cs_waps3 = pd.merge(cs_waps2, db.wap_sd, on='ExtSiteID').drop('ExtSiteID', axis=1).round()
new_waps, _ = mssql.update_from_difference(cs_waps3, param['output']['server'], param['output']['database'], 'SiteStreamDepletion', on='SiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'WAP', 'pass', '{} sites updated'.format(len(new_waps)), username=param['output']['username'], password=param['output']['password'])
## Read db table
# wap0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'SiteStreamDepletion')
## Make linked WAP-SiteID table
wap_site = cs0.rename(columns={'ExtSiteID': 'WAP'})
##################################################
### Permit table
print('--Update Permit table')
## Clean data
permits1 = db.permit.copy()
permits1['RecordNumber'] = permits1['RecordNumber'].str.strip().str.upper()
permits1['ConsentStatus'] = permits1['ConsentStatus'].str.strip()
permits1['EcanID'] = permits1['EcanID'].str.strip().str.upper()
permits1['FromDate'] = pd.to_datetime(permits1['FromDate'], infer_datetime_format=True, errors='coerce')
permits1['ToDate'] = pd.to_datetime(permits1['ToDate'], infer_datetime_format=True, errors='coerce')
permits1.loc[permits1['ConsentStatus'] == 'Issued - s124 Continuance', 'ToDate'] = permits1.loc[permits1['ConsentStatus'] == 'Issued - s124 Continuance', 'FromDate'] + pd.DateOffset(years=30)
permits1[['NZTMX', 'NZTMY']] = permits1[['NZTMX', 'NZTMY']].round()
permits1.loc[(permits1['FromDate'] < '1950-01-01'), 'FromDate'] = np.nan
permits1.loc[(permits1['ToDate'] < '1950-01-01'), 'ToDate'] = np.nan
## Filter data
permits2 = permits1.drop_duplicates('RecordNumber')
permits2 = permits2[permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull() & permits2['EcanID'].notnull()].copy()
# permits2 = permits2[(permits2['FromDate'] > '1950-01-01') & (permits2['ToDate'] > '1950-01-01') & (permits2['ToDate'] > permits2['FromDate']) & permits2.NZTMX.notnull() & permits2.NZTMY.notnull() & permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull() & permits2['EcanID'].notnull()].copy()
## Convert datetimes to date
permits2['FromDate'] = permits2['FromDate'].dt.date
permits2['ToDate'] = permits2['ToDate'].dt.date
permits2.loc[permits2['FromDate'].isnull(), 'FromDate'] = '1900-01-01'
permits2.loc[permits2['ToDate'].isnull(), 'ToDate'] = '1900-01-01'
## Save results
new_permits, _ = mssql.update_from_difference(permits2, param['output']['server'], param['output']['database'], 'Permit', on='RecordNumber', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'Permit', 'pass', '{} rows updated'.format(len(new_permits)), username=param['output']['username'], password=param['output']['password'])
## Read db table
permits0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Permit', username=param['output']['username'], password=param['output']['password'])
##################################################
### Parent-Child
print('--Update Parent-child table')
## Clean data
pc1 = db.parent_child.copy()
pc1['ParentRecordNumber'] = pc1['ParentRecordNumber'].str.strip().str.upper()
pc1['ChildRecordNumber'] = pc1['ChildRecordNumber'].str.strip().str.upper()
pc1['ParentCategory'] = pc1['ParentCategory'].str.strip()
pc1['ChildCategory'] = pc1['ChildCategory'].str.strip()
## Filter data
pc1 = pc1.drop_duplicates()
pc1 = pc1[pc1['ParentRecordNumber'].notnull() & pc1['ChildRecordNumber'].notnull()]
## Check foreign keys
crc1 = permits0.RecordNumber.unique()
pc2 = pc1[pc1.ParentRecordNumber.isin(crc1) & pc1.ChildRecordNumber.isin(crc1)].copy()
## Save results
new_pc, _ = mssql.update_from_difference(pc2, param['output']['server'], param['output']['database'], 'ParentChild', on=['ParentRecordNumber', 'ChildRecordNumber'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ParentChild', 'pass', '{} rows updated'.format(len(new_pc)), username=param['output']['username'], password=param['output']['password'])
## Read db table
pc0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'ParentChild', username=param['output']['username'], password=param['output']['password'])
#################################################
### AllocatedRatesVolumes
print('--Update Allocation tables')
attr1 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', ['AttributeID', 'Attribute'], username=param['output']['username'], password=param['output']['password'])
## Rates
# Clean data
wa1 = wap_allo1.copy()
wa1['RecordNumber'] = wa1['RecordNumber'].str.strip().str.upper()
wa1['take_type'] = wa1['take_type'].str.strip().str.title()
wa1['FromMonth'] = wa1['FromMonth'].str.strip().str.title()
wa1['ToMonth'] = wa1['ToMonth'].str.strip().str.title()
wa1['IncludeInSwAllocation'] = wa1['IncludeInSwAllocation'].str.strip().str.title()
wa1['AllocatedRate'] = pd.to_numeric(wa1['AllocatedRate'], errors='coerce').round(2)
wa1['WapRate'] = pd.to_numeric(wa1['WapRate'], errors='coerce').round(2)
wa1['VolumeDaily'] = pd.to_numeric(wa1['VolumeDaily'], errors='coerce').astype(int)
wa1['VolumeWeekly'] = pd.to_numeric(wa1['VolumeWeekly'], errors='coerce').astype(int)
wa1['Volume150Day'] = pd.to_numeric(wa1['Volume150Day'], errors='coerce').astype(int)
wa1.loc[wa1['FromMonth'] == 'Migration: Not Classified', 'FromMonth'] = 'Jul'
wa1.loc[wa1['ToMonth'] == 'Migration: Not Classified', 'ToMonth'] = 'Jun'
mon_mapping = {'Jan': 7, 'Feb': 8, 'Mar': 9, 'Apr': 10, 'May': 11, 'Jun': 12, 'Jul': 1, 'Aug': 2, 'Sep': 3, 'Oct': 4, 'Nov': 5, 'Dec': 6}
wa1.replace({'FromMonth': mon_mapping, 'ToMonth': mon_mapping}, inplace=True)
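    # e.g. (illustrative) a consent running 'Oct' to 'Apr' becomes FromMonth=4, ToMonth=10 in
    # this water-year numbering (Jul=1 ... Jun=12), so most ranges no longer wrap the year end.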
wa1.loc[wa1['IncludeInSwAllocation'] == 'No', 'IncludeInSwAllocation'] = False
wa1.loc[wa1['IncludeInSwAllocation'] == 'Yes', 'IncludeInSwAllocation'] = True
wa1.replace({'sw_allo_block': {'In Waitaki': 'A'}}, inplace=True)
# Check foreign keys
wa4 = wa1[wa1.RecordNumber.isin(crc1)].copy()
# Filters
# wa4 = wa2[(wa2.AllocatedRate > 0)].copy()
# wa3.loc[~wa3['IncludeInSwAllocation'], ['AllocatedRate', 'SD1', 'SD2']] = 0
# wa4 = wa3.drop('IncludeInSwAllocation', axis=1).copy()
# Find the missing WAPs per consent
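    # (The loop below walks the parent -> child consent chain in pc0 for consents whose rows
    #  have no WAP recorded, and reuses the first WAP found on a descendant consent; rows that
    #  still have no WAP after this are dropped.)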
crc_wap_mis1 = wa4.loc[wa4.WAP.isnull(), 'RecordNumber'].unique()
crc_wap4 = wa4[['RecordNumber', 'WAP']].drop_duplicates()
for i in crc_wap_mis1:
crc2 = pc0[np.in1d(pc0.ParentRecordNumber, i)].ChildRecordNumber.values
wap1 = []
while (len(crc2) > 0) & (len(wap1) == 0):
wap1 = crc_wap4.loc[np.in1d(crc_wap4.RecordNumber, crc2), 'WAP'].values
crc2 = pc0[np.in1d(pc0.ParentRecordNumber, crc2)].ChildRecordNumber.values
if len(wap1) > 0:
wa4.loc[wa4.RecordNumber == i, 'WAP'] = wap1[0]
wa4 = wa4[wa4.WAP.notnull()].copy()
wa4.rename(columns={'sw_allo_block': 'AllocationBlock'}, inplace=True)
# Distribute the months
cols1 = wa4.columns.tolist()
from_mon_pos = cols1.index('FromMonth')
to_mon_pos = cols1.index('ToMonth')
allo_rates_list = []
# c1 = 0
for val in wa4.itertuples(False, None):
from_month = int(val[from_mon_pos])
to_month = int(val[to_mon_pos])
if from_month > to_month:
mons = list(range(1, to_month + 1))
# c1 = c1 + 1
else:
mons = range(from_month, to_month + 1)
d1 = [val + (i,) for i in mons]
allo_rates_list.extend(d1)
col_names1 = wa4.columns.tolist()
col_names1.extend(['Month'])
wa5 = pd.DataFrame(allo_rates_list, columns=col_names1).drop(['FromMonth', 'ToMonth'], axis=1)
# Mean of all months
grp1 = wa5.groupby(['RecordNumber', 'take_type', 'AllocationBlock', 'WAP'])
mean1 = grp1[['WapRate', 'AllocatedRate', 'VolumeDaily', 'VolumeWeekly', 'Volume30Day', 'Volume150Day', 'SD1', 'SD2']].mean().round(2)
include1 = grp1['IncludeInSwAllocation'].first()
mon_min = grp1['Month'].min()
mon_min.name = 'FromMonth'
mon_max = grp1['Month'].max()
mon_max.name = 'ToMonth'
wa6 = pd.concat([mean1, mon_min, mon_max, include1], axis=1).reset_index()
# wa6['HydroGroup'] = 'Surface Water'
## Allocated Volume
av1 = allo_vol1.copy()
# clean data
av1['RecordNumber'] = av1['RecordNumber'].str.strip().str.upper()
av1['take_type'] = av1['take_type'].str.strip().str.title()
av1['IncludeInGwAllocation'] = av1['IncludeInGwAllocation'].str.strip().str.title()
av1.loc[av1['IncludeInGwAllocation'] == 'No', 'IncludeInGwAllocation'] = False
av1.loc[av1['IncludeInGwAllocation'] == 'Yes', 'IncludeInGwAllocation'] = True
av1['IncludeInGwAllocation'] = av1['IncludeInGwAllocation'].astype(bool)
# av1['AllocatedAnnualVolume'] = pd.to_numeric(av1['AllocatedAnnualVolume'], errors='coerce').astype(int)
av1['FullAnnualVolume'] = pd.to_numeric(av1['FullAnnualVolume'], errors='coerce').astype(int)
# av1.loc[av1['AllocatedAnnualVolume'] <= 0, 'AllocatedAnnualVolume'] = 0
# av1 = av1.loc[av1['AllocatedAnnualVolume'] > 0]
av1.rename(columns={'allo_block': 'AllocationBlock'}, inplace=True)
av1.drop('AllocatedAnnualVolume', axis=1, inplace=True)
av1.replace({'AllocationBlock': {'In Waitaki': 'A'}}, inplace=True)
av1.drop_duplicates(subset=['RecordNumber', 'take_type', 'AllocationBlock'], inplace=True)
## Combine volumes with rates
wa7 = pd.merge(av1, wa6, on=['RecordNumber', 'take_type', 'AllocationBlock'])
## Distribute the volumes by WapRate
wa8 = wa7.copy()
grp3 = wa8.groupby(['RecordNumber', 'take_type', 'AllocationBlock'])
wa8['WapRateAgg'] = grp3['WapRate'].transform('sum')
wa8['ratio'] = wa8['WapRate'] / wa8['WapRateAgg']
wa8.loc[wa8['ratio'].isnull(), 'ratio'] = 1
wa8['FullAnnualVolume'] = (wa8['FullAnnualVolume'] * wa8['ratio']).round()
wa8.drop(['WapRateAgg', 'ratio', 'VolumeDaily', 'VolumeWeekly', 'Volume30Day', 'Volume150Day', 'SD2', 'WapRate'], axis=1, inplace=True)
wa8 = wa8[wa8.FullAnnualVolume >= 0].copy()
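    # Worked example (illustrative): a consent/take-type/block group with two WAPs whose
    # WapRates are 6 and 4 gets ratios 0.6 and 0.4, so a FullAnnualVolume of 100000 is split
    # into 60000 and 40000; where the ratio cannot be computed it defaults to 1.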
## Add in stream depletion
# wa9 = pd.merge(wa8, db.wap_sd.rename(columns={'ExtSiteID': 'WAP'}), on='WAP').drop(['SD1_NZTMX', 'SD1_NZTMY', 'SD1_30Day', 'SD2_NZTMX', 'SD2_NZTMY', 'SD2_7Day', 'SD2_30Day', 'SD2_150Day', 'SD1', 'SD2'], axis=1)
#
# wa9['SD1_7Day'] = pd.to_numeric(wa9['SD1_7Day'], errors='coerce').round(0)
# wa9['SD1_150Day'] = pd.to_numeric(wa9['SD1_150Day'], errors='coerce').round(0)
## Combine with aquifer test storativity
# aq1 = db.wap_aquifer_test.dropna(subset=['storativity']).copy()
# aq1.rename(columns={'ExtSiteID': 'WAP'}, inplace=True)
# aq2 = aq1.groupby('WAP')['storativity'].mean().dropna().reset_index()
# aq2.storativity = True
#
# wa9 = pd.merge(wa9, aq2, on='WAP', how='left')
# wa9.loc[wa9.storativity.isnull(), 'storativity'] = False
## Distribute the rates and volumes by allocation hydro group
wa8['sw_rate'] = 0
wa8['gw_rate'] = 0
wa8['sw_vol'] = 0
wa8['gw_vol'] = 0
wa8.loc[wa8.take_type == 'Take Surface Water', 'sw_rate'] = wa8.loc[wa8.take_type == 'Take Surface Water', 'AllocatedRate']
wa8.loc[wa8.take_type == 'Take Groundwater', 'sw_rate'] = wa8.loc[wa8.take_type == 'Take Groundwater', 'SD1']
wa8.loc[wa8.take_type == 'Take Groundwater', 'gw_rate'] = wa8.loc[wa8.take_type == 'Take Groundwater', 'AllocatedRate'] - wa8.loc[wa8.take_type == 'Take Groundwater', 'SD1']
wa8.loc[wa8.take_type == 'Take Surface Water', 'sw_vol'] = wa8.loc[wa8.take_type == 'Take Surface Water', 'FullAnnualVolume']
wa8.loc[wa8.take_type == 'Take Groundwater', 'sw_vol'] = (wa8.loc[wa8.take_type == 'Take Groundwater', 'SD1']/wa8.loc[wa8.take_type == 'Take Groundwater', 'AllocatedRate']) * wa8.loc[wa8.take_type == 'Take Groundwater', 'FullAnnualVolume']
wa8.loc[wa8.take_type == 'Take Groundwater', 'gw_vol'] = (wa8.loc[wa8.take_type == 'Take Groundwater', 'gw_rate']/wa8.loc[wa8.take_type == 'Take Groundwater', 'AllocatedRate']) * wa8.loc[wa8.take_type == 'Take Groundwater', 'FullAnnualVolume']
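    # Worked example (illustrative): a 'Take Groundwater' row with AllocatedRate = 10 and
    # stream depletion SD1 = 4 gets sw_rate = 4 and gw_rate = 6; a FullAnnualVolume of 100000
    # is split in the same proportions (sw_vol = 40000, gw_vol = 60000). Surface water takes
    # keep their full rate and volume in the sw_* columns.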
allo_list = []
for k, row in wa8.iterrows():
# print(k)
if row['IncludeInSwAllocation']:
sw1 = row[['RecordNumber', 'AllocationBlock', 'WAP', 'FromMonth', 'ToMonth', 'sw_rate', 'sw_vol']].rename({'sw_rate': 'AllocatedRate', 'sw_vol': 'AllocatedAnnualVolume'})
sw1['HydroGroup'] = 'Surface Water'
allo_list.append(sw1.to_frame().T)
if row['IncludeInGwAllocation']:
gw1 = row[['RecordNumber', 'AllocationBlock', 'WAP', 'FromMonth', 'ToMonth', 'gw_rate', 'gw_vol']].rename({'gw_rate': 'AllocatedRate', 'gw_vol': 'AllocatedAnnualVolume'})
gw1['HydroGroup'] = 'Groundwater'
allo_list.append(gw1.to_frame().T)
rv1 = pd.concat(allo_list)
rv1['AllocatedAnnualVolume'] = pd.to_numeric(rv1['AllocatedAnnualVolume'])
rv1['AllocatedRate'] = pd.to_numeric(rv1['AllocatedRate'])
rv1['FromMonth'] = pd.to_numeric(rv1['FromMonth'], downcast='integer')
rv1['ToMonth'] = pd.to_numeric(rv1['ToMonth'], downcast='integer')
rv1.loc[rv1['AllocatedAnnualVolume'].isnull(), 'AllocatedAnnualVolume'] = 0
rv1.loc[rv1['AllocatedAnnualVolume'] == np.inf, 'AllocatedAnnualVolume'] = 0
rv1.loc[rv1['AllocatedRate'].isnull(), 'AllocatedRate'] = 0
rv1.loc[rv1['AllocatedRate'] == np.inf, 'AllocatedRate'] = 0
# Cut out the fat
rv4 = rv1[(rv1['AllocatedAnnualVolume'] > 0) | (rv1['AllocatedRate'] > 0)].copy()
## Calculate missing volumes and rates
ann_bool = rv4.AllocatedAnnualVolume == 0
rv4.loc[ann_bool, 'AllocatedAnnualVolume'] = (rv4.loc[ann_bool, 'AllocatedRate'] * 0.001*60*60*24*30.42* (rv4.loc[ann_bool, 'ToMonth'] - rv4.loc[ann_bool, 'FromMonth'] + 1))
rate_bool = rv4.AllocatedRate == 0
rv4.loc[rate_bool, 'AllocatedRate'] = (rv4.loc[rate_bool, 'AllocatedAnnualVolume'] / 60/60/24/30.42/ (rv4.loc[rate_bool, 'ToMonth'] - rv4.loc[rate_bool, 'FromMonth'] + 1) * 1000)
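    # Worked example (illustrative, assuming AllocatedRate is in L/s): a rate of 10 L/s over
    # 12 months (FromMonth=1, ToMonth=12) backfills an annual volume of
    #   10 * 0.001 * 60*60*24*30.42 * 12 ~= 315395 m3,
    # and the reverse formula recovers ~10 L/s from that volume.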
## Convert the rates and volumes to integers
rv4['AllocatedAnnualVolume'] = rv4['AllocatedAnnualVolume'].round().astype(int)
rv4['AllocatedRate'] = rv4['AllocatedRate'].round().astype(int)
## Merge tables for IDs
avr5 = pd.merge(rv4, ab_types1, on=['AllocationBlock', 'HydroGroup']).drop(['AllocationBlock', 'HydroGroup'], axis=1).copy()
avr6 = pd.merge(avr5, wap_site, on='WAP').drop('WAP', axis=1)
## Update CrcAlloSite table
crc_allo = avr6[['RecordNumber', 'AlloBlockID', 'SiteID']].copy()
crc_allo['SiteAllo'] = True
crc_allo['SiteType'] = 'WAP'
## Determine which rows should be updated
# old_crc_allo = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcAlloSite', where_in={'SiteAllo': [1], 'SiteType': ['WAP']})
#
# diff_dict = mssql.compare_dfs(old_crc_allo.drop(['CrcAlloSiteID', 'ModifiedDate'], axis=1), crc_allo, on=['RecordNumber', 'AlloBlockID', 'SiteID'])
#
# both1 = pd.concat([diff_dict['new'], diff_dict['diff']])
#
# rem1 = diff_dict['remove']
# Save results
new_crc_allo, rem_crc_allo = mssql.update_from_difference(crc_allo, param['output']['server'], param['output']['database'], 'CrcAlloSite', on=['RecordNumber', 'AlloBlockID', 'SiteID'], mod_date_col='ModifiedDate', where_cols=['SiteID', 'SiteType'], username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcAlloSite', 'pass', '{} rows updated'.format(len(new_crc_allo)), username=param['output']['username'], password=param['output']['password'])
# Read db table
allo_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcAlloSite', ['CrcAlloSiteID', 'RecordNumber', 'AlloBlockID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
# Remove old data if needed
if not rem_crc_allo.empty:
rem_crc_allo1 = pd.merge(allo_site0, rem_crc_allo, on=['RecordNumber', 'AlloBlockID', 'SiteID']).drop(['RecordNumber', 'AlloBlockID', 'SiteID'], axis=1)
mssql.del_table_rows(param['output']['server'], param['output']['database'], 'AllocatedRateVolume', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])
# mssql.del_table_rows(param['output']['server'], param['output']['database'], 'TSLowFlowRestr', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])
# mssql.del_table_rows(param['output']['server'], param['output']['database'], 'LowFlowConditions', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])
# mssql.del_table_rows(param['output']['server'], param['output']['database'], 'CrcAlloSite', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])
allo_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcAlloSite', ['CrcAlloSiteID', 'RecordNumber', 'AlloBlockID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
## Update AllocatedRateVolume table
avr7 = pd.merge(allo_site0, avr6, on=['RecordNumber', 'AlloBlockID', 'SiteID']).drop(['RecordNumber', 'AlloBlockID', 'SiteID'], axis=1).drop_duplicates('CrcAlloSiteID')
# Save results
new_avr, _ = mssql.update_from_difference(avr7, param['output']['server'], param['output']['database'], 'AllocatedRateVolume', on='CrcAlloSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'AllocatedRateVolume', 'pass', '{} rows updated'.format(len(new_avr)), username=param['output']['username'], password=param['output']['password'])
#################################################
### ConsentedRateVolume
print('--Update Consent tables')
## Clean data
crv1 = db.consented_takes.copy()
crv1['RecordNumber'] = crv1['RecordNumber'].str.strip().str.upper()
crv1['take_type'] = crv1['take_type'].str.strip().str.title()
crv1['LowflowCondition'] = crv1['LowflowCondition'].str.strip().str.upper()
crv1['ConsentedAnnualVolume'] = pd.to_numeric(crv1['ConsentedAnnualVolume'], errors='coerce').round()
crv1['ConsentedMultiDayVolume'] = pd.to_numeric(crv1['ConsentedMultiDayVolume'], errors='coerce').round()
crv1['ConsentedMultiDayPeriod'] = pd.to_numeric(crv1['ConsentedMultiDayPeriod'], errors='coerce').round()
crv1['ConsentedRate'] = pd.to_numeric(crv1['ConsentedRate'], errors='coerce')
crv1.loc[crv1['ConsentedMultiDayVolume'] <= 0, 'ConsentedMultiDayVolume'] = np.nan
crv1.loc[crv1['ConsentedMultiDayPeriod'] <= 0, 'ConsentedMultiDayPeriod'] = np.nan
crv1.loc[crv1['ConsentedRate'] <= 0, 'ConsentedRate'] = np.nan
crv1.loc[crv1['ConsentedAnnualVolume'] <= 0, 'ConsentedAnnualVolume'] = np.nan
crv1.loc[crv1['LowflowCondition'].isnull(), 'LowflowCondition'] = 'NO'
crv1.loc[(crv1['LowflowCondition'] == 'COMPLEX'), 'LowflowCondition'] = 'YES'
crv1.loc[crv1['LowflowCondition'] == 'NO', 'LowflowCondition'] = False
crv1.loc[crv1['LowflowCondition'] == 'YES', 'LowflowCondition'] = True
## Filter data
crv2 = crv1[crv1.ConsentedRate.notnull()]
## Check foreign keys
crv2 = crv2[crv2.RecordNumber.isin(crc1)].copy()
## Aggregate take types for counts and min/max month
grp4 = wa4.groupby(['RecordNumber', 'take_type', 'WAP'])
mon_min = grp4['FromMonth'].min()
mon_min.name = 'FromMonth'
mon_max = grp4['ToMonth'].max()
mon_max.name = 'ToMonth'
mon_min_max = pd.concat([mon_min, mon_max], axis=1)
mon_min_max1 = mon_min_max.reset_index()
grp5 = mon_min_max1.groupby(['RecordNumber', 'take_type'])
mon_min_max1['wap_count'] = grp5['WAP'].transform('count')
## Distribute WAPs to consents
crv3 = pd.merge(crv2, mon_min_max1, on=['RecordNumber', 'take_type'])
crv3[['ConsentedAnnualVolume', 'ConsentedMultiDayVolume']] = crv3[['ConsentedAnnualVolume', 'ConsentedMultiDayVolume']].divide(crv3['wap_count'], 0).round()
crv3['ConsentedRate'] = crv3['ConsentedRate'].divide(crv3['wap_count'], 0).round(2)
## Convert take types to ActivityID
take_types1 = act_types1[act_types1.ActivityType == 'Take'].copy()
crv4 = pd.merge(crv3.drop('wap_count', axis=1), take_types1[['ActivityID', 'ActivityName']], left_on='take_type', right_on='ActivityName').drop(['take_type', 'ActivityName'], axis=1)
## Convert WAPs to SiteIDs
crv5 = pd.merge(crv4, wap_site, on='WAP').drop('WAP', axis=1)
## Create CrcActSite table
crc_act = crv5[['RecordNumber', 'ActivityID', 'SiteID']].copy()
crc_act['SiteActivity'] = True
crc_act['SiteType'] = 'WAP'
# Save results
new_crc_act, rem_crc_act = mssql.update_from_difference(crc_act, param['output']['server'], param['output']['database'], 'CrcActSite', on=['RecordNumber', 'ActivityID', 'SiteID'], mod_date_col='ModifiedDate', where_cols=['RecordNumber', 'ActivityID', 'SiteID', 'SiteType'], username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcActSite', 'pass', '{} rows updated'.format(len(new_crc_act)), username=param['output']['username'], password=param['output']['password'])
# Read db table
act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
# Remove old data if needed
if not rem_crc_act.empty:
rem_crc_act1 = pd.merge(act_site0, rem_crc_act, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID'], axis=1)
del_stmt = "delete from {table} where {col} in ({val})"
# del_stmt1 = del_stmt.format(table='ConsentedAttributes', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt1, username=param['output']['username'], password=param['output']['password'])
#
# del_stmt2a = del_stmt.format(table='LinkedPermits', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt2a, username=param['output']['username'], password=param['output']['password'])
#
# del_stmt2b = del_stmt.format(table='LinkedPermits', col='OtherCrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt2b, username=param['output']['username'], password=param['output']['password'])
del_stmt3 = del_stmt.format(table='ConsentedRateVolume', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt3, username=param['output']['username'], password=param['output']['password'])
# del_stmt4 = del_stmt.format(table='CrcActSite', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt4, username=param['output']['username'], password=param['output']['password'])
act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
## Create ConsentedRateVolume table
crv6 = pd.merge(crv5, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID', 'LowflowCondition'], axis=1)
# Save results
new_crv, _ = mssql.update_from_difference(crv6, param['output']['server'], param['output']['database'], 'ConsentedRateVolume', on='CrcActSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentedRateVolume', 'pass', '{} rows updated'.format(len(new_crv)), username=param['output']['username'], password=param['output']['password'])
###########################################
### Diverts
## Clean
div1 = db.divert.copy()
div1['RecordNumber'] = div1['RecordNumber'].str.strip().str.upper()
div1['DivertType'] = div1['DivertType'].str.strip().str.title()
div1['LowflowCondition'] = div1['LowflowCondition'].str.strip().str.upper()
div1['ConsentedMultiDayVolume'] = pd.to_numeric(div1['ConsentedMultiDayVolume'], errors='coerce').round()
div1['ConsentedMultiDayPeriod'] = pd.to_numeric(div1['ConsentedMultiDayPeriod'], errors='coerce').round()
div1['ConsentedRate'] = pd.to_numeric(div1['ConsentedRate'], errors='coerce').round(2)
div1.loc[div1['ConsentedMultiDayVolume'] <= 0, 'ConsentedMultiDayVolume'] = np.nan
div1.loc[div1['ConsentedMultiDayPeriod'] <= 0, 'ConsentedMultiDayPeriod'] = np.nan
div1.loc[div1['ConsentedRate'] <= 0, 'ConsentedRate'] = np.nan
div1.loc[div1['LowflowCondition'].isnull(), 'LowflowCondition'] = 'NO'
div1.loc[(~div1['LowflowCondition'].isin(['NO', 'YES'])), 'LowflowCondition'] = 'YES'
div1.loc[div1['LowflowCondition'] == 'NO', 'LowflowCondition'] = False
div1.loc[div1['LowflowCondition'] == 'YES', 'LowflowCondition'] = True
div1['WAP'] = div1['WAP'].str.strip().str.upper()
    div1.loc[~div1.WAP.str.contains(r'[A-Z]+\d\d/\d\d\d\d'), 'WAP'] = np.nan
## Filter
div2 = div1[div1.WAP.notnull()]
## Check foreign keys
div2 = div2[div2.RecordNumber.isin(crc1)].copy()
## Check primary keys
div2 = div2.drop_duplicates(['RecordNumber', 'WAP'])
## Join to get the IDs and filter WAPs
div3 = pd.merge(div2, act_types1[['ActivityID', 'ActivityName']], left_on='DivertType', right_on='ActivityName').drop(['DivertType', 'ActivityName'], axis=1)
div3 = pd.merge(div3, wap_site, on='WAP').drop('WAP', axis=1)
## CrcActSite
crc_act_div = div3[['RecordNumber', 'ActivityID', 'SiteID']].copy()
crc_act_div['SiteActivity'] = True
crc_act_div['SiteType'] = 'WAP'
# Save results
new_crc_div, rem_crc_div = mssql.update_from_difference(crc_act_div, param['output']['server'], param['output']['database'], 'CrcActSite', on=['RecordNumber', 'ActivityID', 'SiteID'], mod_date_col='ModifiedDate', where_cols=['RecordNumber', 'ActivityID', 'SiteID', 'SiteType'], username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcActSite', 'pass', '{} rows updated'.format(len(new_crc_div)), username=param['output']['username'], password=param['output']['password'])
# Read db table
act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
## ConsentedRateVolume
crc_div = pd.merge(div3, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID', 'LowflowCondition'], axis=1).dropna(subset=['ConsentedRate', 'ConsentedMultiDayVolume'], how='all')
crc_div['FromMonth'] = 1
crc_div['ToMonth'] = 12
# Save results
new_crc_div, _ = mssql.update_from_difference(crc_div, param['output']['server'], param['output']['database'], 'ConsentedRateVolume', on='CrcActSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentedRateVolume', 'pass', '{} rows updated'.format(len(new_crc_div)), username=param['output']['username'], password=param['output']['password'])
###########################################
### Water use types
wu1 = db.water_use.copy()
## Clean
wu1['RecordNumber'] = wu1['RecordNumber'].str.strip().str.upper()
wu1['UseType'] = wu1['UseType'].str.strip().str.title()
wu1['ConsentedMultiDayVolume'] = pd.to_numeric(wu1['ConsentedMultiDayVolume'], errors='coerce').round()
wu1['ConsentedMultiDayPeriod'] = pd.to_numeric(wu1['ConsentedMultiDayPeriod'], errors='coerce').round()
wu1['ConsentedRate'] = pd.to_numeric(wu1['ConsentedRate'], errors='coerce').round(2)
wu1.loc[wu1['ConsentedMultiDayVolume'] <= 0, 'ConsentedMultiDayVolume'] = np.nan
wu1.loc[wu1['ConsentedMultiDayPeriod'] <= 0, 'ConsentedMultiDayPeriod'] = np.nan
wu1.loc[wu1['ConsentedRate'] <= 0, 'ConsentedRate'] = np.nan
spaces_bool = wu1['UseType'].str[3:5] == ' '
wu1.loc[spaces_bool, 'UseType'] = wu1.loc[spaces_bool, 'UseType'].str[:3] + wu1.loc[spaces_bool, 'UseType'].str[4:]
## Check foreign keys
wu2 = wu1[wu1.RecordNumber.isin(crc1)].copy()
    ## Split into WAPs by take type equivalent
wu3 = wu2.copy()
wu3['take_type'] = wu3['UseType'].str.replace('Use', 'Take')
wu4 = pd.merge(wu3, mon_min_max1, on=['RecordNumber', 'take_type'])
wu4['ConsentedMultiDayVolume'] = wu4['ConsentedMultiDayVolume'].divide(wu4['wap_count'], 0).round()
wu4['ConsentedRate'] = wu4['ConsentedRate'].divide(wu4['wap_count'], 0).round(2)
wu4.drop(['wap_count', 'take_type'], axis=1, inplace=True)
## Convert Use types to broader categories
types_cat = {}
for key, value in param['misc']['use_types_codes'].items():
for string in value:
types_cat[string] = key
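    # The loop above inverts the parameter mapping so each detailed use-type code points at its
    # broad category; e.g. (illustrative values, not taken from the parameters file)
    #   use_types_codes = {'Irrigation': ['Irrigation - Pasture', 'Irrigation - Arable']}
    # yields types_cat['Irrigation - Pasture'] == 'Irrigation'.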
types_check = np.in1d(wu4.WaterUse.unique(), list(types_cat.keys())).all()
if not types_check:
raise ValueError('Some use types are missing in the parameters file. Check the use type table and the parameters file.')
wu4.WaterUse.replace(types_cat, inplace=True)
wu4['WaterUse'] = wu4['WaterUse'].astype('category')
## Join to get the IDs and filter WAPs
wu5 = pd.merge(wu4, act_types1[['ActivityID', 'ActivityName']], left_on='UseType', right_on='ActivityName').drop(['UseType', 'ActivityName'], axis=1)
wu5 = pd.merge(wu5, wap_site, on='WAP').drop('WAP', axis=1)
## Drop duplicate uses
wu5.WaterUse.cat.set_categories(param['misc']['use_types_priorities'], True, inplace=True)
wu5 = wu5.sort_values('WaterUse')
wu6 = wu5.drop_duplicates(['RecordNumber', 'ActivityID', 'SiteID']).copy()
## CrcActSite
crc_act_wu = wu6[['RecordNumber', 'ActivityID', 'SiteID']].copy()
crc_act_wu['SiteActivity'] = True
crc_act_wu['SiteType'] = 'WAP'
# Save results
new_crv_wu, _ = mssql.update_from_difference(crc_act_wu, param['output']['server'], param['output']['database'], 'CrcActSite', on=['RecordNumber', 'ActivityID', 'SiteID'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcActSite', 'pass', '{} rows updated'.format(len(new_crv_wu)), username=param['output']['username'], password=param['output']['password'])
# Read db table
act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
## ConsentedRateVolume
crv_wu = pd.merge(wu6, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID'])[['CrcActSiteID', 'ConsentedRate', 'ConsentedMultiDayVolume', 'ConsentedMultiDayPeriod']].dropna(subset=['ConsentedRate', 'ConsentedMultiDayVolume'], how='all')
crv_wu['FromMonth'] = 1
crv_wu['ToMonth'] = 12
# Save results
new_crv_wu, _ = mssql.update_from_difference(crv_wu, param['output']['server'], param['output']['database'], 'ConsentedRateVolume', on='CrcActSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentedRateVolume', 'pass', '{} rows updated'.format(len(new_crv_wu)), username=param['output']['username'], password=param['output']['password'])
## Attributes
cols1 = ['RecordNumber', 'ActivityID', 'SiteID']
attr_cols = attr1.Attribute[attr1.Attribute.isin(wu6.columns)].tolist()
cols1.extend(attr_cols)
wua1 = wu6.loc[:, wu6.columns.isin(cols1)].set_index(['RecordNumber', 'ActivityID', 'SiteID'])
wua2 = wua1.stack()
wua2.name = 'Value'
wua2 = wua2.reset_index()
wua2.rename(columns={'level_3': 'Attribute'}, inplace=True)
wua3 = pd.merge(wua2, attr1, on='Attribute').drop('Attribute', axis=1)
wua4 = pd.merge(wua3, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID'], axis=1)
# Save results
new_wua, _ = mssql.update_from_difference(wua4, param['output']['server'], param['output']['database'], 'ConsentedAttributes', on=['CrcActSiteID', 'AttributeID'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentedAttributes', 'pass', '{} rows updated'.format(len(new_wua)), username=param['output']['username'], password=param['output']['password'])
#################################################
### Linked Consents
print('--Update LinkConsent table')
## Clean data
lc1 = db.linked_permits.copy()
lc1['RecordNumber'] = lc1['RecordNumber'].str.strip().str.upper()
lc1['OtherRecordNumber'] = lc1['OtherRecordNumber'].str.strip().str.upper()
lc1['Relationship'] = lc1['Relationship'].str.strip()
lc1['LinkedStatus'] = lc1['LinkedStatus'].str.strip()
    lc1['CombinedAnnualVolume'] = pd.to_numeric(lc1['CombinedAnnualVolume'], errors='coerce')
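except Exception as err:
    ## The original script's closing exception handler is not included in this excerpt; this
    ## minimal stand-in (an assumption) only reports the failure and re-raises, so that the
    ## `try` block above is syntactically complete.
    print('Run failed: {}'.format(err))
    raise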
"""
Fetch GPU load data from a remote server using SSH.
"""
import argparse
import configparser
import io
from fabric import Connection
import pandas as pd
import invoke
def get_username(pid, hostname, user):
"""Get the corresponding username for a PID"""
SSH_CMD = 'ps -o user= {}'.format(pid)
# sometimes zombies occur...
try:
result = Connection(hostname, user=user).run(SSH_CMD, hide=True).stdout
result = str.strip(result)
except invoke.exceptions.UnexpectedExit:
result = None
return result
def get_gpu_processes(hostname, user):
SSH_CMD = 'nvidia-smi --query-compute-apps=pid,gpu_uuid --format=csv'
result = Connection(hostname, user=user).run(SSH_CMD, hide=True).stdout # does return a CSV
csv = io.StringIO(result) # temporary string stream
csv = pd.read_csv(csv) # parse as csv
csv['username'] = ''
csv = csv.rename(columns={' gpu_uuid': 'gpu_uuid'})
for cur_idx, cur_row in csv.iterrows():
username = get_username(cur_row['pid'], hostname, user)
if username is None:
username = 'Zombie'
csv.loc[cur_idx, 'username'] = username
return csv
def get_load_data(hostname, user):
"""Get the output from `nvidia-smi` and parse it."""
SSH_CMD = 'nvidia-smi --query-gpu=utilization.gpu,utilization.memory,index,gpu_name,gpu_uuid --format=csv'
result = Connection(hostname, user=user).run(SSH_CMD, hide=True).stdout # does return a CSV
csv = io.StringIO(result) # temporary string stream
    csv = pd.read_csv(csv)  # parse as csv
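    # The original function is truncated here; returning the parsed frame (an assumption) keeps
    # this excerpt usable. Typical usage, with a hypothetical host and user:
    #   load_df = get_load_data('gpu-server.example.org', 'alice')
    #   proc_df = get_gpu_processes('gpu-server.example.org', 'alice')
    return csv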
'''
Created on: 14/12/2016
@author: <NAME>
@description: Extract the O/D weight matrix from a network diagram
'''
import argparse as arg
import os
import sys
import math
from lxml import etree
import numpy as np
import pandas as pd
from collections import defaultdict
# --------------------------------------------------------------
def printBanner():
    # Banner generated with http://patorjk.com/software/taag/#p=display&f=Doom&t=mutraff%20odgen
# Font: Doom
print(" _ __ __ _ ")
print(" | | / _|/ _| | | ")
print(" _ __ ___ _ _| |_ _ __ __ _| |_| |_ ___ __| | __ _ ___ _ __ ")
print("| '_ ` _ \| | | | __| '__/ _` | _| _| / _ \ / _` |/ _` |/ _ \ '_ \ ")
print("| | | | | | |_| | |_| | | (_| | | | | | (_) | (_| | (_| | __/ | | |")
print("|_| |_| |_|\__,_|\__|_| \__,_|_| |_| \___/ \__,_|\__, |\___|_| |_|")
print(" __/ | ")
print(" |___/ \n")
print(" MUTRAFF O/D Matrix generator")
print(" <EMAIL>")
print("")
# --------------------------------------------------------------
opts = {}
edge_types_speeds = defaultdict(dict)
od_trips = defaultdict(dict)
od_grouped_trips = defaultdict(dict)
od_nodes = defaultdict(dict)
od_edgeIds = defaultdict(dict)
od_edges = defaultdict(dict)
od_types = defaultdict(dict)
od_speeds = defaultdict(dict)
od_weights = defaultdict(dict)
od_lengths = defaultdict(dict)
od_names = defaultdict(dict)
od_priorities = defaultdict(dict)
od_tazs = defaultdict(dict)
od_taz_nodes = defaultdict(dict)
# --------------------------------------------------------------
def error(code,msg):
print('ERROR: '+msg)
exit(code)
# --------------------------------------------------------------
def getConfig():
parser = arg.ArgumentParser(
prog="mutraff_odgen",
formatter_class=arg.RawDescriptionHelpFormatter,
description='''\
MuTRAFF Origin/Destination Demand Matrices generator.
Generates different kinds of O/D matrices from the SUMO simulation files: network and trip files.
Examples:
* Generate all matrices:
python mutraff_odgen.py -n alcalahenares.net.xml -d alcalahenares_L_Bastra_uni1x8_timeALL_fulltraffic_logit50.trip.xml -A -x alcalahenares.nod.xml
* Generate time-filtered trip counters:
python mutraff_odgen.py -fti 1 -fte 2 -n alcalahenares.net.xml -C -d alcalahenares_L_Bastra_uni1x8_timeALL_fulltraffic_logit50.trip.xml -x alcalahenares.nod.xml
''')
# REQUIRED OPTS
parser.add_argument( "-n","--in-net", help='Input. SUMOs XML net description file', default="mutraff.net.xml", required=True)
parser.add_argument( "-x","--in-nodes", help='Input. SUMOs XML nodes description file', default="mutraff.nod.xml", required=True)
# NON-REQUIRED OPTS
parser.add_argument( "-d","--in-demand", help='Input. SUMOs trip file', required=False)
parser.add_argument( "-v","--verbose", help='Verbose output', default=False, action='store_true')
parser.add_argument( "-p","--out-prefix", help='Output. prefix for O/D generated files', default="mutraff", required=False)
parser.add_argument( "-O","--out-dir", help='Output. Directory for generated files', default=".", required=False)
parser.add_argument( "-A","--out-all", help='Output. Generate all the O/D matrices', default=False, action='store_true')
parser.add_argument( "-I","--out-edge-ids", help='Output. Generate O/D matrix with edge ids', default=False, action='store_true')
parser.add_argument( "-T","--out-edge-types", help='Output. Generate O/D matrix with edge types', default=False, action='store_true')
parser.add_argument( "-P","--out-edge-priorities", help='Output. Generate O/D matrix with edge priorities',default=False, action='store_true')
parser.add_argument( "-W","--out-fftt", help='Output. Generate O/D matrix with edge weights as FREE-FLOW TRAVEL TIMES', default=False, action='store_true')
parser.add_argument( "-L","--out-lengths", help='Output. Generate O/D matrix with edge lengths', default=False, action='store_true')
parser.add_argument( "-S","--out-speeds", help='Output. Generate O/D matrix with edge speeds', default=False, action='store_true')
parser.add_argument( "-C","--out-trip-counters", help='Output. Generate O/D matrix with timeless trip counters. Requires -d option enabled', default=False, action='store_true')
parser.add_argument( "-G","--out-group-trip-counters", help='Output. Generate O/D matrix with timeless GROUPED trip counters. Requires -d option enabled', default=False, action='store_true')
parser.add_argument( "-N","--out-names", help='Output. Generate O/D matrix with street names', default=False, action='store_true')
parser.add_argument( "-W2","--out-edge-weights", help='Output. Dump default edge weights', default=False, action='store_true')
parser.add_argument( "-X","--out-nodes", help='Output. Dump CSV nodes listing with GPS coordinates', default=False, action='store_true')
# filters
parser.add_argument( "-fti","--filter-time-ini", help='Filter. Dump trips from specified time', default=False )
parser.add_argument( "-fte","--filter-time-end", help='Filter. Dump trips up to specified time', default=False )
options = vars(parser.parse_args())
if( options['out_all'] ):
options['out_edge_ids'] = True
options['out_edge_types'] = True
options['out_edge_priorities'] = True
options['out_speeds'] = True
options['out_fftt'] = True
options['out_lengths'] = True
options['out_trip_counters'] = True
options['out_group_trip_counters'] = True
options['out_names'] = True
options['out_nodes'] = True
        options['out_edge_weights'] = True
if( options['out_trip_counters'] and not options['in_demand'] ):
parser.error('-C/--out-trip-counters requires -d/--in-demand setting enabled')
error(1,'Bad config')
if( options['out_group_trip_counters'] and not options['in_demand'] ):
parser.error('-C/--out-group-trip-counters requires -d/--in-demand setting enabled')
error(1,'Bad config')
if( options['filter_time_ini'] ):
options['filter_time_ini'] = float(options['filter_time_ini'])
if( options['filter_time_end'] ):
options['filter_time_end'] = float(options['filter_time_end'])
if( options['verbose'] ):
print(options)
return options
# --------------------------------------------------------------
# http://stackoverflow.com/questions/15736995/how-can-i-quickly-estimate-the-distance-between-two-latitude-longitude-points
# --------------------------------------------------------------
def distance(lat1, lng1, lat2, lng2):
    # returns the distance in metres; remove the "* 1000" to get kilometres
radius = 6371.0 * 1000.0
dLat = (lat2-lat1) * math.pi / 180
dLng = (lng2-lng1) * math.pi / 180
lat1 = lat1 * math.pi / 180
lat2 = lat2 * math.pi / 180
ang = 0
try:
val = math.sin(dLat/2) * math.sin(dLat/2) + math.sin(dLng/2) * math.sin(dLng/2) * math.cos(lat1) * math.cos(lat2)
ang = 2 * math.atan2(math.sqrt(val), math.sqrt(1-val))
except:
error(2,"Calculating distance. Exception: "+str(sys.exc_info()) )
return radius * ang
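# Example (illustrative): one degree of latitude is roughly
#   distance(40.0, -3.0, 41.0, -3.0) ~= 6371000 * pi / 180 ~= 111195 metres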
# --------------------------------------------------------------
def parseNodes(opts):
global edge_types_speeds
global od_edgeIds
global od_edges
global od_types
global od_speeds
global od_weights
global od_lengths
global od_names
global od_priorities
global od_nodes
global od_trips
print( "Start parsing nodes file" )
root = {}
nodesfile=opts['in_nodes']
try:
tree=etree.parse(nodesfile)
root=tree.getroot()
except:
error(2,"Parsing "+nodesfile+" file. Exception: "+str(sys.exc_info()) )
x_min = x_max = y_min = y_max = 0
for elem in root:
# print(elem)
try:
if( elem.tag ):
if( elem.tag == 'node' ):
id=elem.attrib['id']
x= float(elem.attrib['x'])
y= float(elem.attrib['y'])
od_nodes[id]['x'] = x
od_nodes[id]['y'] = y
od_nodes[id]['taz'] = ''
x_min = x if x < x_min else x_min
x_max = x if x > x_max else x_max
y_min = y if y < y_min else y_min
                    y_max = y if y > y_max else y_max
#print( "Pushed node:{}".format(id) )
except:
error(3,"Error parsing XML tree. Exception: "+sys.exc_info())
print( "End parsing nodes file" )
print( "Initializing O/D matrices")
for x in od_nodes:
#print "--> {}".format(x)
for y in od_nodes:
od_edgeIds[x][y] = ''
od_types[x][y] = ''
od_speeds[x][y] = 0
od_weights[x][y] = 0
od_lengths[x][y] = 0
od_names[x][y] = 0
od_priorities[x][y] = 0
od_trips[x][y] = 0
od_grouped_trips[x][y] = 0
# --------------------------------------------------------------
def parseNet(opts):
global edge_types_speeds
global od_edgeIds
global od_edges
global od_types
global od_speeds
global od_weights
global od_lengths
global od_names
global od_priorities
global od_nodes
global od_tazs
global od_taz_nodes
flag_debug=False
print( "Start parsing netfile" );
od_taz_nodes['from']={}
od_taz_nodes['to']={}
root = {}
netfile=opts['in_net']
try:
tree=etree.parse(netfile)
root=tree.getroot()
except:
error(2,"Parsing "+netfile+" file. Exception: "+str(sys.exc_info()) )
# -- Weight format --
# {'priority': '1', 'width': '2.00', 'allow': 'pedestrian', 'oneway': '1', 'numLanes': '1', 'speed': '2.78', 'id': 'highway.bridleway'}
for elem in root:
if flag_debug:
print(elem)
try:
if( elem.tag ):
# ----------------------------------------------
if( elem.tag == 'type' ):
edge_types_speeds[elem.attrib['id']] = elem.attrib['speed']
# ----------------------------------------------
if( elem.tag == 'edge' ):
if( not ('function' in elem.attrib and elem.attrib['function'] == 'internal' )):
edge_id = elem.attrib['id']
#print('edge['+vfrom+','+vto+']='+edge_id)
#print(elem.attrib)
vfrom = elem.attrib['from']
vto = elem.attrib['to']
# -----------------
od_edgeIds[vfrom][vto] = edge_id
# -----------------
od_edges[edge_id]['from'] = vfrom
od_edges[edge_id]['to'] = vto
# -----------------
vtype = elem.attrib['type']
od_types[vfrom][vto] = vtype
# -----------------
speed = 0
if vtype in edge_types_speeds:
speed = float(edge_types_speeds[vtype])
od_speeds[vfrom][vto] = speed
# -----------------
od_edges[edge_id]['speed'] = speed
lat1 = od_nodes[vfrom]['y']
lon1 = od_nodes[vfrom]['x']
lat2 = od_nodes[vto]['y']
lon2 = od_nodes[vto]['x']
linear_distance = distance(lat1, lon1, lat2, lon2)
                        # Length = true path length
                        # Distance = linear distance between origin and destination coordinates
od_edges[edge_id]['length'] = linear_distance
od_edges[edge_id]['distance'] = linear_distance
weight = 999
if speed > 0:
weight = linear_distance/speed
od_edges[edge_id]['weight'] = weight
od_weights[vfrom][vto] = weight
od_lengths[vfrom][vto] = linear_distance
if flag_debug:
print("EDGE({}: {}->{}): lindist={}, speed={}, FFTT={}".format( edge_id, vfrom, vto, linear_distance, speed, weight ))
# -----------------
od_names[vfrom][vto] = elem.attrib['name'] if 'name' in elem.attrib else ''
# -----------------
od_priorities[vfrom][vto] = elem.attrib['priority'] if 'priority' in elem.attrib else 0
# ----------------------------------------------
# edge_id, vfromm, vto --> coming from previous edge xml-tag
for elem2 in elem.iter("lane"):
if( elem2.tag == 'lane' ):
#print("lane:{}".format( elem2.attrib))
lane_id = elem2.attrib['id']
lane_speed = float(elem2.attrib['speed'])
lane_length = float(elem2.attrib['length'])
lane_weight = 999
if lane_speed > 0:
lane_weight = lane_length/lane_speed
od_weights[vfrom][vto] = lane_weight
od_lengths[vfrom][vto] = lane_length
od_speeds[vfrom][vto] = lane_speed
od_edges[edge_id]['weight'] = lane_weight
od_edges[edge_id]['speed'] = lane_speed
od_edges[edge_id]['length'] = lane_length
if flag_debug:
print(" LANE({}: {}->{}): length={}, speed={}, FFTT={}".format( edge_id, vfrom, vto, lane_length, lane_speed, lane_weight ))
# ----------------------------------------------
if( elem.tag == 'tazs' ):
for e2 in elem:
try:
# --- ADD A NEW TAZ ------------------------
if( e2.tag and e2.tag == 'taz' ):
# print("TAZ: {}".format(e2.attrib['edges']))
taz_id = e2.attrib['id']
#print("Adding taz {}".format(taz_id))
od_tazs[taz_id]['edges'] = e2.attrib['edges'].split(' ')
od_tazs[taz_id]['nodes'] = {}
od_tazs[taz_id]['nodes']['from'] = []
od_tazs[taz_id]['nodes']['to'] = []
od_tazs[taz_id]['nodes']['leader_from']=None
od_tazs[taz_id]['nodes']['leader_to']=None
# -- Create nodes list ---------------
for e in od_tazs[taz_id]['edges']:
try:
vfrom = od_edges[e]['from']
except:
print("Warning: TAZ parsing: found edge from-node {} out of the map".format(e))
continue
try:
vto = od_edges[e]['to']
except:
print("Warning: TAZ parsing: found edge to-node {} out of the map".format(e))
continue
od_tazs[taz_id]['nodes']['from'].append(vfrom)
od_tazs[taz_id]['nodes']['to'].append(vto)
if not vfrom in od_taz_nodes['from']:
od_taz_nodes['from'][vfrom]=[]
od_taz_nodes['from'][vfrom].append(taz_id)
if not vto in od_taz_nodes['from']:
od_taz_nodes['from'][vto]=[]
od_taz_nodes['from'][vto].append(taz_id)
od_nodes[vfrom]['taz'] = taz_id
od_nodes[vto]['taz'] = taz_id
# -- Calculate TAZ centroids ---------------
x = y = 0
for node in od_tazs[taz_id]['nodes']['from']:
#print("--> {},{}".format( od_nodes[node]['x'], od_nodes[node]['y'] ))
#print("<-- {},{}".format( float(od_nodes[node]['x']), float(od_nodes[node]['y']) ))
x += od_nodes[node]['x']
y += od_nodes[node]['y']
x = x/len(od_tazs[taz_id]['nodes']['from'])
y = y/len(od_tazs[taz_id]['nodes']['from'])
od_tazs[taz_id]['nodes']['centroid']={ 'x': x, 'y':y }
except:
error(4,"Error parsing XML tree in TAZ. Exception: "+str(sys.exc_info()))
# if( elem.tag == 'tlLogic' or elem.tag == 'junction' ):
# print("End parsing")
# return
except:
error(3,"Error parsing XML tree. Exception: "+str(sys.exc_info()))
print( "End parsing netfile" );
# --------------------------------------------------------------
def calc_distance( x1, y1, x2, y2 ):
return math.sqrt( math.pow(x1-x2,2)+math.pow(y1-y2,2) )
# --------------------------------------------------------------
def calculate_taz_leaders(opts):
global od_tazs
global od_taz_nodes
print("Selecting distance leader nodes")
num_tazs = len(od_tazs)
for taz in od_tazs:
# Calculate centroid from centroids of rest of tazs
cen_x = cen_y = 0
for t in od_tazs:
if not taz == t:
                cen_x += od_tazs[t]['nodes']['centroid']['x']
                cen_y += od_tazs[t]['nodes']['centroid']['y']
cen_x = cen_x/(num_tazs-1)
cen_y = cen_y/(num_tazs-1)
#print("TAZ[{}] --> Distance Centroid:{} , {}".format(taz,cen_x,cen_y) )
# Obtain the most far node from taz to the taz centroids
d = 0
leader = ''
for n in od_tazs[taz]['nodes']['from']:
d1 = calc_distance( cen_x, cen_y, od_nodes[n]['x'], od_nodes[n]['y'] )
#print("n:{} --> Taz[{}] --> far node:{}".format(n,taz,leader))
if d1 > d:
d=d1
leader=n
print(" Taz[{}] --> distance leader node:{}".format(taz,leader))
od_tazs[taz]['nodes']['leader']=leader
# --------------------------------------------------------------
def dump_taz_nodes(opts):
global od_tazs
global od_taz_nodes
print("=== DUMPING TAZ NODES ==================")
for taz in od_tazs:
print("NODES.FROM.TAZ[{}]: {}".format(taz,od_tazs[taz]['nodes']['from']))
print("CENTROID.FROM.TAZ[{}]: {}".format(taz,od_tazs[taz]['nodes']['centroid']))
print("========================================")
# --------------------------------------------------------------
def count_individual_trips(vfrom,vto):
global od_trips
if( vfrom in od_trips and vto in od_trips[vfrom] ):
od_trips[vfrom][vto] += 1
else:
od_trips[vfrom][vto] = 1
# --------------------------------------------------------------
def count_group_trips(vfrom,vto):
global od_grouped_trips
global od_taz_nodes
global od_tazs
# get vfrom and vto equivalences
if vfrom in od_taz_nodes['from'] and len(od_taz_nodes['from'][vfrom]) > 0:
taz = od_taz_nodes['from'][vfrom][0]
eqfrom = od_tazs[taz]['nodes']['leader']
#print(" --> FOUND TAZ leader equivalence for FROM:{}".format( vfrom) )
else:
print(" --> Not found TAZ leader equivalence for FROM:{}".format( vfrom) )
eqfrom = vfrom
if vto in od_taz_nodes['from'] and len(od_taz_nodes['from'][vto]) > 0:
taz = od_taz_nodes['from'][vto][0]
eqto = od_tazs[taz]['nodes']['leader']
#print(" --> FOUND TAZ leader equivalence for TO:{}".format( vfrom) )
else:
print(" --> Not found TAZ leader equivalence for TO:{}".format( vto) )
eqto = vto
if( eqfrom in od_grouped_trips and eqto in od_grouped_trips[eqfrom] ):
od_grouped_trips[eqfrom][eqto] += 1
else:
od_grouped_trips[eqfrom][eqto] = 1
# --------------------------------------------------------------
def parseDemand(opts):
global od_grouped_trips
global od_edgeIds
global od_edges
global od_nodes
tot_trips = 0
count_trips = 0
print( "Start parsing demand file" );
# Read Demand file
root = {}
demandfile=opts['in_demand']
try:
tree=etree.parse(demandfile)
root=tree.getroot()
except:
error(2,"Parsing "+demandfile+" file. Exception: "+ str(sys.exc_info()) )
# -- Trip format --
# from and to fields are ***edges*** not nodes. It is necessary to change them.
# <trip id="9100" depart="0.14" from="4616750#0" to="23858590#3" fromTaz="4" toTaz="99" departLane="best" departSpeed="0"/>
for elem in root:
try:
if( elem.tag ):
if( elem.tag == 'trip' ):
tot_trips += 1
# print(elem.attrib)
edgeFrom = elem.attrib['from']
edgeTo = elem.attrib['to']
depart = float(elem.attrib['depart'])
consider = True
if( opts['filter_time_ini'] and depart < opts['filter_time_ini'] ):
consider = False
if( opts['filter_time_end'] and depart > opts['filter_time_end'] ):
consider = False
if( consider ):
vfrom = od_edges[edgeFrom]['from']
vto = od_edges[edgeTo]['to']
count_trips += 1
# Count individual trips
count_individual_trips(vfrom,vto)
# Count leader trips
count_group_trips( vfrom, vto );
except:
error(3,"Error setting value. Exception: "+str(sys.exc_info()))
print( "End parsing demand file" );
print("Counted trips: "+str(count_trips)+"/"+str(tot_trips) )
# --------------------------------------------------------------
def dumpODmatrices():
global od_edgeIds
global od_types
global od_speeds
global od_weights
global od_lengths
global od_names
global od_priorities
global od_trips
global od_grouped_trips
global od_nodes
if not os.path.exists(opts['out_dir']):
print( "Creating directory "+opts['out_dir'])
os.makedirs(opts['out_dir'])
    if ('out_fftt' in opts) and (opts['out_fftt']):
print("Generating od matrix for edge weights")
df = pd.DataFrame(od_weights).T.fillna(0)
df.to_csv( opts['out_dir']+'/'+opts['out_prefix']+"_od_weights.csv" )
    if ('out_speeds' in opts) and (opts['out_speeds']):
print("Generating od matrix for edge speeds")
df = pd.DataFrame(od_speeds).T.fillna(0)
df.to_csv( opts['out_dir']+'/'+opts['out_prefix']+"_od_speeds.csv" )
    if ('out_lengths' in opts) and (opts['out_lengths']):
print("Generating od matrix for edge lengths")
df = pd.DataFrame(od_lengths).T.fillna(0)
df.to_csv( opts['out_dir']+'/'+opts['out_prefix']+"_od_lengths.csv" )
    if ('out_edge_types' in opts) and (opts['out_edge_types']):
print("Generating od matrix for edge types")
df = pd.DataFrame(od_types).T.fillna(0)
df.to_csv( opts['out_dir']+'/'+opts['out_prefix']+"_od_types.csv" )
    if ('out_edge_ids' in opts) and (opts['out_edge_ids']):
print("Generating od matrix for edge ids")
df = pd.DataFrame(od_edgeIds).T.fillna(0)
df.to_csv( opts['out_dir']+'/'+opts['out_prefix']+"_od_edgeids.csv" )
    if ('out_names' in opts) and (opts['out_names']):
print("Generating od matrix for edge names")
df = pd.DataFrame(od_names).T.fillna(0)
df.to_csv( opts['out_dir']+'/'+opts['out_prefix']+"_od_names.csv", encoding='utf-8' )
    if ('out_edge_priorities' in opts) and (opts['out_edge_priorities']):
print("Generating od matrix for edge priorities")
df = pd.DataFrame(od_priorities).T.fillna(0)
df.to_csv( opts['out_dir']+'/'+opts['out_prefix']+"_od_priorities.csv" )
    if ('out_trip_counters' in opts) and (opts['out_trip_counters']):
print("Generating od matrix for trip counters")
df = pd.DataFrame(od_trips).T.fillna(0)
df.to_csv( opts['out_dir']+'/'+opts['out_prefix']+"_od_trips.csv" )
    if ('out_group_trip_counters' in opts) and (opts['out_group_trip_counters']):
print("Generating od matrix for grouped trip counters")
df = pd.DataFrame(od_grouped_trips).T.fillna(0)
df.to_csv( opts['out_dir']+'/'+opts['out_prefix']+"_od_group_trips.csv" )
    if ('out_nodes' in opts) and (opts['out_nodes']):
print("Generating od matrix for nodes")
df = | pd.DataFrame(od_nodes) | pandas.DataFrame |
# Import libraries; some are unnecessary right now
import configparser
import pandas as pd
import numpy as np
import sys
import os
import random
import copy
import math
import scanpy as sc
from matplotlib import pyplot as plt
import matplotlib as mpl
import seaborn as sns
# null distribution fitting
from scipy.stats import norm
# bonferroni correction
from statsmodels.stats.multitest import multipletests
#CountsFile = sys.argv[1]
np.seterr(all = 'warn')
cfgFile = sys.argv[1] # '../switchy/SS2.ini'
# Helper functions
# Load the data a get filter into a usable form
def prepareData(CountsFile, datatype, highly_variable, n_highly_variable, onlyClones, remove_immune_receptors, normalize, filterCells):
""" Accepts: H5ad file where the adata.obs has a column "CLONE" denoting the clonal membership of the cell
dataype: "scaled" or anything else would make it return log
Returns: adata after filtering"""
adata = sc.read_h5ad(CountsFile)
adata, df = preprocessWScanpy(adata, datatype, highly_variable, n_highly_variable, remove_immune_receptors, normalize, filterCells)
# After filtering select only cells which are clones
if onlyClones == True:
# Logic for dropping non-clones from the klein dataset
#adata.obs.CLONE.fillna('None', inplace = True)
adata = adata[adata.obs.CLONE != 'NaN' ,:]
# Select only clones (applies to my dataset mostly)
selector = adata.obs.CLONE.value_counts() > 1
selector = selector[selector == True]
adata = adata[adata.obs.CLONE.isin(selector.index), :]
df = df[df.index.isin(adata.obs.index)]
return adata, df
def readConfig(cfgFile):
config = configparser.ConfigParser()
config.read(cfgFile)
stat_parameters = config['stat_parameters']
io = config['IO']
CountsFile = io['CountsFile']
out_dir = io['out_dir']
return stat_parameters, io, config
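# Illustrative sketch of the .ini file readConfig expects; the section names and the
# CountsFile/out_dir keys come from the lookups above, the remaining content is assumed:
#
#   [IO]
#   CountsFile = /path/to/counts.h5ad
#   out_dir = ./results
#
#   [stat_parameters]
#   # e.g. shuffle counts and significance thresholds used by the analysis functions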
# Filter Genes and Cells to get a manageable dataframe
def preprocessWScanpy(adata, datatype, highly_variable, n_highly_variable, remove_immune_receptors, normalize, filterCells):
# TODO: make this into an argument
# What is the best way to control parameters, probably a yaml file?
#sc.pp.calculate_qc_metrics(adata, inplace=True)
if remove_immune_receptors == True:
immune_receptors = pd.read_csv('/home/mswift/B_cells/CSR/sc_RNAseq/data_tables/metadata/immune_receptor_genes_keepConstantRegion.csv', index_col=0)
immune_receptors.columns = ['genes']
print("removing variable immune receptor genes which may drive clustering")
adata = adata[:, ~adata.var.index.isin(immune_receptors.genes)]
if filterCells == True:
# Filter Cells and Genes
sc.pp.filter_cells(adata, min_genes=800, inplace = True)
sc.pp.filter_cells(adata, min_counts=100000, inplace = True)
# always filter out the lowest expressed genes for computation time
sc.pp.filter_genes(adata, min_cells=4, inplace = True)
sc.pp.filter_genes(adata, min_counts=200, inplace = True)
print(adata.obs.shape, adata.var.shape, "shape of adata after filtering ")
# Make parameter in cfg
if normalize == True:
sc.pp.normalize_total(adata, target_sum=1e6)
sc.pp.log1p(adata, base = 10)
adata.raw = adata
sc.pp.highly_variable_genes(adata, n_top_genes=n_highly_variable)
# datatype logic
if datatype == 'scaled':
sc.pp.scale(adata)
else:
pass
    # Subset to highly variable genes
if highly_variable == True:
adata = adata[:,adata.var['highly_variable'] == True]
highly_variable_genes = adata.var.index[adata.var["highly_variable"] == True]
df = convertSparsetoDataFrame(adata)
return adata, df
def convertSparsetoDataFrame(adata):
""" Input: anndata object with sparse matrix as .X attribute
Returns: Pandas dataframe with rows as cells and columns as genes
    My take: this is inefficient but convenient; the code was written around this dataframe representation, which is in hindsight questionable, but it is arguably more readable."""
# Get the gene expression values for each cell x gene
columns = adata.var.index.to_list()
index = adata.obs.index.to_list()
try:
denseArray = adata.X.toarray()
except:
denseArray = adata.X
df = pd.DataFrame(data = denseArray, index = index , columns = columns )
return df
def plotWaterfall(df, adata_obs, gene, label):
LabelsTesting = adata_obs.copy()
# Implementing the Hodgkin Protocol
fig, ax1 = plt.subplots(1,1)
LabelsTesting.loc[:,gene] = df[gene]
order = LabelsTesting.groupby(label)[gene].mean().sort_values(ascending = False).index
g = sns.stripplot(ax=ax1, data = LabelsTesting, x = LabelsTesting[label], y = gene, order = order, color = None)
#save_figure(fig, '{}_{}'.format(label, str(gene)))
return g
def plotCI(df, adata_obs, num_shuffles, gene, label, alpha):
    # This is expensive to do twice; it is structured this way because only a subset of hits is plotted
LabelsTesting = | pd.merge(adata.obs[label], df[gene], left_index=True, right_index=True) | pandas.merge |
import re
import gensim
import torch
import transformers
import pandas as pd
import numpy as np
from os.path import dirname
from pathlib import Path
from tqdm.auto import tqdm
from collections import Counter
import os
import sys
CURRENT_DIR = os.getcwd()
sys.path.append(CURRENT_DIR)
MODULE_PATH = Path(dirname(__file__))
from umigame.nlp import labelling
from umigame.nlp.preprocess import Preprocessor
from umigame.datasets import fetch_crypto
class SimpleTextDataset(torch.utils.data.Dataset):
def __init__(self, ids, targets):
self.ids = ids
self.targets = targets
def __len__(self):
return len(self.targets)
def __getitem__(self, item):
id_ = torch.tensor(self.ids[item], dtype=torch.long)
target = torch.tensor(self.targets[item], dtype=torch.float)
return id_, target
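# Usage sketch (not part of the original module; names are illustrative): the dataset
# above plugs directly into a standard DataLoader for batching.
def _example_simple_text_loader(ids, targets, batch_size=32):
    # yields batches of (token-id tensor, target tensor) pairs
    return torch.utils.data.DataLoader(
        SimpleTextDataset(ids, targets), batch_size=batch_size, shuffle=True
    )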
class NewsBertDataset(torch.utils.data.Dataset):
"""
Vocab size: 30522
"""
def __init__(self, state="train", max_length=16):
file_path = os.path.join(MODULE_PATH, "..", "datasets", "text", "news.csv")
news_df = pd.read_csv(file_path)
news_df["text"] = news_df["text"].apply(lambda x: x.lower())
# news_df = self._remain_rows_contain_keywords(news_df, "text", ["btc", "blockchain"])
news_df.set_index("date", inplace=True, drop=True)
news_df.index = | pd.to_datetime(news_df.index) | pandas.to_datetime |
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
import csv
import json
from app.models import Dataset, Record, Attribute
from api.models import Result, ExecutionLog
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
import numpy as np
import matplotlib.pyplot as plt
from api.helpers import switcher, buildDatasetMatrix, buildDataFrame
from api.pre_processing import pre_processing
import pandas as pd
from sklearn.decomposition import PCA
import time
from django.template.loader import render_to_string
from django.conf import settings as djangoSettings
import os
import zipfile
from io import BytesIO
# upload & validate dataset
@csrf_exempt
def upload_dataset(request):
start_time = time.time()
result = {}
executionStatus = True
try:
csvfile = request.FILES['dataset']
datasetName = request.POST.get('dataset_name')
datasetDescription = request.POST.get('dataset_description') if request.POST.get(
'dataset_description') != None else ''
allowMissinValues = request.POST.get('Allow_missing_values')
# link : https://www.kaggle.com/smohubal/market-customer-segmentation-clustering/notebook
try:
if allowMissinValues == 'false':
tmpCsvfile = pd.read_csv(csvfile)
datasetInstance = pre_processing(tmpCsvfile)
visual, data = datasetInstance.missing_percent()
executionTime = "{:.4f}".format(time.time() - start_time)
execLog = ExecutionLog(method='Dataset Uploading', dataset_id=None,
exec_time=executionTime, status=False)
execLog.save()
result['data'] = data
result['visual'] = visual
result['status'] = False
return JsonResponse(result)
else:
csvfile.seek(0)
except Exception as e:
csvfile.seek(0)
decoded_file = csvfile.read().decode('utf-8').splitlines()
reader = csv.reader(decoded_file, delimiter=',')
attributes = {}
attributes = next(reader)
reader = csv.DictReader(decoded_file, delimiter=',')
records = []
for row in reader:
records.append(row)
# create the dataset
dataset = Dataset(title=datasetName, description=datasetDescription)
dataset.save()
# save the attributes
for attributeName in attributes:
try:
attributeInstance = Attribute.objects.get(name=attributeName)
except Attribute.DoesNotExist:
attributeInstance = Attribute(
name=attributeName, label=attributeName)
attributeInstance.save()
dataset.attributes.add(attributeInstance)
# save the records
for data in records:
record = Record(data=data)
record.save()
dataset.records.add(record)
result['attributes'] = attributes
result['records_count'] = len(records)
except Exception as e:
executionStatus = False
result['message'] = str(e)
pass
executionTime = "{:.4f}".format(time.time() - start_time)
execLog = ExecutionLog(method='Dataset Uploading', dataset_id=(dataset.id if executionStatus else None), error=('' if executionStatus else result['message']),
exec_time=executionTime, status=executionStatus)
execLog.save()
result['status'] = executionStatus
return JsonResponse(result)
@csrf_exempt
def clone_dataset(request):
start_time = time.time()
result = {}
executionStatus = True
try:
datasetId = request.POST.get('datasetId')
datasetName = request.POST.get('dataset_name')
datasetDescription = request.POST.get('dataset_description') if request.POST.get(
'dataset_description') != None else ''
        allowMissinValues = request.POST.get('Allow_missing_values')
        # fetch the source dataset and copy its attributes and records
        # (assumes the related managers implied by the .add() calls used elsewhere in this module)
        sourceDataset = Dataset.objects.get(id=datasetId)
        attributes = [attribute.name for attribute in sourceDataset.attributes.all()]
        records = [record.data for record in sourceDataset.records.all()]
        # create the dataset
dataset = Dataset(title=datasetName, description=datasetDescription)
dataset.save()
# save the attributes
for attributeName in attributes:
try:
attributeInstance = Attribute.objects.get(name=attributeName)
except Attribute.DoesNotExist:
attributeInstance = Attribute(
name=attributeName, label=attributeName)
attributeInstance.save()
dataset.attributes.add(attributeInstance)
# save the records
for data in records:
record = Record(data=data)
record.save()
dataset.records.add(record)
result['attributes'] = attributes
result['records_count'] = len(records)
except Exception as e:
executionStatus = False
result['message'] = str(e)
pass
executionTime = "{:.4f}".format(time.time() - start_time)
execLog = ExecutionLog(method='Dataset Uploading', dataset_id=(dataset.id if executionStatus else None), error=('' if executionStatus else result['message']),
exec_time=executionTime, status=executionStatus)
execLog.save()
result['status'] = executionStatus
return JsonResponse(result)
@csrf_exempt
def donwload_clusters_zip(request):
path = djangoSettings.STATIC_ROOT + '/clusters'
filenames = os.listdir(path)
zip_buffer = BytesIO()
with zipfile.ZipFile(zip_buffer, "a", zipfile.ZIP_DEFLATED, False) as zip_file:
for file in filenames:
f = open(path + '/' + file)
zip_file.writestr(file, f.read())
f.close()
zip_buffer.seek(0)
resp = HttpResponse(zip_buffer, content_type='application/zip')
resp['Content-Disposition'] = 'attachment; filename = clusters.zip'
return resp
# optimum_clusters_number
@csrf_exempt
def optimum_clusters_number(request):
start_time = time.time()
result = {}
executionStatus = True
try:
datasetId = request.POST.get('datasetId')
method = request.POST.get('method')
maxIterationsNumeber = request.POST.get('maxIterationsNumeber')
pcaComponents = request.POST.get('pcaComponents')
dataset = Dataset.objects.get(id=datasetId)
datasetMatrix = buildDatasetMatrix(dataset=dataset)
if pcaComponents != "" and pcaComponents != 0:
pca = PCA(n_components=float(pcaComponents) if float(
pcaComponents) < 1 else int(pcaComponents))
datasetMatrix = pca.fit_transform(datasetMatrix)
result = switcher(method=method, args={
'datasetMatrix': datasetMatrix, 'maxIterationsNumeber': maxIterationsNumeber})
pass
except Exception as e:
executionStatus = False
result['message'] = str(e)
pass
executionTime = "{:.4f}".format(time.time() - start_time)
execLog = ExecutionLog(method=method, dataset_id=datasetId, error=('' if executionStatus else result['message']),
exec_time=executionTime, status=executionStatus)
execLog.save()
result['status'] = executionStatus
return JsonResponse(result)
# clustering
@csrf_exempt
def clustering(request):
start_time = time.time()
result = {}
executionStatus = True
contributions = False
try:
datasetId = request.POST.get('datasetId')
method = request.POST.get('method')
clustersNumber = request.POST.get('clustersNumber')
samplingPoints = request.POST.get('samplingPoints')
linkageMethod = request.POST.get('linkageMethod')
pcaComponents = request.POST.get('pcaComponents')
dataset = Dataset.objects.get(id=datasetId)
datasetDf = buildDataFrame(dataset=dataset)
if pcaComponents == '':
pcaComponents = 0
nPca = float(pcaComponents) if float(
pcaComponents) < 1 else int(pcaComponents)
if pcaComponents != "" and pcaComponents != 0 and (nPca > 2 or nPca < 1):
print('========= PCA Applied =========')
pca = PCA(n_components=float(pcaComponents) if float(
pcaComponents) < 1 else int(pcaComponents))
datasetDf = pca.fit_transform(datasetDf)
contributions = pca.components_
columns = ['PC'+str(i) for i in range(datasetDf.shape[1])]
datasetDf = | pd.DataFrame(datasetDf, columns=columns) | pandas.DataFrame |
import os
import tensorflow as tf
import pandas as pd
from addressnet.predict import predict_one, predict
def get_gnaf_dataset_labels():
labels_list = [
'building_name', # 1
'level_number_prefix', # 2
'level_number', # 3
'level_number_suffix', # 4
'level_type', # 5
'flat_number_prefix', # 6
'flat_number', # 7
'flat_number_suffix', # 8
'flat_type', # 9
'number_first_prefix', # 10
'number_first', # 11
'number_first_suffix', # 12
'number_last_prefix', # 13
'number_last', # 14
'number_last_suffix', # 15
'street_name', # 16
'street_suffix', # 17
'street_type', # 18
'locality_name', # 19
'state', # 20
'postcode' # 21
]
return labels_list
if __name__ == "__main__":
print(tf.__version__)
print(predict_one("casa del gelato, 10A 24-26 high street road mount waverley vic 3183"))
# load CSV
df_in = pd.read_csv(os.path.join(os.getcwd(), 'data/full_address.csv'), header=0)
print(df_in.shape)
# get a list of addresses to parse
addresses_to_parse = df_in['FullAddress'].tolist()
# make predictions
print('Parsing addresses...')
parsed_addresses = predict(addresses_to_parse)
# save predictions into a dataframe
df_out = | pd.DataFrame() | pandas.DataFrame |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import TimeoutException
from sheet_scraper import ScraleScraper
import pandas as pd
import time
def bestellen_maar(datafreempje, driver):
    # create the frame
df_gemist = | pd.DataFrame(data=None, columns=datafreempje.columns) | pandas.DataFrame |
"""
The ``python_function`` model flavor serves as a default model interface for MLflow Python models.
Any MLflow Python model is expected to be loadable as a ``python_function`` model.
In addition, the ``mlflow.pyfunc`` module defines a generic :ref:`filesystem format
<pyfunc-filesystem-format>` for Python models and provides utilities for saving to and loading from
this format. The format is self contained in the sense that it includes all necessary information
for anyone to load it and use it. Dependencies are either stored directly with the model or
referenced via a Conda environment.
The ``mlflow.pyfunc`` module also defines utilities for creating custom ``pyfunc`` models
using frameworks and inference logic that may not be natively included in MLflow. See
:ref:`pyfunc-create-custom`.
.. _pyfunc-inference-api:
*************
Inference API
*************
Python function models are loaded as an instance of :py:class:`PyFuncModel
<mlflow.pyfunc.PyFuncModel>`, which is an MLflow wrapper around the model implementation and model
metadata (MLmodel file). You can score the model by calling the :py:func:`predict()
<mlflow.pyfunc.PyFuncModel.predict>` method, which has the following signature::
predict(model_input: pandas.DataFrame) -> [numpy.ndarray | pandas.(Series | DataFrame)]
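For example, a saved model can be loaded with :py:func:`load_model() <mlflow.pyfunc.load_model>`
and scored as follows (a minimal sketch; the run ID and column name are illustrative)::
    import mlflow.pyfunc
    import pandas as pd
    model = mlflow.pyfunc.load_model("runs:/<run_id>/model")
    predictions = model.predict(pd.DataFrame({"x": [1.0, 2.0]}))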
.. _pyfunc-filesystem-format:
*****************
Filesystem format
*****************
The Pyfunc format is defined as a directory structure containing all required data, code, and
configuration::
./dst-path/
./MLmodel: configuration
<code>: code packaged with the model (specified in the MLmodel file)
<data>: data packaged with the model (specified in the MLmodel file)
<env>: Conda environment definition (specified in the MLmodel file)
The directory structure may contain additional contents that can be referenced by the ``MLmodel``
configuration.
.. _pyfunc-model-config:
MLModel configuration
#####################
A Python model contains an ``MLmodel`` file in **python_function** format in its root with the
following parameters:
- loader_module [required]:
Python module that can load the model. Expected as module identifier
e.g. ``mlflow.sklearn``, it will be imported using ``importlib.import_module``.
The imported module must contain a function with the following signature::
_load_pyfunc(path: string) -> <pyfunc model implementation>
The path argument is specified by the ``data`` parameter and may refer to a file or
directory. The model implementation is expected to be an object with a
``predict`` method with the following signature::
predict(model_input: pandas.DataFrame) -> [numpy.ndarray | pandas.(Series | DataFrame)]
- code [optional]:
Relative path to a directory containing the code packaged with this model.
All files and directories inside this directory are added to the Python path
prior to importing the model loader.
- data [optional]:
Relative path to a file or directory containing model data.
The path is passed to the model loader.
- env [optional]:
Relative path to an exported Conda environment. If present this environment
should be activated prior to running the model.
- Optionally, any additional parameters necessary for interpreting the serialized model in
``pyfunc`` format.
.. rubric:: Example
::
tree example/sklearn_iris/mlruns/run1/outputs/linear-lr
::
├── MLmodel
├── code
│ ├── sklearn_iris.py
│
├── data
│ └── model.pkl
└── mlflow_env.yml
::
cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel
::
python_function:
code: code
data: data/model.pkl
loader_module: mlflow.sklearn
env: mlflow_env.yml
main: sklearn_iris
.. _pyfunc-create-custom:
******************************
Creating custom Pyfunc models
******************************
MLflow's persistence modules provide convenience functions for creating models with the
``pyfunc`` flavor in a variety of machine learning frameworks (scikit-learn, Keras, Pytorch, and
more); however, they do not cover every use case. For example, you may want to create an MLflow
model with the ``pyfunc`` flavor using a framework that MLflow does not natively support.
Alternatively, you may want to build an MLflow model that executes custom logic when evaluating
queries, such as preprocessing and postprocessing routines. Therefore, ``mlflow.pyfunc``
provides utilities for creating ``pyfunc`` models from arbitrary code and model data.
The :meth:`save_model()` and :meth:`log_model()` methods are designed to support multiple workflows
for creating custom ``pyfunc`` models that incorporate custom inference logic and artifacts
that the logic may require.
An `artifact` is a file or directory, such as a serialized model or a CSV. For example, a
serialized TensorFlow graph is an artifact. An MLflow model directory is also an artifact.
.. _pyfunc-create-custom-workflows:
Workflows
#########
:meth:`save_model()` and :meth:`log_model()` support the following workflows:
1. Programmatically defining a new MLflow model, including its attributes and artifacts.
Given a set of artifact URIs, :meth:`save_model()` and :meth:`log_model()` can
automatically download artifacts from their URIs and create an MLflow model directory.
In this case, you must define a Python class which inherits from :class:`~PythonModel`,
defining ``predict()`` and, optionally, ``load_context()``. An instance of this class is
specified via the ``python_model`` parameter; it is automatically serialized and deserialized
as a Python class, including all of its attributes.
2. Interpreting pre-existing data as an MLflow model.
If you already have a directory containing model data, :meth:`save_model()` and
:meth:`log_model()` can import the data as an MLflow model. The ``data_path`` parameter
specifies the local filesystem path to the directory containing model data.
In this case, you must provide a Python module, called a `loader module`. The
loader module defines a ``_load_pyfunc()`` method that performs the following tasks:
- Load data from the specified ``data_path``. For example, this process may include
deserializing pickled Python objects or models or parsing CSV files.
- Construct and return a pyfunc-compatible model wrapper. As in the first
use case, this wrapper must define a ``predict()`` method that is used to evaluate
queries. ``predict()`` must adhere to the :ref:`pyfunc-inference-api`.
The ``loader_module`` parameter specifies the name of your loader module.
For an example loader module implementation, refer to the `loader module
implementation in mlflow.keras <https://github.com/mlflow/mlflow/blob/
74d75109aaf2975f5026104d6125bb30f4e3f744/mlflow/keras.py#L157-L187>`_.
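  A minimal sketch of such a loader module (the module name, wrapper class, and pickle-based
  persistence are illustrative, not part of the MLflow API)::
      # my_loader_module.py
      import pickle
      class _ModelWrapper:
          def __init__(self, model):
              self._model = model
          def predict(self, model_input):
              # assumes the wrapped object exposes a pandas-compatible predict()
              return self._model.predict(model_input)
      def _load_pyfunc(data_path):
          with open(data_path, "rb") as f:
              return _ModelWrapper(pickle.load(f))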
.. _pyfunc-create-custom-selecting-workflow:
Which workflow is right for my use case?
########################################
We consider the first workflow to be more user-friendly and generally recommend it for the
following reasons:
- It automatically resolves and collects specified model artifacts.
- It automatically serializes and deserializes the ``python_model`` instance and all of
its attributes, reducing the amount of user logic that is required to load the model
- You can create Models using logic that is defined in the ``__main__`` scope. This allows
custom models to be constructed in interactive environments, such as notebooks and the Python
REPL.
You may prefer the second, lower-level workflow for the following reasons:
- Inference logic is always persisted as code, rather than a Python object. This makes logic
easier to inspect and modify later.
- If you have already collected all of your model data in a single location, the second
workflow allows it to be saved in MLflow format directly, without enumerating constituent
artifacts.
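To make the comparison concrete, here is a minimal sketch of the first workflow (the
``AddN`` class, its ``n`` attribute, and the output path are illustrative)::
    import mlflow.pyfunc
    class AddN(mlflow.pyfunc.PythonModel):
        def __init__(self, n):
            self.n = n
        def predict(self, context, model_input):
            return model_input.apply(lambda column: column + self.n)
    mlflow.pyfunc.save_model(path="add_n_model", python_model=AddN(n=5))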
"""
import importlib
import numpy as np
import os
import pandas
import yaml
from copy import deepcopy
import logging
from typing import Any, Union, List, Dict
import mlflow
import mlflow.pyfunc.model
import mlflow.pyfunc.utils
from mlflow.models import Model, ModelSignature, ModelInputExample
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.utils import _save_example
from mlflow.pyfunc.model import PythonModel, PythonModelContext # pylint: disable=unused-import
from mlflow.pyfunc.model import get_default_conda_env
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.types import DataType, Schema
from mlflow.utils import PYTHON_VERSION, get_major_minor_py_version
from mlflow.utils.annotations import deprecated
from mlflow.utils.file_utils import TempDir, _copy_file_or_tree
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.exceptions import MlflowException
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.protos.databricks_pb2 import (
INVALID_PARAMETER_VALUE,
RESOURCE_ALREADY_EXISTS,
RESOURCE_DOES_NOT_EXIST,
)
FLAVOR_NAME = "python_function"
MAIN = "loader_module"
CODE = "code"
DATA = "data"
ENV = "env"
PY_VERSION = "python_version"
_logger = logging.getLogger(__name__)
PyFuncInput = Union[pandas.DataFrame, np.ndarray, List[Any], Dict[str, Any]]
PyFuncOutput = Union[pandas.DataFrame, pandas.Series, np.ndarray, list]
def add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):
"""
Add a ``pyfunc`` spec to the model configuration.
Defines ``pyfunc`` configuration schema. Caller can use this to create a valid ``pyfunc`` model
flavor out of an existing directory structure. For example, other model flavors can use this to
specify how to use their output as a ``pyfunc``.
NOTE:
All paths are relative to the exported model root directory.
:param model: Existing model.
:param loader_module: The module to be used to load the model.
:param data: Path to the model data.
:param code: Path to the code dependencies.
:param env: Conda environment.
:param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor specification.
Values must be YAML-serializable.
:return: Updated model configuration.
"""
parms = deepcopy(kwargs)
parms[MAIN] = loader_module
parms[PY_VERSION] = PYTHON_VERSION
if code:
parms[CODE] = code
if data:
parms[DATA] = data
if env:
parms[ENV] = env
return model.add_flavor(FLAVOR_NAME, **parms)
def _load_model_env(path):
"""
Get ENV file string from a model configuration stored in Python Function format.
Returned value is a model-relative path to a Conda Environment file,
or None if none was specified at model save time
"""
return _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME).get(ENV, None)
def _enforce_type(name, values: pandas.Series, t: DataType):
"""
    Enforce that the input column type matches the type declared in the model input schema.
The following type conversions are allowed:
1. np.object -> string
2. int -> long (upcast)
3. float -> double (upcast)
4. int -> double (safe conversion)
Any other type mismatch will raise error.
"""
if values.dtype == np.object and t not in (DataType.binary, DataType.string):
values = values.infer_objects()
if t == DataType.string and values.dtype == np.object:
# NB: strings are by default parsed and inferred as objects, but it is
# recommended to use StringDtype extension type if available. See
#
# `https://pandas.pydata.org/pandas-docs/stable/user_guide/text.html`
#
# for more detail.
try:
return values.astype(t.to_pandas(), errors="raise")
except ValueError:
raise MlflowException(
"Failed to convert column {0} from type {1} to {2}.".format(name, values.dtype, t)
)
# NB: Comparison of pandas and numpy data type fails when numpy data type is on the left hand
# side of the comparison operator. It works, however, if pandas type is on the left hand side.
# That is because pandas is aware of numpy.
if t.to_pandas() == values.dtype or t.to_numpy() == values.dtype:
# The types are already compatible => conversion is not necessary.
return values
if t == DataType.binary and values.dtype.kind == t.binary.to_numpy().kind:
# NB: bytes in numpy have variable itemsize depending on the length of the longest
# element in the array (column). Since MLflow binary type is length agnostic, we ignore
# itemsize when matching binary columns.
return values
numpy_type = t.to_numpy()
if values.dtype.kind == numpy_type.kind:
is_upcast = values.dtype.itemsize <= numpy_type.itemsize
elif values.dtype.kind == "u" and numpy_type.kind == "i":
is_upcast = values.dtype.itemsize < numpy_type.itemsize
elif values.dtype.kind in ("i", "u") and numpy_type == np.float64:
# allow (u)int => double conversion
is_upcast = values.dtype.itemsize <= 6
else:
is_upcast = False
if is_upcast:
return values.astype(numpy_type, errors="raise")
else:
# NB: conversion between incompatible types (e.g. floats -> ints or
# double -> float) are not allowed. While supported by pandas and numpy,
# these conversions alter the values significantly.
def all_ints(xs):
return all([pandas.isnull(x) or int(x) == x for x in xs])
hint = ""
if (
values.dtype == np.float64
and numpy_type.kind in ("i", "u")
and values.hasnans
and all_ints(values)
):
hint = (
" Hint: the type mismatch is likely caused by missing values. "
"Integer columns in python can not represent missing values and are therefore "
"encoded as floats. The best way to avoid this problem is to infer the model "
"schema based on a realistic data sample (training dataset) that includes missing "
"values. Alternatively, you can declare integer columns as doubles (float64) "
"whenever these columns may have missing values. See `Handling Integers With "
"Missing Values <https://www.mlflow.org/docs/latest/models.html#"
"handling-integers-with-missing-values>`_ for more details."
)
raise MlflowException(
"Incompatible input types for column {0}. "
"Can not safely convert {1} to {2}.{3}".format(name, values.dtype, numpy_type, hint)
)
def _enforce_schema(pdf: PyFuncInput, input_schema: Schema):
"""
Enforce column names and types match the input schema.
For column names, we check there are no missing columns and reorder the columns to match the
ordering declared in schema if necessary. Any extra columns are ignored.
For column types, we make sure the types match schema or can be safely converted to match the
input schema.
"""
if isinstance(pdf, (list, np.ndarray, dict)):
try:
pdf = | pandas.DataFrame(pdf) | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.advanced_activations import PReLU
from keras.models import Sequential
from keras.utils import np_utils
from hep_ml.losses import BinFlatnessLossFunction
from hep_ml.gradientboosting import UGradientBoostingClassifier
from sklearn.preprocessing import StandardScaler
trainingFilePath = 'training.csv'
testFilePath = 'test.csv'
def get_training_data():
filter_out = ['id', 'min_ANNmuon', 'production', 'mass', 'signal', 'SPDhits', 'IP', 'IPSig', 'isolationc']
f = open(trainingFilePath)
data = []
y = []
ids = []
for i, l in enumerate(f):
if i == 0:
labels = l.rstrip().split(',')
label_indices = dict((l, i) for i, l in enumerate(labels))
continue
values = l.rstrip().split(',')
filtered = []
for v, l in zip(values, labels):
if l not in filter_out:
filtered.append(float(v))
label = values[label_indices['signal']]
ID = values[0]
data.append(filtered)
y.append(float(label))
ids.append(ID)
return ids, np.array(data), np.array(y)
def get_test_data():
filter_out = ['id', 'min_ANNmuon', 'production', 'mass', 'signal', 'SPDhits', 'IP', 'IPSig', 'isolationc']
f = open(testFilePath)
data = []
ids = []
for i, l in enumerate(f):
if i == 0:
labels = l.rstrip().split(',')
continue
values = l.rstrip().split(',')
filtered = []
for v, l in zip(values, labels):
if l not in filter_out:
filtered.append(float(v))
ID = values[0]
data.append(filtered)
ids.append(ID)
return ids, np.array(data)
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
# get training data
ids, X, y = get_training_data()
print('Data shape:', X.shape)
# shuffle the data
np.random.seed(248)
np.random.shuffle(X)
np.random.seed(248)
np.random.shuffle(y)
print('Signal ratio:', np.sum(y) / y.shape[0])
# preprocess the data
X, scaler = preprocess_data(X)
y = np_utils.to_categorical(y)
# split into training / evaluation data
nb_train_sample = int(len(y) * 0.97)
X_train = X[:nb_train_sample]
X_eval = X[nb_train_sample:]
y_train = y[:nb_train_sample]
y_eval = y[nb_train_sample:]
print('Train on:', X_train.shape[0])
print('Eval on:', X_eval.shape[0])
# deep pyramidal MLP, narrowing with depth
model = Sequential()
model.add(Dropout(0.15))
model.add(Dense(X_train.shape[1],200))
model.add(PReLU((200,)))
model.add(Dropout(0.13))
model.add(Dense(200, 150))
model.add(PReLU((150,)))
model.add(Dropout(0.12))
model.add(Dense(150,100))
model.add(PReLU((100,)))
model.add(Dropout(0.11))
model.add(Dense(100, 50))
model.add(PReLU((50,)))
model.add(Dropout(0.09))
model.add(Dense(50, 30))
model.add(PReLU((30,)))
model.add(Dropout(0.07))
model.add(Dense(30, 25))
model.add(PReLU((25,)))
model.add(Dense(25, 2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# train model
model.fit(X_train, y_train, batch_size=128, nb_epoch=75, validation_data=(X_eval, y_eval), verbose=2, show_accuracy=True)
# generate submission
ids, X = get_test_data()
print('Data shape:', X.shape)
X, scaler = preprocess_data(X, scaler)
predskeras = model.predict(X, batch_size=256)[:, 1]
print("Load the training/test data using pandas")
train = pd.read_csv(trainingFilePath)
test = | pd.read_csv(testFilePath) | pandas.read_csv |
from dataapi import SGS
from bloomberg import BBG
import numpy as np
import pandas as pd
from sklearn import preprocessing
getdata = SGS()
bbg = BBG()
start_date = pd.to_datetime("01-01-2001")
end_date = pd.to_datetime("07-01-2019")
#fetching Brazil FGV Consumer Confidence Index SA Sep 2005=100 Original Date: '30-sep-2005'
df = bbg.fetch_series(securities=['BZFGCCSA Index'],
fields=['PX_LAST'],
startdate=start_date,
enddate=end_date)
consbr = pd.DataFrame(data=df)
consbr = consbr.droplevel(0)
consbr = consbr.reset_index()
consbr = consbr.set_index('TRADE_DATE')
consbr = consbr.resample('Q').mean()
# Normalized series Consumer Confidence
x = np.array(consbr['BZFGCCSA Index'])
x = x.reshape(-1,1)
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
confnorm = consbr
confnorm['BZFGCCSA Normalized'] = ''
confnorm['BZFGCCSA Normalized'] = x_scaled
confnorm = confnorm.drop('BZFGCCSA Index', axis=1)
#fetching GDP Growth in R$
df_gr = pd.DataFrame(getdata.fetch("1207",start_date, end_date)) #for GDP in dollars, change the string to 7324
df_gr = df_gr['1207'].resample('Q').mean()
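# pct_change(4) on the quarterly series gives year-over-year growth (change vs. the same quarter of the previous year)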
df_gr = df_gr.pct_change(4)
df_gr = df_gr.dropna()
#normalizing GDP
x = df_gr.values
x = x.reshape(-1,1)
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df_gr_norm = | pd.DataFrame(x_scaled, index=df_gr.index, columns=['GDP Growth Normalized']) | pandas.DataFrame |
import argparse
import datetime
import logging
import os
import synapseclient
import genie
import pandas as pd
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def get_center_data_completion(center, df):
'''
    Get center data completion. Calculates how complete a clinical
    data element is: the number of values that are not blank/Unknown/NA
    divided by the total number of patients or samples
Args:
center: GENIE center
df: sample or patient dataframe
Returns:
Dataframe: Center data
'''
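    # Worked example: if 3 of a center's 4 samples have a non-missing, collected value
    # for a field, the completeness reported for that (field, center) pair is 0.75.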
centerdf = df[df['CENTER'] == center]
total = len(centerdf)
center_data = pd.DataFrame()
skip_cols = ['CENTER', 'PATIENT_ID', 'SAMPLE_ID', 'SAMPLE_TYPE_DETAILED',
'SECONDARY_RACE', 'TERTIARY_RACE']
for col in centerdf:
if col not in skip_cols:
not_missing = [not pd.isnull(value) and value != 'Not Collected'
for value in centerdf[col]]
completeness = float(sum(not_missing)) / int(total)
returned = pd.DataFrame([[col, center, total, completeness]])
center_data = center_data.append(returned)
return(center_data)
def update_samples_in_release_table(syn, file_mapping, release,
samples_in_release_synid):
'''
Convenience function that updates the sample in release table
This tracks the samples of each release. 1 means it exists, and 0
means it doesn't
Args:
syn: synapse object
file_mapping: file mapping generated from file mapping function
release: GENIE release number (ie. 5.3-consortium)
samples_in_release_synid: Synapse Id of 'samples in release' Table
'''
clinical_ent = syn.get(file_mapping['clinical'], followLink=True)
clinicaldf = pd.read_csv(clinical_ent.path, sep="\t", comment="#")
cols = [i['name'] for i in
list(syn.getTableColumns(samples_in_release_synid))]
if release not in cols:
schema = syn.get(samples_in_release_synid)
syn_col = synapseclient.Column(
name=release, columnType='INTEGER', defaultValue=0)
new_column = syn.store(syn_col)
schema.addColumn(new_column)
schema = syn.store(schema)
# Columns of samples in release
samples_per_release = syn.tableQuery(
'SELECT SAMPLE_ID, "%s" FROM %s' % (release, samples_in_release_synid))
samples_per_releasedf = samples_per_release.asDataFrame()
new_samples = clinicaldf[['SAMPLE_ID']][
~clinicaldf.SAMPLE_ID.isin(samples_per_releasedf.SAMPLE_ID)]
new_samples[release] = 1
old_samples = clinicaldf[['SAMPLE_ID']][
clinicaldf.SAMPLE_ID.isin(samples_per_releasedf.SAMPLE_ID)]
old_samples[release] = 1
samples_in_releasedf = new_samples.append(old_samples)
genie.process_functions.updateDatabase(
syn, samples_per_releasedf,
samples_in_releasedf, samples_in_release_synid, ["SAMPLE_ID"])
def update_cumulative_sample_table(syn, file_mapping, release,
cumulative_sample_count_synid):
'''
Consortium release sample count table update function
This gets the cumulative sample count of each file type in each release
Args:
syn: synapse object
file_mapping: file mapping generated from file mapping function
release: GENIE release number (ie. 5.3-consortium)
cumulative_sample_count_synid: Synapse Id of
'Cumulative sample count' Table
'''
sample_count_per_round = syn.tableQuery(
'SELECT * FROM %s' % cumulative_sample_count_synid)
sample_count_per_rounddf = sample_count_per_round.asDataFrame()
clinical_ent = syn.get(file_mapping['clinical'], followLink=True)
clinicaldf = pd.read_csv(clinical_ent.path, sep="\t", comment="#")
clinicaldf.columns = [i.upper() for i in clinicaldf.columns]
if clinicaldf.get("CENTER") is None:
clinicaldf['CENTER'] = \
[sample.split("-")[1] for sample in clinicaldf.SAMPLE_ID]
clinical_counts = clinicaldf['CENTER'].value_counts()
clinical_counts['Total'] = sum(clinical_counts)
clinical_counts.name = "Clinical"
fusion_ent = syn.get(file_mapping['fusion'], followLink=True)
fusiondf = pd.read_csv(fusion_ent.path, sep="\t", comment="#")
fusiondf.columns = [i.upper() for i in fusiondf.columns]
fusion_counts = fusiondf['CENTER'][
~fusiondf['TUMOR_SAMPLE_BARCODE'].duplicated()].value_counts()
fusion_counts['Total'] = sum(fusion_counts)
cna_ent = syn.get(file_mapping['cna'], followLink=True)
cnadf = pd.read_csv(cna_ent.path, sep="\t", comment="#")
cna_counts = pd.Series(
[i.split("-")[1] for i in cnadf.columns[1:]]).value_counts()
cna_counts['Total'] = sum(cna_counts)
seg_ent = syn.get(file_mapping['seg'], followLink=True)
segdf = pd.read_csv(seg_ent.path, sep="\t", comment="#")
segdf.columns = [i.upper() for i in segdf.columns]
segdf['CENTER'] = [i.split("-")[1] for i in segdf['ID']]
seg_counts = segdf['CENTER'][~segdf['ID'].duplicated()].value_counts()
seg_counts['Total'] = sum(seg_counts)
total_counts = pd.DataFrame(clinical_counts)
total_counts['Fusions'] = fusion_counts
total_counts['CNV'] = cna_counts
total_counts['Mutation'] = clinical_counts
total_counts['SEG'] = seg_counts
total_counts = total_counts.fillna(0)
total_counts = total_counts.applymap(int)
total_counts['Center'] = total_counts.index
total_counts['Release'] = release
genie.process_functions.updateDatabase(
syn, sample_count_per_rounddf, total_counts,
cumulative_sample_count_synid, ["Center", "Release"])
def get_file_mapping(syn, release_folder_synid):
"""
Get file mapping between important files needed for dashboard and
their synapse ids
Args:
syn: synapse object
release_folder_synid: synapse id of release
"""
files = syn.getChildren(release_folder_synid)
file_mapping = dict()
for metadata in files:
filename = metadata['name']
synid = metadata['id']
if not filename.startswith("meta"):
if filename.startswith("data_clinical_sample"):
file_mapping['clinical'] = synid
elif filename.endswith("fusions.txt"):
file_mapping['fusion'] = synid
elif filename.endswith("CNA.txt"):
file_mapping['cna'] = synid
elif filename.endswith(".seg"):
file_mapping['seg'] = synid
return(file_mapping)
def update_release_numbers(syn, database_mappingdf, release=None):
'''
Function that updates all release dashboard numbers or
specific release number
Args:
syn: synapse object
database_mappingdf: mapping between synapse ids and database
release: GENIE release (ie. 5.3-consortium). Defaults to None
'''
# Update release table with current release or all releases
samples_in_release_synid = database_mappingdf['Id'][
database_mappingdf['Database'] == 'samplesInRelease'].values[0]
cumulative_sample_count_synid = database_mappingdf['Id'][
database_mappingdf['Database'] == 'cumulativeSampleCount'].values[0]
release_folder_fileview_synid = database_mappingdf['Id'][
database_mappingdf['Database'] == 'releaseFolder'].values[0]
release_folder = syn.tableQuery(
"select id,name from %s" % release_folder_fileview_synid +
" where name not like 'Release%' and name <> 'case_lists' and " +
"name not like '%.0.%'")
release_folderdf = release_folder.asDataFrame()
for rel_synid, rel_name in zip(release_folderdf.id, release_folderdf.name):
file_mapping = get_file_mapping(syn, rel_synid)
        # If a release is specified, only process that one;
        # otherwise process all releases
if release is None or release == rel_name:
update_samples_in_release_table(
syn, file_mapping, rel_name, samples_in_release_synid)
update_cumulative_sample_table(
syn, file_mapping, rel_name, cumulative_sample_count_synid)
else:
pass
def update_database_numbers(syn, database_mappingdf):
'''
Updates database cumulative numbers (Only called when not staging)
Args:
syn: synapse object
database_mappingdf: mapping between synapse ids and database
'''
cumulative_sample_count_synid = database_mappingdf['Id'][
database_mappingdf['Database'] == 'cumulativeSampleCount'].values[0]
# Database
database_count = syn.tableQuery(
"SELECT * FROM %s where Release = 'Database'" %
cumulative_sample_count_synid)
database_countdf = database_count.asDataFrame()
clinical = syn.tableQuery('select CENTER from syn7517674')
clinicaldf = clinical.asDataFrame()
clinincal_counts = clinicaldf['CENTER'].value_counts()
clinincal_counts['Total'] = sum(clinincal_counts)
clinincal_counts.name = "Clinical"
fusion = syn.tableQuery('select * from syn7893268')
fusiondf = fusion.asDataFrame()
fusion_counts = fusiondf['CENTER'][
~fusiondf['TUMOR_SAMPLE_BARCODE'].duplicated()].value_counts()
fusion_counts['Total'] = sum(fusion_counts)
center_flat_files = syn.getChildren("syn12278118")
cna_file_paths = [syn.get(file['id']).path for file in center_flat_files if
file['name'].startswith("data_CNA")]
cna_numbers = {}
for cna_file in cna_file_paths:
center = os.path.basename(cna_file).replace(".txt", "").split("_")[2]
with open(cna_file, 'r') as cna:
header = cna.readline()
samples = header.split("\t")
# Minus one because of Hugo_Symbol
cna_numbers[center] = len(samples) - 1
cna_counts = pd.Series(cna_numbers)
cna_counts['Total'] = sum(cna_counts)
seg = syn.tableQuery('select * from syn7893341')
segdf = seg.asDataFrame()
seg_counts = segdf['CENTER'][~segdf['ID'].duplicated()].value_counts()
seg_counts['Total'] = sum(seg_counts)
db_counts = pd.DataFrame(clinincal_counts)
db_counts['Fusions'] = fusion_counts
db_counts['CNV'] = cna_counts
db_counts['Mutation'] = clinincal_counts
db_counts['SEG'] = seg_counts
db_counts = db_counts.fillna(0)
db_counts = db_counts.applymap(int)
db_counts['Center'] = db_counts.index
db_counts['Release'] = "Database"
genie.process_functions.updateDatabase(
syn, database_countdf, db_counts,
cumulative_sample_count_synid, ["Center", "Release"])
today = datetime.date.today()
if today.month in [1, 4, 8, 12]:
db_count_tracker = db_counts[['Clinical', 'Center', 'Release']]
db_count_tracker.rename(
columns={'Clinical': 'sample_count',
'Center': 'center',
'Release': 'date'},
inplace=True)
db_count_tracker['date'] = today.strftime("%b-%Y")
# Hard coded syn id
syn.store(synapseclient.Table("syn18404852", db_count_tracker))
def update_oncotree_code_tables(syn, database_mappingdf):
'''
Function that updates database statistics of oncotree codes
and primary onocotree codes
Args:
syn: synapse object
database_mappingdf: mapping between synapse ids and database
'''
oncotree_distribution_synid = database_mappingdf['Id'][
database_mappingdf['Database'] == 'oncotree'].values[0]
clinical = syn.tableQuery('select * from syn7517674')
clinicaldf = clinical.asDataFrame()
# DISTRIBUTION OF ONCOTREE CODE TABLE UPDATE
oncotree_code_distributiondf = pd.DataFrame(
columns=set(clinicaldf['CENTER']),
index=set(clinicaldf['ONCOTREE_CODE']))
for center in oncotree_code_distributiondf.columns:
onc_counts = clinicaldf['ONCOTREE_CODE'][
clinicaldf['CENTER'] == center].value_counts()
oncotree_code_distributiondf[center] = onc_counts
oncotree_code_distributiondf = oncotree_code_distributiondf.fillna(0)
oncotree_code_distributiondf = oncotree_code_distributiondf.applymap(int)
oncotree_code_distributiondf['Total'] = \
oncotree_code_distributiondf.apply(sum, axis=1)
oncotree_code_distributiondf['Oncotree_Code'] = \
oncotree_code_distributiondf.index
oncotree_distribution_db = syn.tableQuery(
'SELECT %s FROM %s' %
("Oncotree_Code," + ",".join(clinicaldf['CENTER'].unique()) +
",Total", oncotree_distribution_synid))
oncotree_distribution_dbdf = oncotree_distribution_db.asDataFrame()
genie.process_functions.updateDatabase(
syn, oncotree_distribution_dbdf, oncotree_code_distributiondf,
oncotree_distribution_synid, ["Oncotree_Code"], to_delete=True)
# DISTRIBUTION OF PRIMARY CODE TABLE UPDATE
oncotree_link_synid = database_mappingdf['Id'][
database_mappingdf['Database'] == 'oncotreeLink'].values[0]
primary_code_synId = database_mappingdf['Id'][
database_mappingdf['Database'] == 'primaryCode'].values[0]
'''
Can also use most up to date oncotree code,
because these tables are updated from the database
'''
oncotree_link_ent = syn.get(oncotree_link_synid)
oncotree_link = oncotree_link_ent.externalURL
oncotree_mapping = \
genie.process_functions.get_oncotree_code_mappings(oncotree_link)
clinicaldf['PRIMARY_CODES'] = \
[oncotree_mapping[i.upper()]['ONCOTREE_PRIMARY_NODE']
if i.upper() in oncotree_mapping.keys() else 'DEPRECATED_CODE'
for i in clinicaldf.ONCOTREE_CODE]
# ### DISTRIBUTION OF PRIMARY ONCOTREE CODE TABLE UPDATE
primary_code_distributiondf = pd.DataFrame(
columns=set(clinicaldf['CENTER']),
index=set(clinicaldf['PRIMARY_CODES']))
for center in primary_code_distributiondf.columns:
onc_counts = clinicaldf['PRIMARY_CODES'][
clinicaldf['CENTER'] == center].value_counts()
primary_code_distributiondf[center] = onc_counts
primary_code_distributiondf = primary_code_distributiondf.fillna(0)
primary_code_distributiondf = primary_code_distributiondf.applymap(int)
primary_code_distributiondf['Total'] = \
primary_code_distributiondf.apply(sum, axis=1)
primary_code_distributiondf['Oncotree_Code'] = \
primary_code_distributiondf.index
primary_code_dist_db = syn.tableQuery(
'SELECT %s FROM %s' %
("Oncotree_Code," + ",".join(clinicaldf['CENTER'].unique()) +
",Total", primary_code_synId))
primary_code_dist_dbdf = primary_code_dist_db.asDataFrame()
genie.process_functions.updateDatabase(
syn, primary_code_dist_dbdf, primary_code_distributiondf,
primary_code_synId, ["Oncotree_Code"], to_delete=True)
def update_sample_difference_table(syn, database_mappingdf):
'''
Function that updates sample difference table between
consortium releases
Args:
syn: synapse object
database_mappingdf: mapping between synapse ids and database
'''
cumulative_sample_count_synid = database_mappingdf['Id'][
database_mappingdf['Database'] == 'cumulativeSampleCount'].values[0]
sample_diff_count_synid = database_mappingdf['Id'][
database_mappingdf['Database'] == 'sampleDiffCount'].values[0]
# UPDATE DIFF TABLE
sample_count_per_round = syn.tableQuery(
"SELECT * FROM %s where Center <> 'Total' and Release <> 'Database'"
% cumulative_sample_count_synid)
sample_count_per_rounddf = sample_count_per_round.asDataFrame()
releases = list(sample_count_per_rounddf['Release'].unique())
# sort the releases and remove public releases
releases.sort()
consortium_releases = [
release for release in releases if "public" not in release
and ".0." not in release]
diff_between_releasesdf = sample_count_per_rounddf[
sample_count_per_rounddf['Release'] == consortium_releases[0]]
for index, release_name in enumerate(consortium_releases[1:]):
prior_release = sample_count_per_rounddf[
sample_count_per_rounddf['Release'] == consortium_releases[index]]
current_release = sample_count_per_rounddf[
sample_count_per_rounddf['Release'] == release_name]
prior_release.index = prior_release['Center']
current_release.index = current_release['Center']
del prior_release['Center']
del prior_release['Release']
del current_release['Center']
del current_release['Release']
# Append new rows of centers that are new and
# just added to the releases
new_centers = current_release.index[
~current_release.index.isin(prior_release.index)]
if not new_centers.empty:
prior_release = prior_release.append(
pd.DataFrame(index=new_centers))
prior_release = prior_release.fillna(0)
difference = current_release - prior_release
difference['Center'] = difference.index
difference['Release'] = release_name
diff_between_releasesdf = diff_between_releasesdf.append(difference)
difftable_db = syn.tableQuery('SELECT * FROM %s' % sample_diff_count_synid)
difftable_dbdf = difftable_db.asDataFrame()
difftable_dbdf = difftable_dbdf.fillna(0)
new_values = diff_between_releasesdf[[
'Clinical', 'Mutation',
'CNV', 'SEG', 'Fusions']].fillna(0).applymap(int)
diff_between_releasesdf[
['Clinical', 'Mutation', 'CNV', 'SEG', 'Fusions']] = new_values
genie.process_functions.updateDatabase(
syn, difftable_dbdf, diff_between_releasesdf,
sample_diff_count_synid, ["Center", "Release"], to_delete=True)
def update_data_completeness_table(syn, database_mappingdf):
'''
Function that updates the data completeness of the database
Args:
syn: synapse object
database_mappingdf: mapping between synapse ids and database
'''
data_completion_synid = database_mappingdf['Id'][
database_mappingdf['Database'] == 'dataCompletion'].values[0]
sample = syn.tableQuery('select * from syn7517674')
sampledf = sample.asDataFrame()
patient = syn.tableQuery('select * from syn7517669')
patientdf = patient.asDataFrame()
data_completenessdf = pd.DataFrame()
center_infos = sampledf.CENTER.drop_duplicates().apply(
lambda center: get_center_data_completion(center, sampledf))
for center_info in center_infos:
data_completenessdf = data_completenessdf.append(center_info)
center_infos = patientdf.CENTER.drop_duplicates().apply(
lambda center: get_center_data_completion(center, patientdf))
for center_info in center_infos:
data_completenessdf = data_completenessdf.append(center_info)
data_completeness_db = syn.tableQuery(
'select * from %s' % data_completion_synid)
data_completeness_dbdf = data_completeness_db.asDataFrame()
data_completenessdf.columns = data_completeness_dbdf.columns
genie.process_functions.updateDatabase(
syn,
data_completeness_dbdf,
data_completenessdf,
data_completion_synid,
["FIELD", "CENTER"],
to_delete=True)
def update_wiki(syn, database_mappingdf):
'''
Updates the GENIE project dashboard wiki timestamp
Args:
syn: synapse object
database_mappingdf: mapping between synapse ids and database
'''
# Updates to query and date dashboard was updated
cumulative_sample_count_synid = database_mappingdf['Id'][
database_mappingdf['Database'] == 'cumulativeSampleCount'].values[0]
primary_code_synId = database_mappingdf['Id'][
database_mappingdf['Database'] == 'primaryCode'].values[0]
centers = syn.tableQuery(
'select distinct(CENTER) as CENTER from syn7517674')
centersdf = centers.asDataFrame()
now = datetime.datetime.now()
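    # Note: the Synapse wiki widget queries built below are URL-encoded; %2C is a comma
    # and %5F is an underscore, so e.g. "Oncotree%5FCode" encodes the Oncotree_Code column.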
markdown = \
["_Updated {month}/{day}/{year}_\n\n".format(
month=now.month,
day=now.day,
year=now.year),
"##Count of Clinical Samples\n",
"${synapsetable?query=SELECT Center%2C Clinical%2C Release FROM " +
cumulative_sample_count_synid + "}\n\n",
"\n\n##Primary Oncotree Codes\n\n",
"${synapsetable?query=SELECT Oncotree%5FCode%2C " +
"%2C ".join(centersdf['CENTER'].unique()) +
"%2C Total FROM " + primary_code_synId +
" ORDER BY Total DESC&limit=15}\n\n"]
wikiPage = syn.getWiki("syn3380222", 235803)
wikiPage.markdown = "".join(markdown)
syn.store(wikiPage)
def string_to_unix_epoch_time_milliseconds(string_time):
'''
    This function takes a date string in this format: 2018-10-25T20:16:07.959Z
    and converts it to unix epoch time in milliseconds
Args:
string_time: string in this format: 2018-10-25T20:16:07.959Z
'''
datetime_obj = datetime.datetime.strptime(
string_time.split(".")[0], "%Y-%m-%dT%H:%M:%S")
return(synapseclient.utils.to_unix_epoch_time(datetime_obj))
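# Illustrative sketch (hypothetical value): calling
#   string_to_unix_epoch_time_milliseconds("2018-10-25T20:16:07.959Z")
# drops the fractional seconds and, assuming synapseclient treats the naive datetime as
# UTC, returns roughly 1540498567000 (milliseconds since the Unix epoch).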
def update_data_release_file_table(syn, database_mappingdf):
release_folder_fileview_synid = database_mappingdf['Id'][
database_mappingdf['Database'] == 'releaseFolder'].values[0]
release_folder = syn.tableQuery(
"select id,name from %s" % release_folder_fileview_synid +
" where name not like 'Release%' and name <> 'case_lists' " +
"and name not like '0.%'")
release_folderdf = release_folder.asDataFrame()
data_release_table_synid = "syn16804261"
data_release_table = syn.tableQuery(
"select * from %s" % data_release_table_synid)
data_release_tabledf = data_release_table.asDataFrame()
not_in_release_tabledf = release_folderdf[
~release_folderdf.name.isin(data_release_tabledf.release)]
for synid, name in \
zip(not_in_release_tabledf.id, not_in_release_tabledf.name):
release_files = syn.getChildren(synid)
append_rows = [
[release_file['name'],
release_file['id'],
name,
string_to_unix_epoch_time_milliseconds(
release_file['modifiedOn']), synid]
for release_file in release_files
if release_file['name'] != "case_lists"]
syn.store(synapseclient.Table(data_release_table_synid, append_rows))
def check_column_decreases(currentdf, olderdf):
"""
Check entity decreases
Args:
        currentdf: Current release dataframe
        olderdf: Older release dataframe
"""
diff_map = dict()
for col in currentdf:
new_counts = currentdf[col].value_counts()
if olderdf.get(col) is not None:
old_counts = olderdf[col].value_counts()
# Make sure any values that exist in the new get added
# to the old to show the decrease
new_keys = pd.Series(index=new_counts.keys()[
~new_counts.keys().isin(old_counts.keys())])
old_counts = old_counts.add(new_keys, fill_value=0)
old_counts.fillna(0, inplace=True)
# Make sure any values that don't exist in the old get added
# to show the decrease
new_keys = pd.Series(index=old_counts.keys()[
~old_counts.keys().isin(new_counts.keys())])
new_counts = new_counts.add(new_keys, fill_value=0)
new_counts.fillna(0, inplace=True)
if any(new_counts - old_counts < 0):
logger.info("\tDECREASE IN COLUMN: %s" % col)
# diff = new_counts[new_counts - old_counts < 0]
diffs = new_counts-old_counts
diffstext = diffs[diffs < 0].to_csv().replace("\n", "; ")
logger.info("\t" + diffstext)
diff_map[col] = True
else:
diff_map[col] = False
return(diff_map)
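# Illustrative sketch (hypothetical frames): given
#   olderdf = pd.DataFrame({"SEX": ["F", "F", "M"]})
#   currentdf = pd.DataFrame({"SEX": ["F", "M", "M"]})
# check_column_decreases(currentdf, olderdf) logs a decrease for "F" (2 -> 1) and
# returns {"SEX": True}.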
def print_clinical_values_difference_table(syn, database_mappingdf):
'''
Function that checks for a decrease in values in the clinical file
from last consortium release to most recent consortium release
Args:
syn: synapse object
database_mappingdf: mapping between synapse ids and database
'''
release_folder_fileview_synid = database_mappingdf['Id'][
database_mappingdf['Database'] == 'releaseFolder'].values[0]
clinical_key_decrease_synid = database_mappingdf['Id'][
database_mappingdf['Database'] == 'clinicalKeyDecrease'].values[0]
release_folder = syn.tableQuery(
"select id,name from %s" % release_folder_fileview_synid +
" where name not like 'Release%' and name <> 'case_lists' " +
"and name not like '%.0.%' and name not like '%-public'")
release_folderdf = release_folder.asDataFrame()
release_folderdf.sort_values("name", ascending=False, inplace=True)
current_release = release_folderdf['id'][0]
older_release = release_folderdf['id'][1]
current_release_files = syn.getChildren(current_release)
current_clinical_synids = {
file['name']: file['id']
for file in current_release_files if file['name'] in
['data_clinical_sample.txt', 'data_clinical_patient.txt']}
older_release_files = syn.getChildren(older_release)
older_clinical_synids = {
file['name']: file['id']
for file in older_release_files if file['name'] in
['data_clinical_sample.txt', 'data_clinical_patient.txt']}
current_sample_ent = syn.get(
current_clinical_synids['data_clinical_sample.txt'], followLink=True)
older_sample_ent = syn.get(
older_clinical_synids['data_clinical_sample.txt'], followLink=True)
current_sampledf = pd.read_csv(
current_sample_ent.path, sep="\t", comment="#")
current_sampledf['CENTER'] = [
patient.split("-")[1] for patient in current_sampledf['PATIENT_ID']]
older_sampledf = pd.read_csv(older_sample_ent.path, sep="\t", comment="#")
older_sampledf['CENTER'] = [
patient.split("-")[1] for patient in older_sampledf['PATIENT_ID']]
# Rather than take the CENTER, must take the SAMPLE_ID to compare
current_sampledf = current_sampledf[current_sampledf['SAMPLE_ID'].isin(
older_sampledf['SAMPLE_ID'].unique())]
logger.info("SAMPLE CLINICAL VALUE DECREASES")
center_decrease_mapping = dict()
for center in older_sampledf['CENTER'].unique():
current_center_sampledf = current_sampledf[
current_sampledf['CENTER'] == center]
older_center_sampledf = older_sampledf[
older_sampledf['CENTER'] == center]
logger.info(center)
decrease_map = check_column_decreases(
current_center_sampledf, older_center_sampledf)
center_decrease_mapping[center] = decrease_map
current_patient_ent = syn.get(
current_clinical_synids['data_clinical_patient.txt'], followLink=True)
older_patient_ent = syn.get(
older_clinical_synids['data_clinical_patient.txt'], followLink=True)
current_patientdf = pd.read_csv(
current_patient_ent.path, sep="\t", comment="#")
older_patientdf = pd.read_csv(
older_patient_ent.path, sep="\t", comment="#")
# Rather than take the CENTER, must take the PATIENT_ID to compare
current_patientdf = current_patientdf[current_patientdf['PATIENT_ID'].isin(
older_patientdf['PATIENT_ID'].unique())]
logger.info("PATIENT CLINICAL VALUE DECREASES")
for center in older_patientdf['CENTER'].unique():
current_center_patientdf = current_patientdf[
current_patientdf['CENTER'] == center]
older_center_patientdf = older_patientdf[
older_patientdf['CENTER'] == center]
logger.info(center)
patient_decrease_map = check_column_decreases(
current_center_patientdf, older_center_patientdf)
center_decrease_mapping[center].update(patient_decrease_map)
    center_decrease_mapping = pd.DataFrame(center_decrease_mapping)
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.randn(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
tm.assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = np.nan
expected.values[mask.values] = np.nan
tm.assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
tm.assert_frame_equal(df, expected)
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
float_frame["B"] = 0
assert float_frame["B"].dtype == np.int64
# cast if pass array of course
float_frame["B"] = np.arange(len(float_frame))
assert issubclass(float_frame["B"].dtype.type, np.integer)
float_frame["foo"] = "bar"
float_frame["foo"] = 0
assert float_frame["foo"].dtype == np.int64
float_frame["foo"] = "bar"
float_frame["foo"] = 2.5
assert float_frame["foo"].dtype == np.float64
float_frame["something"] = 0
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2.5
assert float_frame["something"].dtype == np.float64
def test_setitem_corner(self, float_frame):
# corner case
df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
del df["B"]
df["B"] = [1.0, 2.0, 3.0]
assert "B" in df
assert len(df.columns) == 2
df["A"] = "beginning"
df["E"] = "foo"
df["D"] = "bar"
df[datetime.now()] = "date"
df[datetime.now()] = 5.0
# what to do when empty frame with index
dm = DataFrame(index=float_frame.index)
dm["A"] = "foo"
dm["B"] = "bar"
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm["C"] = 1
assert dm["C"].dtype == np.int64
dm["E"] = 1.0
assert dm["E"].dtype == np.float64
# set existing column
dm["A"] = "bar"
assert "bar" == dm["A"][0]
dm = DataFrame(index=np.arange(3))
dm["A"] = 1
dm["foo"] = "bar"
del dm["foo"]
dm["foo"] = "bar"
assert dm["foo"].dtype == np.object_
dm["coercible"] = ["1", "2", "3"]
assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_None(self, float_frame):
# GH #766
float_frame[None] = float_frame["A"]
tm.assert_series_equal(
float_frame.iloc[:, -1], float_frame["A"], check_names=False
)
tm.assert_series_equal(
float_frame.loc[:, None], float_frame["A"], check_names=False
)
tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
repr(float_frame)
def test_loc_setitem_boolean_mask_allfalse(self):
# GH 9596
df = DataFrame(
{"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
)
result = df.copy()
result.loc[result.b.isna(), "a"] = result.a
tm.assert_frame_equal(result, df)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=range(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
@td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view
def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
sliced.loc[:, "C"] = 4.0
assert (float_frame["C"] == 4).all()
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
# GH#1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_scalar(self, float_frame):
f = float_frame
expected = float_frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = np.random.randn()
expected.values[i, j] = val
ix[idx, col] = val
tm.assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self, float_frame):
f = float_frame
ix = f.loc
expected = f.reindex(columns=["B", "D"])
result = ix[:, [False, True, False, True]]
tm.assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=["B", "D"])
result = ix[f.index[5:10], [False, True, False, True]]
tm.assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, :]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec], columns=["C", "D"])
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_boolean(self, float_frame):
# from 2d, set with booleans
frame = float_frame.copy()
expected = float_frame.copy()
mask = frame["A"] > 0
frame.loc[mask] = 0.0
expected.values[mask.values] = 0.0
tm.assert_frame_equal(frame, expected)
frame = float_frame.copy()
expected = float_frame.copy()
frame.loc[mask, ["A", "B"]] = 0.0
expected.values[mask.values, :2] = 0.0
tm.assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self, float_frame):
result = float_frame.iloc[[1, 4, 7]]
expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
tm.assert_frame_equal(result, expected)
result = float_frame.iloc[:, [2, 0, 1]]
expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_boolean_misaligned(self, float_frame):
# boolean index misaligned labels
mask = float_frame["A"][::-1] > 1
result = float_frame.loc[mask]
expected = float_frame.loc[mask[::-1]]
tm.assert_frame_equal(result, expected)
cp = float_frame.copy()
expected = float_frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
tm.assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
tm.assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]), np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
tm.assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.loc[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4:5]
expected = df.reindex([4, 5]) # reindex with int
tm.assert_frame_equal(result, expected, check_index_type=False)
assert len(result) == 2
result = df.loc[4:5]
expected = df.reindex([4.0, 5.0]) # reindex with float
tm.assert_frame_equal(result, expected)
assert len(result) == 2
# loc_float changes this to work properly
result = df.loc[1:2]
expected = df.iloc[0:2]
tm.assert_frame_equal(result, expected)
df.loc[1:2] = 0
result = df[1:2]
assert (result == 0).all().all()
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
msg = (
"cannot do positional indexing on Float64Index with "
r"these indexers \[1.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
result = df.iloc[4:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
cp = df.copy()
with pytest.raises(TypeError, match=_slice_msg):
cp.iloc[1.0:5] = 0
with pytest.raises(TypeError, match=msg):
result = cp.iloc[1.0:5] == 0
assert result.values.all()
assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
assert (cp.iloc[4:5] == 0).values.all()
assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
expected = df
tm.assert_frame_equal(result, expected)
assert len(result) == 5
result = df.loc[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4.51:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
result = df.loc[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 5
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
assert (result == 0).values.all()
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["foo", "bar", "baz"],
)
df["timestamp"] = Timestamp("20010102")
# check our dtypes
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 3 + [np.dtype("datetime64[ns]")],
index=["foo", "bar", "baz", "timestamp"],
)
tm.assert_series_equal(result, expected)
# GH#16674 iNaT is treated as an integer when given by the user
df.loc["b", "timestamp"] = iNaT
assert not isna(df.loc["b", "timestamp"])
assert df["timestamp"].dtype == np.object_
assert df.loc["b", "timestamp"] == iNaT
# allow this syntax (as of GH#3216)
df.loc["c", "timestamp"] = np.nan
assert isna(df.loc["c", "timestamp"])
# allow this syntax
df.loc["d", :] = np.nan
assert not isna(df.loc["c", :]).all()
def test_setitem_mixed_datetime(self):
# GH 9336
expected = DataFrame(
{
"a": [0, 0, 0, 0, 13, 14],
"b": [
datetime(2012, 1, 1),
1,
"x",
"y",
datetime(2013, 1, 1),
datetime(2014, 1, 1),
],
}
)
df = DataFrame(0, columns=list("ab"), index=range(6))
df["b"] = pd.NaT
df.loc[0, "b"] = datetime(2012, 1, 1)
df.loc[1, "b"] = 1
df.loc[[2, 3], "b"] = "x", "y"
A = np.array(
[
[13, np.datetime64("2013-01-01T00:00:00")],
[14, np.datetime64("2014-01-01T00:00:00")],
]
)
df.loc[[4, 5], ["a", "b"]] = A
tm.assert_frame_equal(df, expected)
def test_setitem_frame_float(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
float_frame.loc[float_frame.index[-2] :, ["A", "B"]] = piece.values
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_setitem_frame_mixed(self, float_string_frame):
# GH 3216
# already aligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=f.index[0:2], columns=["A", "B"]
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame):
# GH#3216 rows unaligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
index=list(f.index[0:2]) + ["foo", "bar"],
columns=["A", "B"],
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(
f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2]
)
def test_setitem_frame_mixed_key_unaligned(self, float_string_frame):
# GH#3216 key is unaligned with values
f = float_string_frame.copy()
piece = f.loc[f.index[:2], ["A"]]
piece.index = f.index[-2:]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece
piece["B"] = np.nan
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_ndarray(self, float_string_frame):
# GH#3216 ndarray
f = float_string_frame.copy()
piece = float_string_frame.loc[f.index[:2], ["A", "B"]]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece.values
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_upcast(self):
# needs upcasting
df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])
df2 = df.copy()
df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
expected = df.reindex(columns=["A", "B"])
expected += 0.5
expected["C"] = df["C"]
tm.assert_frame_equal(df2, expected)
def test_setitem_frame_align(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
piece.index = float_frame.index[-2:]
piece.columns = ["A", "B"]
float_frame.loc[float_frame.index[-2:], ["A", "B"]] = piece
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc["foo"]
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.loc["bar"]
expected = df.iloc[[2, 4]]
tm.assert_frame_equal(result, expected)
result = df.loc["baz"]
expected = df.iloc[3]
tm.assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc[["bar"]]
exp = df.iloc[[2, 4]]
tm.assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
tm.assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("bool_value", [True, False])
def test_getitem_setitem_ix_bool_keyerror(self, bool_value):
# #2199
df = DataFrame({"a": [1, 2, 3]})
message = f"{bool_value}: boolean label can not be used without a boolean index"
with pytest.raises(KeyError, match=message):
df.loc[bool_value]
msg = "cannot use a single bool to index into setitem"
with pytest.raises(KeyError, match=msg):
df.loc[bool_value] = 0
# TODO: rename? remove?
def test_single_element_ix_dont_upcast(self, float_frame):
float_frame["E"] = 1
assert issubclass(float_frame["E"].dtype.type, (int, np.integer))
result = float_frame.loc[float_frame.index[5], "E"]
assert is_integer(result)
# GH 11617
df = DataFrame({"a": [1.23]})
df["b"] = 666
result = df.loc[0, "b"]
assert is_integer(result)
expected = Series([666], [0], name="b")
result = df.loc[[0], "b"]
tm.assert_series_equal(result, expected)
def test_iloc_row(self):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
result = df.iloc[1]
exp = df.loc[2]
tm.assert_series_equal(result, exp)
result = df.iloc[2]
exp = df.loc[4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[slice(4, 8)]
expected = df.loc[8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[1, 2, 4, 6]]
expected = df.reindex(df.index[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_row_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
original = df.copy()
# verify slice is view
# setting it makes it raise/warn
subset = df.iloc[slice(4, 8)]
assert np.shares_memory(df[2], subset[2])
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 2] = 0.0
exp_col = original[2].copy()
# TODO(ArrayManager) verify it is expected that the original didn't change
if not using_array_manager:
exp_col[4:8] = 0.0
tm.assert_series_equal(df[2], exp_col)
def test_iloc_col(self):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
result = df.iloc[:, 1]
exp = df.loc[:, 2]
tm.assert_series_equal(result, exp)
result = df.iloc[:, 2]
exp = df.loc[:, 4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[:, slice(4, 8)]
expected = df.loc[:, 8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[:, [1, 2, 4, 6]]
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_col_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
original = df.copy()
subset = df.iloc[:, slice(4, 8)]
if not using_array_manager:
# verify slice is view
assert np.shares_memory(df[8]._values, subset[8]._values)
# and that we are setting a copy
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 8] = 0.0
assert (df[8] == 0).all()
else:
# TODO(ArrayManager) verify this is the desired behaviour
subset[8] = 0.0
# subset changed
assert (subset[8] == 0).all()
# but df itself did not change (setitem replaces full column)
tm.assert_frame_equal(df, original)
def test_loc_duplicates(self):
# gh-17105
# insert a duplicate element to the index
trange = date_range(
start=Timestamp(year=2017, month=1, day=1),
end=Timestamp(year=2017, month=1, day=5),
)
trange = trange.insert(loc=5, item=Timestamp(year=2017, month=1, day=5))
df = DataFrame(0, index=trange, columns=["A", "B"])
bool_idx = np.array([False, False, False, False, False, True])
# assignment
df.loc[trange[bool_idx], "A"] = 6
expected = DataFrame(
{"A": [0, 0, 0, 0, 6, 6], "B": [0, 0, 0, 0, 0, 0]}, index=trange
)
tm.assert_frame_equal(df, expected)
# in-place
df = DataFrame(0, index=trange, columns=["A", "B"])
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
def test_setitem_with_unaligned_tz_aware_datetime_column(self):
# GH 12981
# Assignment of unaligned offset-aware datetime series.
# Make sure timezone isn't lost
column = Series(date_range("2015-01-01", periods=3, tz="utc"), name="dates")
df = DataFrame({"dates": column})
df["dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
df = DataFrame({"dates": column})
df.loc[[0, 1, 2], "dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
def test_loc_setitem_datetimelike_with_inference(self):
# GH 7592
# assignment of timedeltas with NaT
one_hour = timedelta(hours=1)
df = DataFrame(index=date_range("20130101", periods=4))
df["A"] = np.array([1 * one_hour] * 4, dtype="m8[ns]")
df.loc[:, "B"] = np.array([2 * one_hour] * 4, dtype="m8[ns]")
df.loc[df.index[:3], "C"] = np.array([3 * one_hour] * 3, dtype="m8[ns]")
df.loc[:, "D"] = np.array([4 * one_hour] * 4, dtype="m8[ns]")
df.loc[df.index[:3], "E"] = np.array([5 * one_hour] * 3, dtype="m8[ns]")
df["F"] = np.timedelta64("NaT")
df.loc[df.index[:-1], "F"] = np.array([6 * one_hour] * 3, dtype="m8[ns]")
df.loc[df.index[-3] :, "G"] = date_range("20130101", periods=3)
df["H"] = np.datetime64("NaT")
result = df.dtypes
expected = Series(
[np.dtype("timedelta64[ns]")] * 6 + [np.dtype("datetime64[ns]")] * 2,
index=list("ABCDEFGH"),
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_indexing_mixed(self):
df = DataFrame(
{
0: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
1: {
35: np.nan,
40: 0.32632316859446198,
43: np.nan,
49: 0.32632316859446198,
50: 0.39114724480578139,
},
2: {
35: np.nan,
40: np.nan,
43: 0.29012581014105987,
49: np.nan,
50: np.nan,
},
3: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
4: {
35: 0.34215328467153283,
40: np.nan,
43: np.nan,
49: np.nan,
50: np.nan,
},
"y": {35: 0, 40: 0, 43: 0, 49: 0, 50: 1},
}
)
# mixed int/float ok
df2 = df.copy()
df2[df2 > 0.3] = 1
expected = df.copy()
expected.loc[40, 1] = 1
expected.loc[49, 1] = 1
expected.loc[50, 1] = 1
expected.loc[35, 4] = 1
tm.assert_frame_equal(df2, expected)
df["foo"] = "test"
msg = "not supported between instances|unorderable types"
with pytest.raises(TypeError, match=msg):
df[df > 0.3] = 1
def test_type_error_multiindex(self):
# See gh-12218
mi = MultiIndex.from_product([["x", "y"], [0, 1]], names=[None, "c"])
dg = DataFrame(
[[1, 1, 2, 2], [3, 3, 4, 4]], columns=mi, index=Index([0, 1], name="i")
)
with pytest.raises(InvalidIndexError, match="slice"):
dg[:, 0]
index = Index(range(2), name="i")
columns = MultiIndex(
levels=[["x", "y"], [0, 1]], codes=[[0, 1], [0, 0]], names=[None, "c"]
)
expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index)
result = dg.loc[:, (slice(None), 0)]
tm.assert_frame_equal(result, expected)
name = ("x", 0)
index = Index(range(2), name="i")
expected = Series([1, 3], index=index, name=name)
result = dg["x", 0]
tm.assert_series_equal(result, expected)
def test_getitem_interval_index_partial_indexing(self):
# GH#36490
df = DataFrame(
np.ones((3, 4)), columns=pd.IntervalIndex.from_breaks(np.arange(5))
)
expected = df.iloc[:, 0]
res = df[0.5]
tm.assert_series_equal(res, expected)
res = df.loc[:, 0.5]
tm.assert_series_equal(res, expected)
def test_setitem_array_as_cell_value(self):
# GH#43422
df = DataFrame(columns=["a", "b"], dtype=object)
df.loc[0] = {"a": np.zeros((2,)), "b": np.zeros((2, 2))}
expected = DataFrame({"a": [np.zeros((2,))], "b": [np.zeros((2, 2))]})
tm.assert_frame_equal(df, expected)
# with AM goes through split-path, loses dtype
@td.skip_array_manager_not_yet_implemented
def test_iloc_setitem_nullable_2d_values(self):
df = DataFrame({"A": [1, 2, 3]}, dtype="Int64")
orig = df.copy()
df.loc[:] = df.values[:, ::-1]
tm.assert_frame_equal(df, orig)
df.loc[:] = pd.core.arrays.PandasArray(df.values[:, ::-1])
tm.assert_frame_equal(df, orig)
df.iloc[:] = df.iloc[:, :]
tm.assert_frame_equal(df, orig)
@pytest.mark.parametrize(
"null", [pd.NaT, pd.NaT.to_numpy("M8[ns]"), pd.NaT.to_numpy("m8[ns]")]
)
def test_setting_mismatched_na_into_nullable_fails(
self, null, any_numeric_ea_dtype
):
# GH#44514 don't cast mismatched nulls to pd.NA
df = DataFrame({"A": [1, 2, 3]}, dtype=any_numeric_ea_dtype)
ser = df["A"]
arr = ser._values
msg = "|".join(
[
r"int\(\) argument must be a string, a bytes-like object or a "
"(real )?number, not 'NaTType'",
r"timedelta64\[ns\] cannot be converted to an? (Floating|Integer)Dtype",
r"datetime64\[ns\] cannot be converted to an? (Floating|Integer)Dtype",
"object cannot be converted to a FloatingDtype",
"'values' contains non-numeric NA",
]
)
with pytest.raises(TypeError, match=msg):
arr[0] = null
with pytest.raises(TypeError, match=msg):
arr[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
ser[0] = null
with pytest.raises(TypeError, match=msg):
ser[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
ser.iloc[0] = null
with pytest.raises(TypeError, match=msg):
ser.iloc[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
df.iloc[0, 0] = null
with pytest.raises(TypeError, match=msg):
df.iloc[:2, 0] = [null, null]
# Multi-Block
df2 = df.copy()
df2["B"] = ser.copy()
with pytest.raises(TypeError, match=msg):
df2.iloc[0, 0] = null
with pytest.raises(TypeError, match=msg):
df2.iloc[:2, 0] = [null, null]
def test_loc_expand_empty_frame_keep_index_name(self):
# GH#45621
df = DataFrame(columns=["b"], index=Index([], name="a"))
df.loc[0] = 1
expected = DataFrame({"b": [1]}, index=Index([0], name="a"))
tm.assert_frame_equal(df, expected)
def test_loc_expand_empty_frame_keep_midx_names(self):
# GH#46317
df = DataFrame(
columns=["d"], index=MultiIndex.from_tuples([], names=["a", "b", "c"])
)
df.loc[(1, 2, 3)] = "foo"
expected = DataFrame(
{"d": ["foo"]},
index=MultiIndex.from_tuples([(1, 2, 3)], names=["a", "b", "c"]),
)
tm.assert_frame_equal(df, expected)
class TestDataFrameIndexingUInt64:
def test_setitem(self, uint64_frame):
df = uint64_frame
idx = df["A"].rename("foo")
# setitem
assert "C" not in df.columns
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
assert "D" not in df.columns
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# With NaN: because uint64 has no NaN element,
# the column should be cast to object.
df2 = df.copy()
df2.iloc[1, 1] = pd.NaT
df2.iloc[1, 2] = pd.NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(
df2.dtypes,
Series(
[np.dtype("uint64"), np.dtype("O"), np.dtype("O")],
index=["A", "B", "C"],
),
)
def test_object_casting_indexing_wraps_datetimelike(using_array_manager):
# GH#31649, check the indexing methods all the way down the stack
df = DataFrame(
{
"A": [1, 2],
"B": date_range("2000", periods=2),
"C": pd.timedelta_range("1 Day", periods=2),
}
)
ser = df.loc[0]
assert isinstance(ser.values[1], Timestamp)
assert isinstance(ser.values[2], pd.Timedelta)
ser = df.iloc[0]
assert isinstance(ser.values[1], Timestamp)
assert isinstance(ser.values[2], pd.Timedelta)
ser = df.xs(0, axis=0)
assert isinstance(ser.values[1], Timestamp)
assert isinstance(ser.values[2], pd.Timedelta)
if using_array_manager:
# remainder of the test checking BlockManager internals
return
mgr = df._mgr
mgr._rebuild_blknos_and_blklocs()
arr = mgr.fast_xs(0)
assert isinstance(arr[1], Timestamp)
assert isinstance(arr[2], pd.Timedelta)
blk = mgr.blocks[mgr.blknos[1]]
assert blk.dtype == "M8[ns]" # we got the right block
val = blk.iget((0, 0))
assert isinstance(val, Timestamp)
blk = mgr.blocks[mgr.blknos[2]]
assert blk.dtype == "m8[ns]" # we got the right block
val = blk.iget((0, 0))
assert isinstance(val, pd.Timedelta)
msg1 = r"Cannot setitem on a Categorical with a new category( \(.*\))?, set the"
msg2 = "Cannot set a Categorical with another, without identical categories"
class TestLocILocDataFrameCategorical:
@pytest.fixture
def orig(self):
cats = Categorical(["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = DataFrame({"cats": cats, "values": values}, index=idx)
return orig
@pytest.fixture
def exp_single_row(self):
# The expected values if we change a single row
cats1 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = DataFrame({"cats": cats1, "values": values1}, index=idx1)
return exp_single_row
@pytest.fixture
def exp_multi_row(self):
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# changed multiple rows
cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = DataFrame({"cats": cats2, "values": values2}, index=idx2)
return exp_multi_row
@pytest.fixture
def exp_parts_cats_col(self):
# changed part of the cats column
cats3 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = DataFrame({"cats": cats3, "values": values3}, index=idx3)
return exp_parts_cats_col
@pytest.fixture
def exp_single_cats_value(self):
# changed single value in cats col
cats4 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = DataFrame(
{"cats": cats4, "values": values4}, index=idx4
)
return exp_single_cats_value
@pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])
def test_loc_iloc_setitem_list_of_lists(self, orig, exp_multi_row, indexer):
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
key = slice(2, 4)
if indexer is tm.loc:
key = slice("j", "k")
indexer(df)[key, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
df = orig.copy()
with pytest.raises(TypeError, match=msg1):
indexer(df)[key, :] = [["c", 2], ["c", 2]]
@pytest.mark.parametrize("indexer", [tm.loc, tm.iloc, tm.at, tm.iat])
def test_loc_iloc_at_iat_setitem_single_value_in_categories(
self, orig, exp_single_cats_value, indexer
):
# - assign a single value -> exp_single_cats_value
df = orig.copy()
key = (2, 0)
if indexer in [tm.loc, tm.at]:
key = (df.index[2], df.columns[0])
# "b" is among the categories for df["cat"}]
indexer(df)[key] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# "c" is not among the categories for df["cat"]
with pytest.raises(TypeError, match=msg1):
indexer(df)[key] = "c"
@pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])
def test_loc_iloc_setitem_mask_single_value_in_categories(
self, orig, exp_single_cats_value, indexer
):
# mask with single True
df = orig.copy()
mask = df.index == "j"
key = 0
if indexer is tm.loc:
key = df.columns[key]
indexer(df)[mask, key] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
@pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])
def test_loc_iloc_setitem_full_row_non_categorical_rhs(
self, orig, exp_single_row, indexer
):
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
key = 2
if indexer is tm.loc:
key = df.index[2]
# not categorical dtype, but "b" _is_ among the categories for df["cat"]
indexer(df)[key, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# "c" is not among the categories for df["cat"]
with pytest.raises(TypeError, match=msg1):
indexer(df)[key, :] = ["c", 2]
@pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])
def test_loc_iloc_setitem_partial_col_categorical_rhs(
self, orig, exp_parts_cats_col, indexer
):
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
key = (slice(2, 4), 0)
if indexer is tm.loc:
key = (slice("j", "k"), df.columns[0])
# same categories as we currently have in df["cats"]
compat = Categorical(["b", "b"], categories=["a", "b"])
indexer(df)[key] = compat
tm.assert_frame_equal(df, exp_parts_cats_col)
# categories do not match df["cat"]'s, but "b" is among them
semi_compat = Categorical(list("bb"), categories=list("abc"))
with pytest.raises(TypeError, match=msg2):
# different categories but holdable values
# -> not sure if this should fail or pass
indexer(df)[key] = semi_compat
# categories do not match df["cat"]'s, and "c" is not among them
incompat = Categorical(list("cc"), categories=list("abc"))
with pytest.raises(TypeError, match=msg2):
# different values
indexer(df)[key] = incompat
@pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])
def test_loc_iloc_setitem_non_categorical_rhs(
self, orig, exp_parts_cats_col, indexer
):
# assign a part of a column with dtype != categorical -> exp_parts_cats_col
df = orig.copy()
key = (slice(2, 4), 0)
if indexer is tm.loc:
key = (slice("j", "k"), df.columns[0])
# "b" is among the categories for df["cat"]
indexer(df)[key] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
# "c" not part of the categories
with pytest.raises(TypeError, match=msg1):
indexer(df)[key] = ["c", "c"]
def test_loc_on_multiindex_one_level(self):
# GH#45779
df = DataFrame(
data=[[0], [1]],
index=MultiIndex.from_tuples([("a",), ("b",)], names=["first"]),
)
expected = DataFrame(
data=[[0]], index=MultiIndex.from_tuples([("a",)], names=["first"])
)
result = df.loc["a"]
tm.assert_frame_equal(result, expected)
class TestDeprecatedIndexers:
@pytest.mark.parametrize(
"key", [{1}, {1: 1}, ({1}, "a"), ({1: 1}, "a"), (1, {"a"}), (1, {"a": "a"})]
)
def test_getitem_dict_and_set_deprecated(self, key):
# GH#42825
        df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
"""
Extracts and plots data from an hdf5 file containing peridynamic node information.
The hdf5 (h5) fields are assumed to be formatted as numpy arrays with dimensions of
[timestep, node], with higher-dimensional data having additional array dimensions. The
available datasets and how to access them are defined in OUTPUT_DICT.
The default plot properties are defined by the dictionaries VIEWPOINT and WINDOW. These
properties are altered according to the command line arguments in apply_plot_options().
The available command line arguments are defined in PARSER_DICT. Additional command line
arguments can be created by adding to PARSER_DICT, following the existing format. The
values in PARSER_DICT are lists of the arguments used by the parser to activate the
options.
    Requires:
        gooey
        mayavi
        numpy
        pandas
"""
from argparse import ArgumentParser
from pathlib import Path
import numpy as np
import pandas as pd
from gooey import Gooey, GooeyParser
from mayavi import mlab
#########################################################################################
# PARAMETERS #
#########################################################################################
DEBUG = True
# fmt: off
PARSER_DICT = {
# Label Command line inputs Dest Type Action Default Help
"filename": ["filename", None, None, str, "store", "simulation.h5", "Path and name of the h5 file to be visualized."],
"extents": ["-e", "--extents", "extents", str, "store", None, "Spatial extents of included points [-x, +x, -y, +y, -z, +z]. Write 'inf' to specify infinity."],
"selection": ["-s", "--show", "selection", str, "store", "dmg", "Dataset selected for viewing, chosen from the labels in OUTPUT_DICT."],
"grid_spacing": ["-d", "--grid_spacing", "gs", float, "store", 0.5, "Grid spacing of dataset. Sets size of datapoints."],
"write_image": ["-i", "--write", "write_image", None, "store_true", None, "Whether plot will be saved to file. If so, the plot will not appear on screen."],
"min_plot": ["-m", "--min_plot", "min_plot", float, "store", None, "Minimum displayed output value. Any datapoints below this are omitted."],
"max_plot": ["-M", "--max_plot", "max_plot", float, "store", None, "Maximum displayed output value. Any datapoints above this are omitted."],
"min_legend": ["-l", "--min_legend", "min_legend", float, "store", None, "Minimum color scale value. Datapoints below this are the same color."],
"max_legend": ["-L", "--max_legend", "max_legend", float, "store", None, "Maximum color scale value. Datapoints above this are the same color."],
"exaggeration": ["-x", "--exag", "exag", float, "store", 0.0, "Displacement exaggeration factor. 0.0 plots reference configuration."],
"timestep": ["-t", "--timestep", "timestep_output", int, "store", 1, "Timestep number to be viewed."],
"greyscale": ["-g", "--greyscale", "greyscale", None, "store_true", None, "Whether colorscale is greyscale (`Greys`), instead of blue-red"],
"view": ["-v", "--view", "view", str, "store", None, "[unusable] View angle, one of (x+, x-, y+, y-, z+, z-, iso)"],
"scalebar": ["-b", "--scalebar", "scalebar", None, "store_false",None, "Whether to enable the scalebar (scalebar enabled by default)"],
"list": ["-p", "--print", "print", None, "store_true", None, "Print information about the data available in the specified file and exit."]
}
# fmt: on
# VIEWS_DICT = {
# 'x+': mlab.gcf().scene.x_plus_view,
# 'x-': mlab.gcf().scene.x_minus_view,
# 'y+': mlab.gcf().scene.y_plus_view,
# 'y-': mlab.gcf().scene.y_minus_view,
# 'z+': mlab.gcf().scene.z_plus_view,
# 'z-': mlab.gcf().scene.z_minus_view,
# 'iso': mlab.gcf().scene.isometric_view,
# 'def': mlab.gcf().scene.isometric_view # Default for incorrect input
# }
VIEWPOINT = { # Viewpoint properties
"view": None,
"azimuth": 0,
"elevation": 0,
"roll": None,
"distance": None,
"focalpoint": (0, 0, 0),
"parallel_projection": True,
"parallel_scale": 3.8,
}
WINDOW = { # Window properties
"offscreen": False,
"size": (1400, 600),
"bgcolor": (1, 1, 1),
"fgcolor": (0, 0, 0),
"show_axes": True,
"colormap": "blue-red",
}
#########################################################################################
# FUNCTIONS #
#########################################################################################
@Gooey(use_cmd_args=True, show_success_modal=False, header_height=20)
def parse_options(parser_dict):
"""Define and parse command line options according to dictionary containing option
data.
Args:
parser_dict (dict): Dictionary containing necessary option parser information
Returns:
options (dict): Dictionary containing parsed or default option values
"""
parser = GooeyParser()
for option in parser_dict.values():
if not option[0].startswith("-"):
parser.add_argument(
option[0],
type=option[3],
action=option[4],
default=option[5],
help=option[6],
widget="FileChooser",
)
elif option[4] not in ["store_true", "store_false"]:
parser.add_argument(
option[0],
option[1],
dest=option[2],
type=option[3],
action=option[4],
default=option[5],
help=option[6],
)
else:
parser.add_argument(
option[0], option[1], dest=option[2], action=option[4], help=option[6],
)
args = parser.parse_args()
return args.__dict__
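# Note: parse_options returns the parsed arguments as a plain dict (args.__dict__), so
# downstream code indexes options[...] by the "dest" names defined in PARSER_DICT.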
def options_convert_extents(options):
"""Convert extents string from input to list.
If options has a field 'extents' and 'extents' is not nothing, convert it to a list
Args:
options (dict): Dictionary containing extent field to be converted (if it exists)
Return:
options (dict): Dictionary containing converted extent field (if it exists)
"""
if options["extents"]:
options["extents"] = eval(options["extents"])
else:
options["extents"] = [-np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf]
return options
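# Illustrative sketch (hypothetical input): --extents "[-1, 1, -2, 2, -np.inf, np.inf]"
# is eval'd into a six-element list; because eval resolves names in this module,
# unbounded extents may need to be written as "np.inf" rather than bare "inf".
# Omitting --extents keeps every point, since the defaults are unbounded on all sides.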
def open_h5(filename):
"""Open h5 file and return root directory structure
Args:
filename (str): name of h5 file to be read
Returns:
(h5py File): h5 data structure with fields accessible [by dot notation] TODO
"""
return pd.read_hdf(filename, key="data", mode="r")
def set_limits(output, plot_limits=None, legend_limits=None):
    """Set plot and legend limits according to inputs.
    If limits are input and valid, return them. Otherwise, set them to the corresponding
    limit in the data. If limits are input and out of order, reverse them.
    Args:
        output (np array, nx1): output dataset being plotted
        plot_limits (float, 2x1 list, optional): [min, max] limits of values that will be
            displayed in plot
        legend_limits (float, 2x1 list, optional): [min, max] limits of colorbar scale
    Returns:
        plot_limits (float, 2x1 list): [min, max] limits of plotted values
        legend_limits (float, 2x1 list): [min, max] limits of colorbar scale
    """
    # Use None sentinels instead of mutable list defaults so repeated calls do not
    # share (and mutate) the same default lists.
    if plot_limits is None:
        plot_limits = [None, None]
    if legend_limits is None:
        legend_limits = [None, None]
if all(plot_limits) and plot_limits[0] > plot_limits[1]:
plot_limits.reverse()
if all(legend_limits) and legend_limits[0] > legend_limits[1]:
legend_limits.reverse()
if plot_limits[0] is None:
plot_limits[0] = min(output)
if plot_limits[1] is None:
plot_limits[1] = max(output)
if legend_limits[0] is None:
legend_limits[0] = min(output)
if legend_limits[1] is None:
legend_limits[1] = max(output)
return plot_limits, legend_limits
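# Illustrative sketch (hypothetical values): for an output array spanning 0.0 to 1.0,
#   set_limits(output)                          -> ([0.0, 1.0], [0.0, 1.0])
#   set_limits(output, plot_limits=[0.8, 0.2])  -> ([0.2, 0.8], [0.0, 1.0])
# i.e. reversed bounds are swapped and any missing bound falls back to the data range.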
def apply_plot_options(options, viewpoint, window):
"""Adjust viewpoint and window properties according to input options.
Args:
options (dict): Dictionary containing parsed or default option values
viewpoint (dict): dictionary containing viewpoint properties (orientation, scale)
window (dict): dictionary containing window properties (visibility, size, color)
Returns:
viewpoint (dict): dictionary containing viewpoint properties after changes
window (dict): dictionary containing window properties after changes
"""
if options["write_image"]:
window["offscreen"] = True
if options["greyscale"]:
window["colormap"] = "Greys"
# if options['view'] is not None:
# if DEBUG: print('DEBUG: Storing view as', options['view'])
# viewpoint['view'] = VIEWS_DICT.get(options['view'], 'def')
# if DEBUG: print('DEBUG: View stored as', type(viewpoint['view']), viewpoint['view'])
return viewpoint, window
def set_image_filename(h5_filename, timestep_output, selection, exag):
"""Set the filename of the saved image based on h5 name, timestep, and output
selection.
Args:
h5_filename (str): name of h5 file
        timestep_output (int/float): timestep being viewed
selection (str): selected output dataset
exag (float): exaggeration factor
Returns:
image_filename (str): name of image to be saved
"""
h5path = Path(h5_filename)
primer_path = h5path.parent
case_name = h5path.stem
folder_name = primer_path / f"{selection}_{exag}"
folder_name.mkdir(exist_ok=True)
return folder_name / f"{case_name}_{timestep_output:06d}.png"
def plot_data(datapoints, plot_options, viewpoint, window, image_filename="image.png"):
"""Plot the processed data.
Given the data in "datapoints", plot it in the window defined by the properties in
"window", viewing the data as defined in "viewpoint". If specified, save the plot
with the specified (or default) filename.
Args:
        datapoints (dict): dictionary containing the datapoint coordinates and values
        plot_options (dict): dictionary containing the plotted field name, exaggeration
            factor, point scale, and legend limits
        viewpoint (dict): dictionary containing viewpoint properties (orientation, scale)
        window (dict): dictionary containing window properties (visibility, size, color)
        image_filename (str, optional): name of saved plot image
"""
mlab.options.offscreen = window["offscreen"] # Set on- or off-screen visibility
mlab.figure(
size=window["size"], # Open figure with size and colors
bgcolor=window["bgcolor"], #
fgcolor=window["fgcolor"],
) #
mlab.gcf().scene.parallel_projection = viewpoint[
"parallel_projection"
] # Set parallel projection state
high = mlab.points3d(
datapoints["x1"] + datapoints["u1"] * plot_options["exag"],
datapoints["x2"] + datapoints["u2"] * plot_options["exag"],
datapoints["x3"] + datapoints["u3"] * plot_options["exag"],
datapoints[plot_options["data_name"]],
scale_factor=plot_options["scale"],
vmin=plot_options["legend_limits"][0],
vmax=plot_options["legend_limits"][1],
scale_mode="none",
mode="cube",
reset_zoom=False,
resolution=5,
colormap=window["colormap"],
)
if viewpoint["view"] is not None:
if DEBUG:
print("DEBUG: Setting view by preset")
# viewpoint['view']()
elif viewpoint["azimuth"] is not None:
if DEBUG:
print("DEBUG: Setting view by angles")
mlab.view(
azimuth=viewpoint["azimuth"], # Set camera location
elevation=viewpoint["elevation"], #
roll=viewpoint["roll"], #
distance=viewpoint["distance"], #
focalpoint=viewpoint["focalpoint"],
) #
if options["scalebar"]:
mlab.scalarbar(orientation="vertical") # Enable scalebar (vertical)
mlab.gcf().scene.camera.parallel_scale = viewpoint[
"parallel_scale"
] # Set view scale
if not window["offscreen"]:
high.scene.show_axes = window["show_axes"] # Set axes triad visibility
mlab.show()
else:
mlab.savefig(image_filename)
print("Saved", image_filename)
mlab.close()
def list_h5_data(h5: pd.DataFrame) -> None:
print(f"Max node number: {h5.index.max()[1]}")
print(f"Available time steps: \n\t{list(h5.index.levels[0])}")
print(f"Available data fields: \n\t{list(h5.columns)}")
#########################################################################################
# EXECUTION #
#########################################################################################
# Read and condition command line arguments
options = parse_options(PARSER_DICT)
options = options_convert_extents(options)
h5_filename = options["filename"]
# Read data from h5 file
data = open_h5(h5_filename)
if options["print"]:
list_h5_data(data)
exit(0)
# Extract desired datasets
coords = data.loc[options["timestep_output"], ("x1", "x2", "x3")]
disp = data.loc[options["timestep_output"], ("u1", "u2", "u3")]
output = data.loc[options["timestep_output"], options["selection"]]
extents = options["extents"]
extent_mask = (
(coords["x1"] >= extents[0])
& (coords["x1"] <= extents[1])
& (coords["x2"] >= extents[2])
& (coords["x2"] <= extents[3])
& (coords["x3"] >= extents[4])
& (coords["x3"] <= extents[5])
)
plot_limits, legend_limits = set_limits(
output,
[options["min_plot"], options["max_plot"]],
[options["min_legend"], options["max_legend"]],
)
value_mask = (output >= plot_limits[0]) & (output <= plot_limits[1])
datapoints = pd.concat([coords, disp, output], axis=1)
from datetime import datetime, time, date
from functools import partial
from dateutil import relativedelta
import calendar
from pandas import DateOffset, datetools, DataFrame, Series, Panel
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.resample import _get_range_edges
from pandas.core.groupby import DataFrameGroupBy, PanelGroupBy, BinGrouper
from pandas.tseries.resample import TimeGrouper
from pandas.tseries.offsets import Tick
from pandas.tseries.frequencies import _offset_map, to_offset
import pandas.lib as lib
import numpy as np
from trtools.monkey import patch, patch_prop
def _is_tick(offset):
return isinstance(offset, Tick)
## TODO See if I still need this. All this stuff was pre resample
def first_day(year, month, bday=True):
"""
Return first day of month. Default to business days
"""
weekday, days_in_month = calendar.monthrange(year, month)
if not bday:
return 1
if weekday <= 4:
return 1
else:
return 7-weekday+1
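# Hedged example: September 1, 2012 fell on a Saturday, so
#   first_day(2012, 9)             -> 3  (the first business day, a Monday)
#   first_day(2012, 9, bday=False) -> 1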
class MonthStart(DateOffset):
"""
Really the point of this is for DateRange, creating
a range where the month is anchored on day=1 and not the end
"""
def apply(self, other):
first = first_day(other.year, other.month)
if other.day == first:
result = other + relativedelta.relativedelta(months=1)
result = result.replace(day=first_day(result.year, result.month))
else:
result = other.replace(day=first)
return datetime(result.year, result.month, result.day)
def onOffset(self, someDate):
return someDate.day == first_day(someDate.year, someDate.month)
def daily_group(df):
daterange_func = partial(DatetimeIndex, freq=datetools.day)
return down_sample(df, daterange_func)
def weekly_group(df):
daterange_func = partial(DatetimeIndex, freq="W@MON")
return down_sample(df, daterange_func)
def monthly_group(df):
daterange_func = partial(DatetimeIndex, freq=MonthStart())
return down_sample(df, daterange_func)
def down_sample(obj, daterange_func):
if isinstance(obj, Panel):
index = obj.major_axis
else:
index = obj.index
start = datetime.combine(index[0].date(), time(0))
end = datetime.combine(index[-1].date(), time(0))
range = daterange_func(start=start, end=end)
grouped = obj.groupby(range.asof)
grouped._range = range
return grouped
# END TODO
def cols(self, *args):
return self.xs(list(args), axis=1)
def dropna_get(x, pos):
try:
return x.dropna().iget(pos)
except:
return None
def aggregate_picker(grouped, grouped_indices, col=None):
"""
In [276]: g.agg(np.argmax).high
Out[276]:
key_0
2007-04-27 281
2007-04-30 0
2007-05-01 5
2007-05-02 294
2007-05-03 3
2007-05-04 53
Should take something in that form and return a DataFrame with the proper date indexes and values...
"""
index = []
values = []
for key, group in grouped:
if col:
group = group[col]
sub_index = grouped_indices[key]
index.append(group.index[sub_index])
values.append(group.iget_value(sub_index))
return {'index':index, 'values':values}
# old version
def _kv_agg(grouped, func, col=None):
"""
Works like agg but returns index label and value for each hit
"""
if col:
sub_indices = grouped.agg({col: func})[col]
else:
sub_indices = grouped.agg(func)
data = aggregate_picker(grouped, sub_indices, col=col)
    return Series(data['values'], index=data['index'])
def kv_agg(grouped, func, col=None):
"""
Simpler version that is a bit faster. Really, I don't use aggregate_picker,
which makes it slightly faster.
"""
index = []
values = []
for key, group in grouped:
if col:
group = group[col]
sub_index = func(group)
val = group.iget_value(sub_index)
values.append(val)
index.append(group.index[sub_index])
    return Series(values, index=index)
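# Hedged usage sketch (hypothetical grouped data): for a DataFrameGroupBy `g`
# with a 'high' column, kv_agg(g, np.argmax, 'high') returns a series of each
# group's maximum 'high' value indexed by the timestamp at which it occurred,
# whereas g.agg(np.argmax) only yields the positional index (see the docstring
# of aggregate_picker above).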
def set_time(arr, hour, minute):
"""
Given a list of datetimes, set the time on all of them
"""
results = []
t = time(hour, minute)
for date in arr:
d = datetime.combine(date.date(), t)
results.append(d)
return results
def reset_time(df, hour, minute):
if isinstance(df, (DataFrame, Series)):
df.index = set_time(df.index, hour, minute)
if isinstance(df, Panel):
df.major_axis = set_time(df.major_axis, hour, minute)
return df
def max_groupby(grouped, col=None):
df = kv_agg(grouped, np.argmax, col)
return df
def trading_hours(df):
# assuming timestamp marks end of bar
inds = df.index.indexer_between_time(time(9,30),
time(16), include_start=False)
return df.take(inds)
times = np.vectorize(lambda x: x.time())
hours = np.vectorize(lambda x: x.time().hour)
minutes = np.vectorize(lambda x: x.time().minute)
def time_slice(series, hour=None, minute=None):
"""
    Vectorize a function that returns a boolean array marking index entries that
    match the given hour and/or minute
"""
bh = hour is not None
bm = minute is not None
if bh and bm:
t = time(hour, minute)
vec = np.vectorize(lambda x: x.time() == t)
    if bh and not bm:
        vec = np.vectorize(lambda x: x.time().hour == hour)
    if not bh and bm:
        vec = np.vectorize(lambda x: x.time().minute == minute)
    if not bh and not bm:
        raise ValueError("time_slice requires an hour and/or a minute")
    return vec(series.index)
def end_asof(index, label):
"""
Like index.asof but places the timestamp to the end of the bar
"""
if label not in index:
loc = index.searchsorted(label, side='left')
if loc > 0:
return index[loc]
else:
return np.nan
return label
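# Hedged example (hypothetical index): for 5-minute bars stamped
# [09:30, 09:35, 09:40], end_asof(index, 09:32) returns 09:35 -- the label is
# pushed forward to the end of its bar, unlike index.asof, which would snap
# back to 09:30.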
# TODO Forget where I was using this. I think pandas does this now.
class TimeIndex(object):
"""
Kind of like a DatetimeIndex, except it only cares about the time component of a Datetime object.
"""
def __init__(self, times):
self.times = times
def asof(self, date):
"""
Follows price is right rules. Will return the closest time that is equal or below.
        If the time is before the first time in the list, the date is returned unchanged.
"""
testtime = date.time()
last = None
for time in self.times:
if testtime == time:
return date
if testtime < time:
# found spot
break
last = time
# TODO should I anchor this to the last time?
if last is None:
return date
new_date = datetime.combine(date.date(), last)
return new_date
def get_time_index(freq, start=None, end=None):
if start is None:
start = "1/1/2012 9:30AM"
if end is None:
end = "1/1/2012 4:00PM"
ideal = DatetimeIndex(start=start, end=end, freq=freq)
times = [date.time() for date in ideal]
return TimeIndex(times)
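# Hedged example: get_time_index('30min') builds a TimeIndex over the default
# 9:30AM-4:00PM session, so its asof() anchors an intraday timestamp such as
# 10:47 back to the 10:30 slot of the same day.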
def get_anchor_index(index, freq):
ideal = get_time_index(freq)
start = index[0]
start = ideal.asof(start)
end = index[-1]
start, end = _get_range_edges(index, offset=freq, closed='right')
    ind = DatetimeIndex(start=start, end=end, freq=freq)
# -*- coding: utf-8 -*-
# Version 1.0
# Date: Jan 2 2020
from bokeh.plotting import figure, curdoc
from bokeh.models import ColumnDataSource, HoverTool, ColorBar, LinearColorMapper, Legend, BasicTickFormatter, \
LegendItem, Span, BasicTicker, LabelSet, Panel, Tabs
from bokeh.models.widgets import DataTable, Select, TableColumn, Slider, MultiSelect, RadioButtonGroup, Div, Button, \
CheckboxGroup, PreText, Paragraph, FileInput, TextAreaInput, HTMLTemplateFormatter
from bokeh.layouts import column, row, widgetbox
from bokeh.palettes import Spectral6, Set1, Category20, RdBu, RdBu3, Oranges, Blues
from bokeh.transform import linear_cmap, transform
from bokeh.models.ranges import FactorRange
from bokeh.transform import factor_cmap
from bokeh.models.tickers import FixedTicker, SingleIntervalTicker
from bokeh import events
from bokeh.models.callbacks import CustomJS
from math import pi
from collections import OrderedDict
import pandas as pd
import numpy as np
from code.clustering import get_elbow_plot, get_tsne, clustering_data
from code.regression import get_corr_plot, get_regression_plot, get_colors
from code.logistic_regression import get_logreg_output
from code.classification import get_classify_output
from code.data_sources import load_data_sources
import warnings
import os
import io
warnings.filterwarnings("ignore", category=DeprecationWarning)
"""
CODE
"""
class plot_attributes(object):
"""[summary]
Arguments:
object {figure} -- Unformatted plot
Returns:
[figure] -- Formatted plot
"""
def plot_format(self, plot):
plot.background_fill_color = self.background_fill_color
plot.border_fill_color = self.border_fill_color
plot.xaxis.formatter = self.x_axis_format
plot.yaxis.formatter = self.y_axis_format
plot.title.align = self.title_align
plot.title.text_font = self.text_font
plot.axis.axis_label_text_font = self.axis_label_text_font
plot.axis.axis_label_text_font_size = self.axis_label_text_font_size
plot.title.text_font_size = self.text_font_size
return plot
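    # Hedged usage sketch: the concrete tab classes below are assumed to define
    # the styling attributes (background_fill_color, text_font, ...) elsewhere
    # and then call, e.g.,
    #   self.plot_scatter = self.plot_format(self.plot_scatter)
    # to apply the shared look to a freshly created figure.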
class landing_page():
def __init__(self):
self.note = None
def landing_note(self):
self.note = Div(text="""<br><br> Machine Learning Tool: <br> This is a tool to get hands-on experience
with Machine Learning concepts like Regression, Classification, Clustering. </br></br>
<li>The tool was built to make it as a medium to get hands-on visual experience to different aspect of
data science like exploring/visualizing different data types, building models to make predictions,
evaluating the models.</li> </br>
<li>At this point, model optimization/selection is not an option since datasets are pre-built.
This could be implemented as a future update.</li>
<br><br></br></br>
<b>Disclaimer:</b> As a data scientist, this is not the <i>only</i> way to learn/practice data science concepts.
For someone with relatively less experience in coding/data-science concepts, this is a method to
facilitate interest and give a brief idea about the concepts.
</br></br></br>""",
style={'font-size': '14pt', 'color': 'black',"font":'Font Awesome\ 5 Free'},
width=1200, sizing_mode='stretch_both', css_classes=['div_landing'])
self.alert_loading = Div(text='', css_classes=['hidden'], visible=False)
self.alert_loading.js_on_change('text', self.callback_notification)
tab_landing = Panel(child=column(self.note),
title="Home")
return tab_landing
class eda_plots(plot_attributes):
def __init__(self):
self.active_df = None
self.table_eda = None
self.explore_data_select = None
self.button_eda_plot = None
self.slider_bins = None
self.log_x_cb = None
self.log_y_cb = None
self.log_hist_cb = None
self.button_hist_plot = None
self.plots = None
self.hover_scatter = None
self.eda_df = None
self.button_count_plot = None
self.plot_count_plot = None
self.reset_data_eda()
def reset_data_eda(self):
self.source_scatter.data = dict(x=[], y=[], color=[])
self.source_histogram.data = dict(top=[], left=[], right=[])
self.source_count_plot.data = dict(x=[], y=[])
self.source_eda.data = {}
self.table_eda.columns = []
self.select_x_axis.options = ["None"]
self.select_y_axis.options = ["None"]
self.select_color.options = ['None']
self.select_hist.options = ["None"]
self.select_count_plot.options = ["None"]
self.select_x_axis.value = "None"
self.select_y_axis.value = "None"
self.select_color.value = 'None'
self.select_hist.value = "None"
self.select_count_plot.value = "None"
self.plot_scatter.xaxis.axis_label = ''
self.plot_scatter.yaxis.axis_label = ''
self.plot_hist.xaxis.axis_label = ''
self.plot_hist.yaxis.axis_label = ''
self.plot_count_plot.xaxis.axis_label = ''
self.plot_count_plot.yaxis.axis_label = ''
self.data_source_eda.text = ""
def create_eda_figure(self):
active_df = self.explore_data_select.value
select_x_axis = self.select_x_axis.value
select_y_axis = self.select_y_axis.value
if active_df != "Select dataset":
ticker_x_dict, ticker_y_dict = {}, {}
xs, ys = [], []
if select_x_axis != "None" and select_y_axis != "None":
if self.log_x_cb.active:
if self.log_x_cb.active[0] == 0:
xs = np.log(self.eda_df[select_x_axis].values + 1)
else:
xs = self.eda_df[select_x_axis].values
if self.log_y_cb.active:
if self.log_y_cb.active[0] == 0:
ys = np.log(self.eda_df[select_y_axis].values + 1)
else:
ys = self.eda_df[select_y_axis].values
self.plot_scatter.xaxis.axis_label = select_x_axis
self.plot_scatter.yaxis.axis_label = select_y_axis
color_dict = {}
select_color = self.select_color.value
if select_color != "None":
color_factors = self.eda_df[select_color].unique().tolist()
for i in range(0, len(color_factors)):
color_dict[str(color_factors[i])] = Category20[20][i]
scat_color = pd.Series(
self.eda_df[select_color].astype(str)).map(color_dict)
self.source_scatter.data = dict(x=xs, y=ys, color=scat_color)
else:
scat_color = ['dodgerblue'] * len(xs)
self.source_scatter.data = dict(x=xs, y=ys, color=scat_color)
def create_hist_figure(self):
active_df = self.explore_data_select.value
if active_df != "Select dataset":
hist, edges = [], []
if self.select_hist.value != 'None':
self.plot_hist.xaxis.axis_label = self.select_hist.value
self.plot_hist.yaxis.axis_label = 'Count'
if self.log_hist_cb.active:
if self.log_hist_cb.active[0] == 0:
log_hist = np.log(
self.eda_df[self.select_hist.value].values + 1)
else:
log_hist = self.eda_df[self.select_hist.value].values
hist, edges = np.histogram(
log_hist, bins=self.slider_bins.value)
self.source_histogram.data = dict(
top=hist, left=edges[:-1], right=edges[1:])
def create_count_figure(self):
active_df = self.explore_data_select.value
if active_df != "Select dataset":
count_column, count_value = [], []
if self.select_count_plot.value != 'None':
self.plot_count_plot.xaxis.axis_label = self.select_count_plot.value
self.plot_count_plot.yaxis.axis_label = 'Count'
count_df = self.eda_df[self.select_count_plot.value].value_counts(
).to_frame()
count_column, count_value = count_df.index.tolist(
), count_df[self.select_count_plot.value].values.tolist()
count_column = [str(i) for i in count_column]
self.plot_count_plot.x_range.factors = list(count_column)
self.source_count_plot.data = dict(
x=list(count_column), y=list(count_value))
def eda_table(self, attr, old, new):
active_df = self.explore_data_select.value
data_source_text = load_data_sources()
if active_df != "Select dataset":
self.reset_data_eda()
self.file_path = str(self.cwd + self.data_path + str(self.eda_data_source.get(active_df)))
self.eda_df = pd.read_csv(self.file_path)
self.eda_df = self.eda_df.fillna(self.eda_df.mean())
self.eda_df.columns = [x.upper() for x in self.eda_df.columns]
self.source_eda.data = dict(self.eda_df)
self.table_eda.columns = [TableColumn(
field=cols, title=cols, width=90) for cols in self.eda_df.columns]
filter_objects = {}
filter_numeric = {}
likely_cat = {}
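            # Column-type heuristics: filter_objects marks numeric (float/int)
            # columns for the scatter/histogram selectors, filter_numeric marks
            # object-dtype or low-cardinality columns for the count plot, and
            # likely_cat marks columns with <= 20 unique values as candidate
            # color factors.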
for var in self.eda_df.columns:
filter_objects[var] = self.eda_df[var].dtype == np.float64 or self.eda_df[var].dtype == np.int64
filter_numeric[var] = str(self.eda_df[var].dtype) == 'object' or self.eda_df[var].nunique() <= 20
likely_cat[var] = self.eda_df[var].nunique() <= 20
filter_objects = [
k for k, v in filter_objects.items() if v is True]
self.select_x_axis.options = ["None"] + filter_objects
self.select_y_axis.options = ["None"] + filter_objects
self.select_hist.options = ["None"] + filter_objects
likely_cat = [k for k, v in likely_cat.items() if v is True]
self.select_color.options = ['None'] + likely_cat
filter_numeric = [
k for k, v in filter_numeric.items() if v is True]
self.select_count_plot.options = ["None"] + filter_numeric
data_source_df = data_source_text[data_source_text['Name'] == active_df]
data_text = "<center>Data Source</center>\n\n<b>Title:</b> "+data_source_df['Dataset'].tolist()[0] + "<br><br>" + \
"<b>Source Link:</b> <a href="+ data_source_df['Link'].tolist()[0] +""" target="_blank">"""+data_source_df['Link'].tolist()[0]+"</a><br>" + \
"<b>Description:</b>" + data_source_df['Description'].tolist()[0] + "<br><br>"
self.data_source_eda.text = data_text
else:
self.reset_data_eda()
def eda_button_enable(self, attr, old, new):
if (self.select_x_axis.value != 'None') and (self.select_y_axis.value != "None"):
self.button_eda_plot.disabled = False
else:
self.button_eda_plot.disabled = True
if self.select_hist.value != "None":
self.button_hist_plot.disabled = False
else:
self.button_hist_plot.disabled = True
if self.select_count_plot.value != "None":
self.button_count_plot.disabled = False
else:
self.button_count_plot.disabled = True
def exploration_plots(self):
df_exploration = pd.DataFrame()
self.source_eda = ColumnDataSource(data=dict(df_exploration))
eda_columns = [TableColumn(field=cols, title=cols) for cols in df_exploration.columns]
self.table_eda = DataTable(source=self.source_eda, columns=eda_columns, width=1200,
height=300, fit_columns=True)
x_scat, y_scat, scat_color = [], [], []
self.source_scatter = ColumnDataSource(
data=dict(x=x_scat, y=y_scat, color=scat_color))
self.hover_scatter = HoverTool(
tooltips=[("X", "@x{1.11}"),
("Y", "@y{1.11}")])
self.plot_scatter = figure(title="Scatter Plot", plot_height=600, plot_width=800,
tools=['pan,box_zoom,reset'] + [self.hover_scatter])
self.plot_scatter.scatter(x='x', y='y', size=10, line_color="white", alpha=0.6,
hover_color='white', hover_alpha=0.5, source=self.source_scatter, fill_color='color')
self.plot_scatter = self.plot_format(self.plot_scatter)
self.plot_scatter.min_border_left = 75
self.plot_scatter.min_border_bottom = 75
hist, edges = [], []
self.source_histogram = ColumnDataSource(
data=dict(top=hist, left=edges[:-1], right=edges[1:]))
hover_hist = HoverTool(
tooltips=[("X", "@left{1.11} ~ @right{1.11}"),
("Count", "@top{int}")])
self.plot_hist = figure(title='Histogram', plot_height=600, plot_width=800,
tools=['pan,box_zoom,reset'] + [hover_hist])
self.plot_hist.quad(top='top', bottom=0, left='left', right='right', source=self.source_histogram,
fill_color='dodgerblue', line_color="white", fill_alpha=0.8)
self.plot_hist = self.plot_format(self.plot_hist)
self.plot_hist.min_border_left = 50
self.plot_hist.min_border_bottom = 50
count_column, count_value = [], []
self.source_count_plot = ColumnDataSource(
data=dict(x=count_column, y=count_value))
hover_count_plot = HoverTool(tooltips=[("Category:", "@x"),
("Count:", "@y{int}")])
self.plot_count_plot = figure(title="Count Plot", plot_height=600, plot_width=800,
tools=['pan,box_zoom,reset']+[hover_count_plot], x_range=[])
self.plot_count_plot.vbar(x='x', top='y', width=0.9, source=self.source_count_plot,
fill_color='dodgerblue',
line_color="white", fill_alpha=0.8)
self.plot_count_plot.background_fill_color = self.background_fill_color
self.plot_count_plot.border_fill_color = self.border_fill_color
self.plot_count_plot.title.align = self.title_align
self.plot_count_plot.title.text_font = self.text_font
self.plot_count_plot.axis.axis_label_text_font = self.axis_label_text_font
self.plot_count_plot.axis.axis_label_text_font_size = self.axis_label_text_font_size
self.plot_count_plot.title.text_font_size = self.text_font_size
self.plot_count_plot.min_border_top = 50
self.plot_count_plot.min_border_bottom = 75
self.plot_count_plot.xaxis.major_label_orientation = pi / 4
self.explore_data_select = Select(title="Dataset:", value="Select dataset",
options=["Select dataset"] + list(self.eda_data_source.keys()))
self.select_x_axis = Select(
title="X-Axis:", value="None", options=["None"])
self.select_y_axis = Select(
title="Y-Axis:", value="None", options=["None"])
self.select_color = Select(
title="Color:", value="None", options=["None"])
self.button_eda_plot = Button(label="Draw Plot")
self.button_eda_plot.disabled = True
self.select_hist = Select(
title="Histogram Value:", value="None", options=["None"])
self.slider_bins = Slider(title="Histogram Bins", value=20, start=5.0, end=50, step=1,
callback_policy='mouseup', css_classes=['custom_slider'])
self.log_x_cb = CheckboxGroup(
labels=["Log transform: x-axis"], active=[])
self.log_y_cb = CheckboxGroup(
labels=["Log transform: y-axis"], active=[])
self.log_hist_cb = CheckboxGroup(
labels=["Log transform axis"], active=[])
self.button_hist_plot = Button(label="Draw Histogram")
self.button_hist_plot.disabled = True
self.select_count_plot = Select(
title="Count Plot Value:", value="None", options=["None"])
self.button_count_plot = Button(label="Draw Count Plot")
self.button_count_plot.disabled = True
self.select_x_axis.on_change('value', self.eda_button_enable)
self.select_y_axis.on_change('value', self.eda_button_enable)
self.select_hist.on_change('value', self.eda_button_enable)
self.select_count_plot.on_change('value', self.eda_button_enable)
self.explore_data_select.on_change("value", self.eda_table)
self.button_eda_plot.on_click(self.create_eda_figure)
self.button_hist_plot.on_click(self.create_hist_figure)
self.button_count_plot.on_click(self.create_count_figure)
self.data_source_eda = Div(text='', width = 800, height = 200, css_classes=['itemconfiguration'])
tab_eda = Panel(child=column(row(self.explore_data_select, self.data_source_eda), self.table_eda,
row(column(self.select_x_axis, self.log_x_cb, self.select_y_axis, self.log_y_cb,
self.select_color, self.button_eda_plot), self.plot_scatter),
row(column(self.select_hist, self.log_hist_cb, self.slider_bins,
self.button_hist_plot), self.plot_hist),
row(column(self.select_count_plot,
self.button_count_plot), self.plot_count_plot)),
title="Data Exploration")
return tab_eda
class linear_regression(plot_attributes):
"""
Linear Regression Tab
"""
def __init__(self):
self.color_bar = None
self.plot_hist_resid = None
self.reg_target_ms = None
self.source_corr = None
self.plot_corr = None
self.table_reg = None
self.button_reg = None
self.hline = None
self.hover_corr = None
self.hover_reg = None
self.hover_resid = None
self.hover_resid_hist = None
self.legend_reg = None
self.plot_reg = None
self.plot_resid = None
self.reg_data_select = None
self.reg_features_ms = None
self.reg_scatter = None
self.active_df = None
self.reg_df = None
self.normalize_linreg = None
self.reset_data_reg()
def reset_data_reg(self):
self.source_reg.data = {}
self.source_reg_scat.data = dict(actual=[], predict=[])
self.source_reg_resid.data = dict(predict=[], residual=[])
self.source_hist_resid.data = dict(top=[], bottom=[], right=[])
self.legend_reg.items = []
self.table_reg.columns = []
self.color_bar_reg.scale_alpha = 0
self.color_bar_reg.major_label_text_alpha = 0
self.reg_features_ms.options = ["ALL"]
self.reg_features_ms.value = ["ALL"]
self.reg_target_ms.options = ['SELECT TARGET']
self.reg_target_ms.value = 'SELECT TARGET'
self.button_logreg.disabled = True
top, bottom, left, right, labels, nlabels, color_list, corr = get_corr_plot(
pd.DataFrame())
self.corr_plot(top, bottom, left, right, labels,
nlabels, color_list, corr)
def corr_plot(self, top, bottom, left, right, labels, nlabels, color_list, corr):
self.source_corr.data = dict(
top=top, bottom=bottom, left=left, right=right, color=color_list, corr=corr)
self.plot_corr.x_range.start, self.plot_corr.x_range.end = 0, nlabels
self.plot_corr.y_range.start, self.plot_corr.y_range.end = 0, nlabels
ticks = [tick + 0.5 for tick in list(range(nlabels))]
tick_dict = OrderedDict([[tick, labels[ii]]
for ii, tick in enumerate(ticks)])
self.color_bar_reg.scale_alpha = 1
self.color_bar_reg.major_label_text_alpha = 1
self.plot_corr.xaxis.ticker = ticks
self.plot_corr.yaxis.ticker = ticks
self.plot_corr.xaxis.major_label_overrides = tick_dict
self.plot_corr.yaxis.major_label_overrides = tick_dict
def reg_plot(self):
features = self.reg_features_ms.value
label = self.reg_target_ms.value
active_norm = self.normalize_linreg.active
if label != "SELECT TARGET":
if 'ALL' in features:
df_columns = self.reg_df.columns.values.tolist()
df_columns.remove(label)
features_df = self.reg_df.loc[:, df_columns]
else:
if label in features:
features.remove(label)
features_df = self.reg_df.loc[:, features]
else:
features_df = self.reg_df.loc[:, features]
target_df = self.reg_df.loc[:, label]
actual_reg, predict_reg, text, MAE, RMSE, residual, \
slope, intercept = get_regression_plot(
features_df, target_df, active_norm)
self.plot_reg.x_range.start, self.plot_reg.x_range.end = actual_reg.min(), actual_reg.max()
self.plot_reg.y_range.start, self.plot_reg.y_range.end = predict_reg.min(), predict_reg.max()
self.plot_resid.x_range.start, self.plot_resid.x_range.end = predict_reg.min(), predict_reg.max()
self.plot_resid.y_range.start, self.plot_resid.y_range.end = residual.min(), residual.max()
self.source_reg_scat.data = dict(
actual=list(actual_reg), predict=list(predict_reg))
self.source_reg_resid.data = dict(
predict=list(predict_reg), residual=list(residual))
self.legend_reg.items = [LegendItem(label=text[0], renderers=[self.reg_scatter]),
LegendItem(label="MAE - " + str(MAE),
renderers=[self.reg_scatter]),
LegendItem(label="RMSE - " + str(RMSE), renderers=[self.reg_scatter])]
vhist, vedges = np.histogram(residual, bins=50)
vmax = max(vhist) * 1.1
self.plot_hist_resid.x_range.start, self.plot_hist_resid.x_range.end = 0, vmax
self.plot_hist_resid.y_range.start, self.plot_hist_resid.y_range.end = residual.min(), residual.max()
self.hline.line_alpha = 0.5
self.source_hist_resid.data = dict(
top=vedges[1:], bottom=vedges[:-1], right=vhist)
self.error_count += 1
self.alert_reg.text = str(self.error_count)+" Regression Completed"
def create_figure_reg(self, attr, old, new):
self.active_df = self.reg_data_select.value
if self.active_df != "Select dataset":
self.reset_data_reg()
self.file_path = str(
self.cwd + self.data_path + str(self.regression_data_source.get(self.active_df)))
self.reg_df = pd.read_csv(self.file_path)
self.reg_df = self.reg_df.fillna(self.reg_df.mean())
self.reg_df.columns = [x.upper() for x in self.reg_df.columns]
self.source_reg.data = dict(self.reg_df)
self.table_reg.columns = [TableColumn(
field=cols, title=cols, width=90) for cols in self.reg_df.columns]
self.reg_features_ms.options = ['ALL'] + list(self.reg_df.columns)
likely_target = {}
for var in self.reg_df.columns:
likely_target[var] = self.reg_df[var].nunique() > self.reg_df.shape[0]*0.1
likely_target = [k for k, v in likely_target.items() if v is True]
self.reg_target_ms.options = [
'SELECT TARGET'] + list(likely_target)
top, bottom, left, right, labels, nlabels, color_list, corr = get_corr_plot(self.reg_df)
self.corr_plot(top, bottom, left, right, labels, nlabels, color_list, corr)
self.button_reg.disabled = True
else:
self.reset_data_reg()
def button_enable(self, attr, old, new):
if self.reg_target_ms.value != 'SELECT TARGET':
self.button_reg.disabled = False
else:
self.button_reg.disabled = True
def lin_reg(self):
df_reg = pd.DataFrame()
self.source_reg = ColumnDataSource(data=dict(df_reg))
reg_columns = [TableColumn(field=cols, title=cols)
for cols in df_reg.columns]
self.table_reg = DataTable(source=self.source_reg, columns=reg_columns, width=1200, height=300,
fit_columns=True)
top, bottom, left, right, color, corr = [], [], [], [], [], []
self.source_corr = ColumnDataSource(data=dict(top=top, bottom=bottom, left=left, right=right,
color=color, corr=corr))
self.hover_corr = HoverTool(tooltips=[("Correlation", "@corr{1.11}")])
self.plot_corr = figure(plot_width=750, plot_height=650, title="Correlation Matrix",
toolbar_location='left', tools=[self.hover_corr])
self.plot_corr.quad(top='top', bottom='bottom', left='left', right='right', color='color',
line_color='white', source=self.source_corr)
self.plot_corr = self.plot_format(self.plot_corr)
self.plot_corr.xgrid.grid_line_color = None
self.plot_corr.ygrid.grid_line_color = None
self.plot_corr.xaxis.major_label_orientation = pi / 4
self.plot_corr.min_border_left = 110
self.plot_corr.min_border_bottom = 110
self.plot_corr.y_range.flipped = True
corr_colors = list(reversed(RdBu[9]))
self.reg_mapper = LinearColorMapper(
palette=corr_colors, low=-1, high=1)
self.color_bar_reg = ColorBar(color_mapper=self.reg_mapper, location=(0, 0),
ticker=BasicTicker(
desired_num_ticks=len(corr_colors)),
scale_alpha=0, major_label_text_alpha=0)
self.plot_corr.add_layout(self.color_bar_reg, 'right')
self.color_bar_reg.background_fill_color = 'whitesmoke'
actual_reg, predict_reg = [], []
self.source_reg_scat = ColumnDataSource(
data=dict(actual=actual_reg, predict=predict_reg))
self.hover_reg = HoverTool(tooltips=[("Actual", "@actual{int}"),
("Predicted", "@predict{int}")])
self.plot_reg = figure(plot_height=500, plot_width=900,
tools=['pan,box_zoom,reset,wheel_zoom'] + [self.hover_reg])
self.reg_scatter = self.plot_reg.scatter(x='actual', y='predict', size=7, line_color="white", alpha=0.6,
hover_color='white',
hover_alpha=0.5, source=self.source_reg_scat,
fill_color='dodgerblue', )
self.legend_reg = Legend(items=[LegendItem(label="", renderers=[
self.reg_scatter])], location='bottom_right')
self.plot_reg.add_layout(self.legend_reg)
self.plot_reg = self.plot_format(self.plot_reg)
self.plot_reg.xaxis.axis_label = "Actual Value"
self.plot_reg.yaxis.axis_label = "Predicted Value"
residual, predict_reg = [], []
self.source_reg_resid = ColumnDataSource(
data=dict(predict=predict_reg, residual=residual))
self.hover_resid = HoverTool(tooltips=[("Predicted", "@predict{int}"),
("Residual", "@residual{int}")],
names=['resid'])
self.plot_resid = figure(plot_height=500, plot_width=700,
tools=['pan,box_zoom,reset,wheel_zoom'] + [self.hover_resid])
self.hline = Span(location=0, dimension='width', line_color='black', line_width=3,
line_alpha=0, line_dash="dashed")
self.plot_resid.renderers.extend([self.hline])
self.plot_resid.scatter(x='predict', y='residual', size=7, line_color="white", alpha=0.6, hover_color='white',
hover_alpha=0.5, source=self.source_reg_resid, fill_color='dodgerblue', name='resid')
self.plot_resid = self.plot_format(self.plot_resid)
self.plot_resid.xaxis.axis_label = "Predicted Value"
self.plot_resid.yaxis.axis_label = "Residual Value"
vhist, vedges = [], []
self.source_hist_resid = ColumnDataSource(
data=dict(top=vedges[1:], bottom=vedges[:-1], right=vhist))
self.hover_resid_hist = HoverTool(tooltips=[("Count", "@right{int}")])
self.plot_hist_resid = figure(toolbar_location=None, plot_width=200, plot_height=self.plot_resid.plot_height,
y_range=self.plot_resid.y_range, min_border=10, y_axis_location="right",
tools=[self.hover_resid_hist] + ['pan'])
self.plot_hist_resid.quad(left=0, bottom='bottom', top='top', right='right', color="dodgerblue",
line_color="white", source=self.source_hist_resid)
self.plot_hist_resid.ygrid.grid_line_color = None
self.plot_hist_resid.xaxis.major_label_orientation = np.pi / 4
self.plot_hist_resid = self.plot_format(self.plot_hist_resid)
self.reg_data_select = Select(title="Dataset:", value="Select dataset",
options=["Select dataset"] + list(self.regression_data_source.keys()))
self.reg_features_ms = MultiSelect(
title="Select features:", value=["ALL"], options=["ALL"])
self.normalize_linreg = RadioButtonGroup(
labels=["Actual Data", "Normalize Data"], active=0)
self.reg_target_ms = Select(title="Select target for regression:", value="SELECT TARGET",
options=["SELECT TARGET"])
self.button_reg = Button(label="Calculate regression")
self.button_reg.disabled = True
self.reg_data_select.on_change("value", self.create_figure_reg)
self.reg_target_ms.on_change('value', self.button_enable)
self.button_reg.on_click(self.reg_plot)
self.div_whitespace = Div(text="""""", height=100)
self.alert_reg = Div(text='', css_classes=['hidden'], visible=False)
self.alert_reg.js_on_change('text', self.callback_notification)
tab_reg = Panel(child=column(self.reg_data_select, self.table_reg, self.plot_corr,
row(column(self.reg_features_ms, self.normalize_linreg,
self.reg_target_ms, self.button_reg),
column(self.plot_reg, row(self.plot_resid, self.plot_hist_resid),
self.alert_reg, self.div_whitespace))),
title="Linear Regression")
return tab_reg
class logistic_regression(plot_attributes):
"""
Tab for Logistic Regression
"""
def __init__(self):
self.active_df = None
self.logreg_df = None
self.legend_roc = None
self.roc_line = None
self.hover_logreg_cm = None
self.color_bar_logreg_cm = None
self.table_class_rep = None
self.button_logreg = None
self.hover_logreg_roc = None
self.labels_logreg_cm = None
self.logreg_roc_plot = None
self.normalize_logreg = None
self.div_report_title = None
self.reset_data_logreg()
def reset_data_logreg(self):
self.source_logreg.data = {}
self.source_class_rep_logreg.data = {}
self.source_logreg_cm.data = dict(Actual=[], Prediction=[], value=[])
self.source_logreg_roc.data = dict(fpr_roc=[], tpr_roc=[])
self.source_logreg_const_roc.data = dict(
const_roc_x=[], const_roc_y=[])
self.table_logreg.columns = []
self.table_class_rep_logreg.columns = []
self.legend_roc.items = []
self.color_bar_logreg_cm.scale_alpha = 0
self.color_bar_logreg_cm.major_label_text_alpha = 0
self.logreg_features_ms.options = ["ALL"]
self.logreg_features_ms.value = ["ALL"]
self.logreg_target_ms.options = ['SELECT TARGET']
self.logreg_target_ms.value = 'SELECT TARGET'
self.button_logreg.disabled = True
def logreg_button_enable(self, attr, old, new):
if self.logreg_target_ms.value != 'SELECT TARGET':
self.button_logreg.disabled = False
else:
self.button_logreg.disabled = True
def create_figure_logreg(self, attr, old, new):
self.active_df = self.logreg_data_select.value
if self.active_df != "Select dataset":
self.reset_data_logreg()
self.file_path = str(self.cwd + self.data_path +
str(self.logreg_data_source.get(self.active_df)))
logreg_df = pd.read_csv(self.file_path)
logreg_df = logreg_df.fillna(logreg_df.mean())
logreg_df.columns = [x.upper() for x in logreg_df.columns]
self.logreg_df = logreg_df
self.source_logreg.data = dict(logreg_df)
self.table_logreg.columns = [TableColumn(field=cols, title=cols, width=90) for cols in
self.logreg_df.columns]
self.logreg_features_ms.options = [
"ALL"] + logreg_df.columns.values.tolist()
likely_cat = {}
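            # Only strictly binary 0/1 columns are offered as logistic
            # regression targets.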
for var in logreg_df.columns:
likely_cat[var] = logreg_df[var].nunique() == 2 and set(
logreg_df[var].unique()) == set([0, 1])
likely_cat = [k for k, v in likely_cat.items() if v is True]
self.logreg_target_ms.options = ['SELECT TARGET'] + likely_cat
self.button_logreg.disabled = True
else:
self.reset_data_logreg()
def logreg_plot(self):
features = self.logreg_features_ms.value
label = self.logreg_target_ms.value
logreg_df = self.logreg_df
active_norm = self.normalize_logreg.active
if label != "SELECT TARGET":
if 'ALL' in features:
df_columns = logreg_df.columns.values.tolist()
df_columns.remove(label)
features_df = logreg_df.loc[:, df_columns]
else:
if label in features:
features.remove(label)
features_df = logreg_df.loc[:, features]
else:
features_df = logreg_df.loc[:, features]
target_df = logreg_df.loc[:, label]
accuracy_score, class_report_df, confusion_df, \
logit_roc_auc, fpr, tpr, thresholds = get_logreg_output(
features_df, target_df, active_norm)
self.source_class_rep_logreg.data = dict(class_report_df)
self.table_class_rep_logreg.columns = [TableColumn(field=cols, title=cols, width=90) for cols in
class_report_df.columns]
self.table_class_rep_logreg.index_position = None
self.logreg_cm_mapper.low, self.logreg_cm_mapper.high = confusion_df.value.values.min(
), confusion_df.value.values.max()
self.color_bar_logreg_cm.scale_alpha = 1
self.color_bar_logreg_cm.major_label_text_alpha = 1
self.logreg_cm_plot.x_range.start, self.logreg_cm_plot.x_range.end = confusion_df.Actual.min(), \
confusion_df.Actual.max()
self.logreg_cm_plot.y_range.start, self.logreg_cm_plot.y_range.end = confusion_df.Prediction.min(), \
confusion_df.Prediction.max()
self.logreg_cm_plot.xaxis.ticker = sorted(target_df.unique())
self.logreg_cm_plot.yaxis.ticker = sorted(target_df.unique())
self.logreg_cm_plot.xaxis.axis_label = "Actual"
self.logreg_cm_plot.yaxis.axis_label = "Predicted"
self.source_logreg_cm.data = confusion_df
self.source_logreg_roc.data = dict(fpr_roc=fpr, tpr_roc=tpr)
self.logreg_roc_plot.xaxis.axis_label = "False Positive Rate"
self.logreg_roc_plot.yaxis.axis_label = "True Positive Rate"
self.legend_roc.items = [LegendItem(label="Logistic Regression (area = " + str(logit_roc_auc) + ")",
renderers=[self.roc_line])]
self.source_logreg_const_roc.data = dict(
const_roc_x=[0, 1], const_roc_y=[0, 1])
self.error_count += 1
self.alert_logreg.text = str(
self.error_count)+" Logistic Regression Completed"
def logreg(self):
df_logreg = pd.DataFrame()
self.source_logreg = ColumnDataSource(data=dict(df_logreg))
logreg_columns = [TableColumn(field=cols, title=cols)
for cols in df_logreg.columns]
self.table_logreg = DataTable(source=self.source_logreg, columns=logreg_columns, width=1200, height=300,
fit_columns=True)
df_class_report = pd.DataFrame()
self.source_class_rep_logreg = ColumnDataSource(
data=dict(df_class_report))
class_rep_columns_logreg = [TableColumn(
field=cols, title=cols) for cols in df_class_report.columns]
self.table_class_rep_logreg = DataTable(source=self.source_class_rep_logreg, columns=class_rep_columns_logreg,
width=600, height=200, fit_columns=True)
logreg_cm_colors = list(reversed(Blues[9]))
actual_cm, predicted_cm, value_cm = [], [], []
self.source_logreg_cm = ColumnDataSource(
data=dict(Actual=actual_cm, Prediction=predicted_cm, value=value_cm))
self.logreg_cm_mapper = LinearColorMapper(
palette=logreg_cm_colors, low=0, high=100)
self.labels_logreg_cm = LabelSet(x='Actual', y='Prediction', text='value', level='overlay', x_offset=0,
y_offset=-10,
source=self.source_logreg_cm, render_mode='canvas', text_align='center',
text_font='times',
text_color='#FF0000', text_font_style='bold', text_font_size='16px')
self.hover_logreg_cm = HoverTool(tooltips=[("Actual", "@Actual"),
("Predicted", "@Prediction"),
("Value", "@value")])
self.logreg_cm_plot = figure(plot_width=400, plot_height=300, title="Confusion Matrix", toolbar_location=None,
tools=[self.hover_logreg_cm], x_axis_location="above")
self.logreg_cm_plot.rect(x="Actual", y="Prediction", width=.9, height=.9, source=self.source_logreg_cm,
line_color='black', fill_color=transform('value', self.logreg_cm_mapper))
self.logreg_cm_plot.y_range.flipped = True
self.color_bar_logreg_cm = ColorBar(color_mapper=self.logreg_cm_mapper, location=(0, 0),
ticker=BasicTicker(
desired_num_ticks=len(logreg_cm_colors)),
scale_alpha=0, major_label_text_alpha=0)
self.logreg_cm_plot.add_layout(self.color_bar_logreg_cm, 'right')
self.color_bar_logreg_cm.background_fill_color = "whitesmoke"
self.logreg_cm_plot = self.plot_format(self.logreg_cm_plot)
self.logreg_cm_plot.add_layout(self.labels_logreg_cm)
self.logreg_cm_plot.min_border_left = 50
self.logreg_cm_plot.min_border_top = 50
self.hover_logreg_roc = HoverTool(tooltips=[("False Positive Rate", "@fpr_roc"),
("True Positive Rate", "@tpr_roc")],
names=['roc'])
fpr_roc, tpr_roc = [], []
self.source_logreg_roc = ColumnDataSource(
data=dict(fpr_roc=fpr_roc, tpr_roc=tpr_roc))
const_roc_x, const_roc_y = [], []
self.source_logreg_const_roc = ColumnDataSource(
data=dict(const_roc_x=const_roc_x, const_roc_y=const_roc_y))
self.logreg_roc_plot = figure(plot_width=500, plot_height=500, title="ROC AUC", toolbar_location=None,
tools=[self.hover_logreg_roc], x_range=(-0.04, 1.04), y_range=(-0.04, 1.04))
self.roc_line = self.logreg_roc_plot.line(x="fpr_roc", y="tpr_roc", line_width=4, source=self.source_logreg_roc,
line_color='dodgerblue', name='roc')
self.logreg_roc_plot.line(x="const_roc_x", y="const_roc_y", line_width=2, line_dash='dashed',
source=self.source_logreg_const_roc, line_color='orangered')
self.legend_roc = Legend(items=[LegendItem(label="", renderers=[
self.roc_line])], location='bottom_right')
self.logreg_roc_plot.add_layout(self.legend_roc)
self.logreg_roc_plot = self.plot_format(self.logreg_roc_plot)
self.logreg_roc_plot.min_border_left = 50
self.logreg_roc_plot.min_border_bottom = 50
self.logreg_data_select = Select(title="Dataset:", value="Select dataset",
options=["Select dataset"] + list(self.logreg_data_source.keys()))
self.logreg_features_ms = MultiSelect(
title="Select features:", value=["ALL"], options=["ALL"])
self.normalize_logreg = RadioButtonGroup(
labels=["Actual Data", "Normalize Data"], active=0)
self.logreg_target_ms = Select(title="Select target for Logistic regression:", value="SELECT TARGET",
options=["SELECT TARGET"])
self.button_logreg = Button(label="Calculate regression")
self.button_logreg.disabled = True
self.logreg_data_select.on_change("value", self.create_figure_logreg)
self.logreg_target_ms.on_change('value', self.logreg_button_enable)
self.button_logreg.on_click(self.logreg_plot)
self.div_report_title = Div(
text="""<center>Classification Report</center>""", width=600)
self.alert_logreg = Div(text='', css_classes=['hidden'], visible=False)
self.alert_logreg.js_on_change('text', self.callback_notification)
tab_logreg = Panel(child=column(self.logreg_data_select, self.table_logreg,
row(column(self.logreg_features_ms, self.normalize_logreg,
self.logreg_target_ms, self.button_logreg),
column(self.div_report_title, self.table_class_rep_logreg, self.logreg_cm_plot,
self.logreg_roc_plot, self.alert_logreg))),
title="Logistic Regression")
return tab_logreg
class classification(plot_attributes):
def __init__(self):
self.source_classify = None
def create_figure_classify(self, attr, old, new):
self.active_df = self.classify_data_select.value
if self.active_df != "Select dataset":
self.file_path = str(
self.cwd + self.data_path + str(self.classify_data_source.get(self.active_df)))
classify_df = pd.read_csv(self.file_path)
classify_df = classify_df.fillna(classify_df.mean())
classify_df.columns = [x.upper() for x in classify_df.columns]
self.classify_df = classify_df
self.source_classify.data = dict(classify_df)
self.table_classify.columns = [TableColumn(field=cols, title=cols, width=90) for cols in
self.classify_df.columns]
self.classify_features_ms.options = [
"ALL"] + classify_df.columns.values.tolist()
likely_cat = {}
for var in classify_df.columns:
likely_cat[var] = classify_df[var].nunique() <= 20
likely_cat = [k for k, v in likely_cat.items() if v is True]
self.classify_target_ms.options = ['SELECT TARGET'] + likely_cat
self.button_classify.disabled = True
else:
self.source_classify.data = {}
self.table_classify.columns = []
self.classify_features_ms.options = ["ALL"]
self.classify_features_ms.value = ["ALL"]
self.classify_target_ms.options = ['SELECT TARGET']
self.classify_target_ms.value = 'SELECT TARGET'
self.button_classify.disabled = True
self.source_classify_cm.data = {}
self.source_classify_fi.data = {}
self.source_class_rep_classify.data = {}
def classify_button_enable(self, attr, old, new):
if self.classify_target_ms.value != "SELECT TARGET":
self.button_classify.disabled = False
else:
self.button_classify.disabled = True
def classify_plot(self):
features = self.classify_features_ms.value
label = self.classify_target_ms.value
classify_df = self.classify_df
active_norm = self.normalize_classify.active
if label != "SELECT TARGET":
if 'ALL' in features:
df_columns = classify_df.columns.values.tolist()
df_columns.remove(label)
features_df = classify_df.loc[:, df_columns]
else:
if label in features:
features.remove(label)
features_df = classify_df.loc[:, features]
else:
features_df = classify_df.loc[:, features]
target_df = classify_df.loc[:, label]
accuracy_score, class_report_df, confusion_df, \
rf_feature_labels, rf_feature_importance = get_classify_output(
features_df, target_df, active_norm)
self.source_class_rep_classify.data = dict(class_report_df)
self.table_class_rep_classify.columns = [TableColumn(field=cols, title=cols, width=90) for cols in
class_report_df.columns]
self.table_class_rep_classify.index_position = None
self.classify_cm_mapper.low, self.classify_cm_mapper.high = confusion_df.value.values.min(), \
confusion_df.value.values.max()
self.color_bar_classify_cm.scale_alpha = 1
self.color_bar_classify_cm.major_label_text_alpha = 1
if str(confusion_df['Actual'].dtype) == 'object' or str(confusion_df['Prediction'].dtype) == 'object':
self.classify_cm_plot.xaxis.ticker = list(
set(pd.Categorical(confusion_df['Actual']).codes))
ticker_x_dict = dict(
enumerate(pd.Categorical(confusion_df['Actual']).categories))
confusion_df['Actual'] = pd.Categorical(
confusion_df['Actual']).codes
self.classify_cm_plot.xaxis.major_label_overrides = ticker_x_dict
self.classify_cm_plot.xaxis.major_label_orientation = pi / 4
self.classify_cm_plot.yaxis.ticker = list(
set(pd.Categorical(confusion_df['Prediction']).codes))
ticker_y_dict = dict(enumerate(pd.Categorical(
confusion_df['Prediction']).categories))
confusion_df['Prediction'] = pd.Categorical(
confusion_df['Prediction']).codes
self.classify_cm_plot.yaxis.major_label_overrides = ticker_y_dict
else:
self.classify_cm_plot.x_range.start, self.classify_cm_plot.x_range.end = confusion_df.Actual.min(), \
confusion_df.Actual.max()
self.classify_cm_plot.y_range.start, self.classify_cm_plot.y_range.end = confusion_df.Prediction.min(), \
confusion_df.Prediction.max()
self.classify_cm_plot.xaxis.ticker = sorted(target_df.unique())
self.classify_cm_plot.yaxis.ticker = sorted(target_df.unique())
self.classify_cm_plot.xaxis.axis_label = "Actual"
self.classify_cm_plot.yaxis.axis_label = "Predicted"
self.source_classify_cm.data = confusion_df
rf_df = pd.DataFrame(dict({'rf_features': rf_feature_labels,
'rf_importance': rf_feature_importance})).nlargest(15, "rf_importance")
self.source_classify_fi.data = dict(rf_df)
self.classify_fi_plot.x_range.factors = rf_df['rf_features'].values.tolist(
)
self.error_count += 1
self.alert_classify.text = str(
self.error_count)+" Classification completed"
def classify(self):
df_classify = pd.DataFrame()
self.source_classify = ColumnDataSource(data=dict(df_classify))
classify_columns = [TableColumn(field=cols, title=cols)
for cols in df_classify.columns]
self.table_classify = DataTable(source=self.source_classify, columns=classify_columns, width=1200, height=300,
fit_columns=True)
df_class_report = pd.DataFrame()
self.source_class_rep_classify = ColumnDataSource(
data=dict(df_class_report))
class_rep_columns_classify = [TableColumn(
field=cols, title=cols) for cols in df_class_report.columns]
self.table_class_rep_classify = DataTable(source=self.source_class_rep_classify, columns=class_rep_columns_classify, width=600, height=200,
fit_columns=True)
classify_cm_colors = list(reversed(Blues[9]))
actual_cm, predicted_cm, value_cm = [], [], []
self.source_classify_cm = ColumnDataSource(data=dict(Actual=actual_cm, Prediction=predicted_cm,
value=value_cm))
self.classify_cm_mapper = LinearColorMapper(
palette=classify_cm_colors, low=0, high=100)
self.labels_classify_cm = LabelSet(x='Actual', y='Prediction', text='value', level='overlay', x_offset=0,
y_offset=-10,
source=self.source_classify_cm, render_mode='canvas', text_align='center',
text_font='times',
text_color='#FF0000', text_font_style='bold', text_font_size='16px')
self.hover_classify_cm = HoverTool(tooltips=[("Actual", "@Actual"),
("Predicted", "@Prediction"),
("Value", "@value")])
self.classify_cm_plot = figure(plot_width=600, plot_height=550, title="Confusion Matrix", toolbar_location=None,
                                       tools=[self.hover_classify_cm], x_axis_location="above")
self.classify_cm_plot.rect(x="Actual", y="Prediction", width=.9, height=.9, source=self.source_classify_cm,
line_color='black', fill_color=transform('value', self.classify_cm_mapper))
self.classify_cm_plot.y_range.flipped = True
self.color_bar_classify_cm = ColorBar(color_mapper=self.classify_cm_mapper, location=(0, 0),
ticker=BasicTicker(
desired_num_ticks=len(classify_cm_colors)),
scale_alpha=0, major_label_text_alpha=0)
self.classify_cm_plot.add_layout(self.color_bar_classify_cm, 'right')
self.color_bar_classify_cm.background_fill_color = "whitesmoke"
self.classify_cm_plot = self.plot_format(self.classify_cm_plot)
self.classify_cm_plot.add_layout(self.labels_classify_cm)
self.classify_cm_plot.min_border_left = 100
self.classify_cm_plot.min_border_top = 100
self.classify_cm_plot.min_border_bottom = 50
rf_features = []
rf_importance = []
self.hover_classify_fi = HoverTool(tooltips=[("Feature", "@rf_features"),
("Importance Score", "@rf_importance{0.02f}")])
self.source_classify_fi = ColumnDataSource(
data=dict(rf_features=rf_features, rf_importance=rf_importance))
self.classify_fi_plot = figure(x_range=[], plot_width=600, plot_height=600, toolbar_location=None,
title="Feature Importance", tools=[self.hover_classify_fi])
self.classify_fi_plot.vbar(x='rf_features', top='rf_importance', bottom=0, width=0.9,
source=self.source_classify_fi, line_color='white', fill_color='dodgerblue')
self.classify_fi_plot.background_fill_color = self.background_fill_color
self.classify_fi_plot.border_fill_color = self.border_fill_color
self.classify_fi_plot.yaxis.formatter = self.x_axis_format
self.classify_fi_plot.title.align = self.title_align
self.classify_fi_plot.title.text_font = self.text_font
self.classify_fi_plot.axis.axis_label_text_font = self.axis_label_text_font
self.classify_fi_plot.axis.axis_label_text_font_size = '8pt'
self.classify_fi_plot.title.text_font_size = self.text_font_size
self.classify_fi_plot.xaxis.major_label_orientation = pi / 4
self.classify_fi_plot.min_border_left = 50
self.classify_fi_plot.min_border_bottom = 100
self.classify_data_select = Select(title="Dataset:", value="Select dataset",
options=["Select dataset"] + list(self.classify_data_source.keys()))
self.classify_features_ms = MultiSelect(
title="Select features:", value=["ALL"], options=["ALL"])
self.normalize_classify = RadioButtonGroup(
labels=["Actual Data", "Normalize Data"], active=0)
self.classify_target_ms = Select(title="Select target for Classification:", value="SELECT TARGET",
options=["SELECT TARGET"])
self.button_classify = Button(label="Perform classification")
self.button_classify.disabled = True
self.classify_data_select.on_change(
'value', self.create_figure_classify)
self.classify_target_ms.on_change("value", self.classify_button_enable)
self.button_classify.on_click(self.classify_plot)
self.div_report_title = Div(
text="""<center>Classification Report</center>""", width=600)
self.alert_classify = Div(text='', css_classes=[
'hidden'], visible=False)
self.alert_classify.js_on_change('text', self.callback_notification)
tab_classify = Panel(child=column(self.classify_data_select, self.table_classify,
row(column(self.classify_features_ms, self.normalize_classify, self.classify_target_ms,
self.button_classify),
column(self.div_report_title, self.table_class_rep_classify, column(self.classify_cm_plot, self.classify_fi_plot, self.alert_classify)))),
title="Classification")
return tab_classify
class clustering(plot_attributes):
"""
Tab for Clustering
"""
def __init__(self):
self.source_clustering = None
self.clust_df = None
self.source_clust = None
self.mapper = None
self.clust_scat = None
self.clust_slider = None
self.button_cluster = None
self.clus_data_select = None
self.clust_features_ms = None
self.clust_norm_rbg = None
self.hover_clust = None
self.table_clustering = None
def cluster_plot(self):
active_features = self.clust_features_ms.value
active_norm = self.clust_norm_rbg.active
active_clust_no = self.clust_slider.value
source_clust_data = clustering_data(self.clust_df, active_features, active_norm, active_clust_no,
self.clustering_data_source, self.mapper, self.clust_scat)
self.source_clust.data = source_clust_data
self.error_count += 1
self.alert_cluster.text = str(self.error_count)+" Clustering Completed"
def clustering_plot(self, attr, old, new):
self.active_df = str(self.clus_data_select.value)
if self.active_df != "Select dataset":
self.button_cluster.disabled = False
self.file_path = str(
self.cwd + self.data_path + str(self.clustering_data_source.get(self.active_df)))
clust_df = pd.read_csv(self.file_path)
clust_df = clust_df.fillna(clust_df.mean())
clust_df.columns = [x.upper() for x in clust_df.columns]
self.clust_df = clust_df
self.source_clustering.data = dict(clust_df)
self.table_clustering.columns = [TableColumn(field=cols, title=cols, width=90) for cols in
self.clust_df.columns]
self.clust_features_ms.options = ['ALL'] + list(clust_df.columns)
else:
self.button_cluster.disabled = True
def cluster(self):
        df_clustering = pd.DataFrame()
import os, re, json, datetime, random, csv
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import pandas as pd
import numpy as np
import seaborn as sn
from string import ascii_uppercase
import utils.dataGenerator as datagen
import utils.dataGenerator4D as datagen4D
metrics_index = ['Sensitivity', 'Specificity', 'Precision', 'Negative predictive value', 'Fall out', 'False negative rate', 'False discovery rate', 'Accuracy', 'F1 Score']
def filter_files_by_regex(files:list, regex:str):
filtered_list = [val for val in files if re.search(regex, val)]
return filtered_list
def build_regex_for_subjects(subjects:list):
user_regex_string ="^("
for subject in subjects:
user_regex_string = (user_regex_string + subject + '|')
user_regex_string = user_regex_string[:-1] + ')'
user_regex_string = re.compile(user_regex_string)
return user_regex_string
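# Hedged example: build_regex_for_subjects(["S01", "S02"]) compiles to
# r"^(S01|S02)", i.e. it matches files whose names start with one of the
# requested subject IDs (build_regex_for_movement below works the same way,
# just without the start-of-string anchor).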
def build_regex_for_movement(movements:list):
user_regex_string ="("
for movement in movements:
user_regex_string = (user_regex_string + movement + '|')
user_regex_string = user_regex_string[:-1] + ')'
user_regex_string = re.compile(user_regex_string)
return user_regex_string
def extract_info_from_config(cfg:json):
return cfg["input-rows"], cfg["input-columns"], cfg["channels"], cfg["movements"], cfg["batch-size"], cfg["train-steps"], cfg["validation-steps"], cfg["test-steps"], cfg["epochs"]
def split_dataset(files:list, cfg:json):
movements_regex_string = build_regex_for_movement(cfg["movements"])
# Load train set
train_user_regex_string = build_regex_for_subjects(cfg["train-subjects"])
train_files = filter_files_by_regex(files, train_user_regex_string)
train_files = filter_files_by_regex(train_files, movements_regex_string)
# Use only original files if desired
if cfg["no-augmentation"]:
train_files = filter_files_by_regex(train_files, r'-0.csv$')
# Load test set
test_user_regex_string = build_regex_for_subjects(cfg["test-subjects"])
test_files = filter_files_by_regex(files, test_user_regex_string)
test_files = filter_files_by_regex(test_files, movements_regex_string)
# Use only original files for testing
test_files = filter_files_by_regex(test_files, r'-0.csv$')
# Load validation set
validation_user_regex_string = build_regex_for_subjects(cfg["validation-subjects"])
validation_files = filter_files_by_regex(files, validation_user_regex_string)
validation_files = filter_files_by_regex(validation_files, movements_regex_string)
# Use only original files for validation
validation_files = filter_files_by_regex(validation_files, r'-0.csv$')
print("Original train files: " + str(len(filter_files_by_regex(train_files, r'-0.csv$'))))
print("Total train files: " + str(len(train_files)))
print("Original validation files: " + str(len(filter_files_by_regex(validation_files, r'-0.csv$'))))
print("Total validation files: " + str(len(validation_files)))
print("Original test files: " + str(len(filter_files_by_regex(test_files, r'-0.csv$'))))
print("Total test files: " + str(len(test_files)))
return train_files, validation_files, test_files
def balance_data_set(files:list, cfg:json, set:str):
movement_samples = {}
for movement in cfg["movements"]:
movement_regex_string = re.compile(movement)
files_filtered = filter_files_by_regex(files, movement_regex_string)
movement_samples[movement] = len(files_filtered)
min_key = min(movement_samples, key=movement_samples.get)
final_files = []
print("Under balancing data-set by movement " + min_key + " with " + str(movement_samples[min_key]) + " total files.")
for movement in cfg["movements"]:
movement_regex_string = re.compile(movement)
files_filtered = filter_files_by_regex(files, movement_regex_string)
random.shuffle(files_filtered)
final_files = final_files + files_filtered[:movement_samples[min_key]]
print("Final " + set + " data-set has " + str(len(final_files)) + " images.")
return final_files
def loadCfgJson(file_path:str):
with open(file_path) as f:
return json.load(f)
def create_folder(folder_path: str, folderBaseName:str=''):
datetime_object = datetime.datetime.now()
folder_path = folder_path + '/' + folderBaseName + str(datetime_object)
os.mkdir(folder_path)
return folder_path
def create_outcome_file(outcome_path:str, model, test_loss, test_accuracy, history_callback, comments:str):
history = history_callback.history
with open(outcome_path + '/outcome.txt', 'w') as file:
file.write('##################################################\n')
file.write('# MODEL SUMMARY #\n')
file.write('##################################################\n')
model.summary(print_fn=lambda x: file.write(x + '\n'))
file.write('\n\n')
file.write('##################################################\n')
file.write('# TRAIN OUTCOME #\n')
file.write('##################################################\n')
for i in range(len(history["loss"])):
file.write( 'loss: ' + str(format(history["loss"][i], ".4f")) +
' | accuracy: ' + str(format(history["accuracy"][i], ".4f")) +
' | val_loss: ' + str(format(history["val_loss"][i], ".4f")) +
' | val_accuracy: ' + str(format(history["val_accuracy"][i], ".4f")) + '\n')
file.write('\n\n')
file.write('##################################################\n')
file.write('# TEST OUTCOME #\n')
file.write('##################################################\n')
file.write( 'loss: ' + str(format(test_loss, ".4f")) + ' | accuracy: ' + str(format(test_accuracy, ".4f")))
file.write('\n\n')
file.write('##################################################\n')
file.write('# NOTES #\n')
file.write('##################################################\n')
file.write(comments)
file.write('\n\n')
def create_config_output_file(outcome_path:str, cfg:json):
with open(outcome_path + '/config.json', 'w') as outfile:
json.dump(cfg, outfile)
def save_model_and_weights(outcome_path:str, model):
model_json = model.to_json()
with open(outcome_path + '/model.json', "w") as json_file:
json_file.write(model_json)
model.save_weights(outcome_path + '/model.h5')
def addCallbacks(callbacks:json, callback_list: list, outcome_path:str):
modelCheckPoint = False
for callback in callbacks:
if(callback["type"] == "earlyStop"):
callback_list.append(tf.keras.callbacks.EarlyStopping(monitor=callback["monitor"], patience=callback["patience"]))
if(callback["type"] == "modelCheckPoint"):
modelCheckPoint = True
callback_list.append(tf.keras.callbacks.ModelCheckpoint(monitor=callback["monitor"], save_weights_only=callback["save_weights_only"], save_best_only=callback["save_best_only"]
, filepath= outcome_path, mode=callback["mode"] ))
return callback_list, modelCheckPoint
def initialize_folder(path):
try:
os.mkdir(path)
except OSError:
print ("Creation of the directory %s failed" % path)
else:
print ("Successfully created the directory %s " % path)
def calculate_confusion_matrix_metrics(confusion_matrix, movements):
FP = confusion_matrix.sum(axis=0) - np.diag(confusion_matrix)
FN = confusion_matrix.sum(axis=1) - np.diag(confusion_matrix)
TP = np.diag(confusion_matrix)
TN = confusion_matrix.sum() - (FP + FN + TP)
FP = FP.astype(float)
FN = FN.astype(float)
TP = TP.astype(float)
TN = TN.astype(float)
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP/(TP+FN)
# Specificity or true negative rate
TNR = TN/(TN+FP)
# Precision or positive predictive value
PPV = TP/(TP+FP)
# Negative predictive value
NPV = TN/(TN+FN)
# Fall out or false positive rate
FPR = FP/(FP+TN)
# False negative rate
FNR = FN/(TP+FN)
# False discovery rate
FDR = FP/(TP+FP)
# Overall accuracy for each class
ACC = (TP+TN)/(TP+FP+FN+TN)
# F1 Score
FONE = 2*TP/(2*TP + FP + FN)
metrics_df = pd.DataFrame([TPR, TNR, PPV, NPV, FPR, FNR, FDR, ACC, FONE] ,index= metrics_index ,columns=movements)
metrics_df['Average'] = metrics_df.mean(numeric_only=True, axis=1)
return metrics_df
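# A minimal, hedged usage sketch (not part of the training pipeline): it feeds a
# made-up 2x2 confusion matrix into calculate_confusion_matrix_metrics to show
# the expected shape of the result. The movement names below are placeholders.
def _demo_confusion_matrix_metrics():
    toy_cm = np.array([[8, 2],
                       [1, 9]])
    toy_movements = ["movementA", "movementB"]
    metrics = calculate_confusion_matrix_metrics(toy_cm, toy_movements)
    # One row per entry of metrics_index, one column per movement plus 'Average'
    print(metrics.round(3))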
def create_confusion_matrix(prediction:list, file_path:str, movements:list):
predicted_labels = prediction.argmax(axis=-1)
with open(file_path+ '/test.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
test_labels= []
for row in csv_reader:
array = row[0].replace("\n", " ").replace("[", " ").replace("]", " ")[1:]
labels = array.split(" ")
for label in labels:
if label.isdigit():
test_labels.append(int(label))
test_labels = np.array(test_labels)
final_confusion_matrix = confusion_matrix(test_labels, predicted_labels)
# os.remove(file_path+ '/test.csv')
columns = np.array(movements)
# Build & save confusion matrix
save_dataframe_as_heatMap(nparray=final_confusion_matrix, indexNames=columns \
,columNames=columns, saveFolder=file_path, imgBaseName='/confusion-matrix', format='d')
# Build & save confusion metrics matrix
metrics_df=calculate_confusion_matrix_metrics(final_confusion_matrix, movements)
metrics_df.to_csv(file_path+'/confusion-matrix-metrics.csv')
fig = plt.figure(figsize = (len(metrics_index),len(columns)))
sn.set(font_scale=1.4) # for label size
sn.heatmap(metrics_df, annot=True, cmap='Blues', annot_kws={"size": 10}) # font size
fig.tight_layout()
plt.savefig(file_path + '/confusion-matrix-metrics.png')
# Build & save normalized confusion matrix
row_sums = final_confusion_matrix.sum(axis=1)
normalized_aggregated_confusion_matrix = final_confusion_matrix / row_sums[:, np.newaxis]
save_dataframe_as_heatMap(nparray=normalized_aggregated_confusion_matrix, indexNames=columns \
,columNames=columns, saveFolder=file_path, imgBaseName='/confusion-matrix-normalized')
def restrict_to_physcial_gpu():
gpus = tf.config.list_physical_devices('GPU')
if gpus:
# Restrict TensorFlow to only use the first GPU
try:
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
except RuntimeError as e:
# Visible devices must be set before GPUs have been initialized
print(e)
def set_memory_growth():
gpus = tf.config.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
# For K-Fold directories build average confusion matrices
def build_average_confusion_matrix(kFoldFolder:str):
_, folders, _ = next(os.walk(kFoldFolder))
confusion_matrix_metrics = []
confusion_matrixs = []
columns = []
for folder in folders:
confusion_matrix = | pd.read_csv(kFoldFolder + '/' + folder + '/confusion-matrix.csv', header=None) | pandas.read_csv |
from datetime import timedelta
import pytest
from pandas import PeriodIndex, Series, Timedelta, date_range, period_range, to_datetime
import pandas._testing as tm
class TestToTimestamp:
def test_to_timestamp(self):
index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
series = Series(1, index=index, name="foo")
exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
result = series.to_timestamp(how="end")
exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
assert result.name == "foo"
exp_index = | date_range("1/1/2001", end="1/1/2009", freq="AS-JAN") | pandas.date_range |
# Written by i3s
import os
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import seaborn as sns
import time
from sklearn.model_selection import KFold
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.cross_decomposition import PLSRegression
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
def proj_l1ball(y, eta):
"""
    Note that y should preferably be 1D; otherwise the element-wise operations may give unpredictable results.
    This function automatically reshapes y to (m,), where m is y.size, i.e. y.shape[0]*y.shape[1] for a 2D input.
"""
if type(y) is not np.ndarray:
y = np.array(y)
if y.ndim > 1:
y = np.reshape(y, (-1,))
return np.maximum(
np.absolute(y)
- np.amax(
[
np.amax(
(np.cumsum(np.sort(np.absolute(y), axis=0)[::-1], axis=0) - eta)
/ (np.arange(y.shape[0]) + 1)
),
0,
]
),
0,
) * np.sign(y)
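# A minimal, hedged sanity check for proj_l1ball: the values below are
# illustrative only; the key property is that the l1 norm of the projected
# vector does not exceed the chosen radius eta.
def _demo_proj_l1ball():
    y = np.array([3.0, -2.0, 0.5, 1.5])
    eta = 2.0
    v = proj_l1ball(y, eta)
    print("projected:", v)                 # [1.5, -0.5, 0.0, 0.0]
    print("l1 norm  :", np.abs(v).sum())   # 2.0 == eta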
def centroids(XW, Y, k):
Y = np.reshape(Y, -1)
d = XW.shape[1]
mu = np.zeros((k, d))
"""
    since in Python indexing starts from 0 (not 1),
    Y==i here becomes Y==(i+1),
    or else the values in Y would need to be shifted
"""
for i in range(k):
C = XW[Y == (i + 1), :]
mu[i, :] = np.mean(C, axis=0)
return mu
def class2indicator(y, k):
if len(y.shape) > 1:
# Either throw exception or transform y, here the latter is chosen.
# Note that a list object has no attribute 'flatten()' as np.array do,
# We use x = np.reshape(y,-1) instead of x = y.flatten() in case of
# the type of 'list' of argument y
y = np.reshape(y, -1)
n = len(y)
Y = np.zeros((n, k)) # dtype=float by default
"""
    since in Python indexing starts from 0 (not 1),
    the MATLAB y==i here becomes y==(i+1)
"""
for i in range(k):
Y[:, i] = y == (i + 1)
return Y
def nb_Genes(w):
# Return the number of selected genes from the matrix (numpy.ndarray) w
d = w.shape[0]
ind_genes = np.zeros((d, 1))
for i in range(d):
if np.linalg.norm(w[i, :]) > 0:
ind_genes[i] = 1
indGene_w = np.where(ind_genes == 1)[0]
nbG = int(np.sum(ind_genes))
return nbG, indGene_w
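# A minimal, hedged example of nb_Genes on a hand-made weight matrix where only
# rows 0 and 2 carry non-zero weights, so the expected count is 2.
def _demo_nb_Genes():
    w = np.array([[0.5, 0.0],
                  [0.0, 0.0],
                  [0.0, -1.2]])
    nbG, ind = nb_Genes(w)
    print(nbG, ind)  # 2 [0 2]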
def select_feature_w(w, featurenames):
k = w.shape[1]
d = w.shape[0]
lst_features = []
lst_norm = []
for i in range(k):
s_tmp = w[:, i] # the i-th column
f_tmp = np.abs(s_tmp) # the absolute values of this column
ind = np.argsort(f_tmp)[
::-1
] # the indices of the sorted abs column (descending order)
f_tmp = np.sort(f_tmp)[::-1] # the sorted abs column (descending order)
nonzero_inds = np.nonzero(f_tmp)[0] # the nonzero indices
lst_f = []
lst_n = []
if len(nonzero_inds) > 0:
nozero_ind = nonzero_inds[-1] # choose the last nonzero index
if nozero_ind == 0:
lst_f.append(featurenames[ind[0]])
lst_n.append(s_tmp[ind[0]])
else:
for j in range(nozero_ind + 1):
lst_f.append(featurenames[ind[j]])
lst_n = s_tmp[ind[0 : (nozero_ind + 1)]]
lst_features.append(lst_f)
lst_norm.append(lst_n)
n_cols_f = len(lst_features)
    n_rows_f = max(map(len, lst_features))  # maximum subset length
n_cols_n = len(lst_norm)
n_rows_n = max(map(len, lst_norm))
for i in range(n_cols_f):
ft = np.array(lst_features[i])
ft.resize(n_rows_f, refcheck=False)
nt = np.array(lst_norm[i])
nt.resize(n_rows_n, refcheck=False)
if i == 0:
features = ft
normW = nt
continue
features = np.vstack((features, ft))
normW = np.vstack((normW, nt))
features = features.T
normW = normW.T
return features, normW
def compute_accuracy(idxR, idx, k):
"""
# ===============================
#----- INPUT
# idxR : real labels
# idx : estimated labels
# k : number of class
#----- OUTPUT
# ACC_glob : global accuracy
# tab_acc : accuracy per class
# ===============================
"""
    # Note that Python's native sum works better on lists than on numpy arrays,
    # while numpy.sum works better on numpy arrays than on lists.
    # So numpy.ndarray is used as the default type for idxR and idx.
    if type(idxR) is not np.ndarray:
        idxR = np.array(idxR)
    if type(idx) is not np.ndarray:
        idx = np.array(idx)
if idxR.ndim == 2 and 1 not in idxR.shape:
idxR = np.reshape(idxR, (-1, 1))
if idx.ndim == 1:
idx = np.reshape(idx, idxR.shape)
# Global accuracy
y = np.sum(idxR == idx)
ACC_glob = y / len(idxR)
# Accuracy per class
tab_acc = np.zeros((1, k))
"""
    since in Python indexing starts from 0 (not 1),
    the MATLAB idx(ind)==j here becomes idx[ind]==(j+1)
"""
for j in range(k):
ind = np.where(idxR == (j + 1))[0]
if len(ind) == 0:
tab_acc[0, j] = 0.0
else:
tab_acc[0, j] = int(np.sum(idx[ind] == (j + 1))) / len(ind)
return ACC_glob, tab_acc
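# A minimal, hedged example of compute_accuracy with labels coded 1..k, as the
# docstring above assumes. The label vectors are made up for illustration.
def _demo_compute_accuracy():
    y_true = np.array([1, 1, 2, 2, 2])
    y_pred = np.array([1, 2, 2, 2, 1])
    acc_glob, acc_per_class = compute_accuracy(y_true, y_pred, k=2)
    print(acc_glob)       # 3 correct out of 5 -> 0.6
    print(acc_per_class)  # [[0.5  0.6667]] (accuracy of class 1 and class 2)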
def predict_L1(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 1)
# print(distmu)
# sns.kdeplot(np.array(distmu), shade=True, bw=0.1)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
return Ytest
# function to compute the \rho value
def predict_L1_molecule(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
confidence = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 1)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
confidence[i] = (distmu[0, 1] - distmu[0, 0]) / (distmu[0, 1] + distmu[0, 0])
return Ytest, confidence
# =============================Plot functions=================================================
# function to plot the distribution of \rho
def rhoHist(rho, n_equal_bins):
"""
# ===============================
#----- INPUT
# rho : df_confidence
# n_equal_bins : the number of histogram bins
#
#----- OUTPUT
# plt.show()
# ===============================
"""
# The leftmost and rightmost bin edges
first_edge, last_edge = rho.min(), rho.max()
bin_edges = np.linspace(
start=first_edge, stop=last_edge, num=n_equal_bins + 1, endpoint=True
)
_ = plt.hist(rho, bins=bin_edges)
plt.title("Histogram of confidence score")
plt.show()
def pd_plot(X, Yr, W, flag=None):
plt.figure()
X_transform = np.dot(X, W)
# cluster 1
index1 = np.where(Yr == 1)
X_1 = X_transform[index1[0], :]
c1 = np.mean(X_1, axis=0)
# plt.scatter(X_1[:,0],X_1[:,8],c='b', label='cluster1')
# cluster 2
index2 = np.where(Yr == 2)
X_2 = X_transform[index2[0], :]
c2 = np.mean(X_2, axis=0)
if flag == True:
plt.scatter(c1[0], c1[1], c="y", s=100, marker="*", label="center1")
plt.scatter(c2[0], c2[1], c="c", s=100, marker="*", label="center2")
plt.plot(X_1[:, 0], X_1[:, 1], "ob", label="cluster1")
plt.plot(X_2[:, 0], X_2[:, 1], "^r", label="cluster2")
plt.title("Primal_Dual")
plt.legend()
plt.show()
def pca_plot(X, Yr, W, flag=None):
plt.figure()
# if flag==True:
# X=np.dot(X,W)
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
X_norm = X_pca
# cluster 1
index1 = np.where(Yr == 1)
X_1 = X_norm[index1[0], :]
c1 = np.mean(X_1, axis=0)
# plt.scatter(X_1[:,0],X_1[:,8],c='b', label='cluster1')
# cluster 2
index2 = np.where(Yr == 2)
X_2 = X_norm[index2[0], :]
c2 = np.mean(X_2, axis=0)
# plt.scatter(X_2[:,0],X_2[:,8],c='g',label='cluster2')
if flag == True:
plt.scatter(c1[0], c1[1], c="y", s=100, marker="*", label="center1")
plt.scatter(c2[0], c2[1], c="c", s=100, marker="*", label="center2")
plt.plot(X_1[:, 0], X_1[:, 1], "ob", label="cluster1")
plt.plot(X_2[:, 0], X_2[:, 1], "^r", label="cluster2")
plt.title("PCA")
plt.legend()
plt.show()
def Predrejection(df_confidence, eps, num_eps):
"""
# =====================================================================
# It calculates the false rate according to the value of epsilon
#
#----- INPUT
# df_confidence : dataframe which contains predicted label,
# original label and rho
# eps : the threshold
# num_eps : the number of epsilon that can be tested
#----- OUTPUT
# FalseRate : An array that contains the falserate according to epsilon
# =====================================================================
"""
Yr = np.array(df_confidence["Yoriginal"])
Yr[np.where(Yr == 2)] = -1
Ypre = np.array(df_confidence["Ypred"])
Ypre[np.where(Ypre == 2)] = -1
rho = df_confidence["rho"]
epsList = np.arange(0, eps, eps / num_eps)
falseRate = []
rejectSample = []
for epsilon in epsList:
index = np.where((-epsilon < rho) & (rho < epsilon))
Yr[index] = 0
Ypre[index] = 0
Ydiff = Yr - Ypre
rejectRate = len(index[0]) / len(Yr)
error = len(np.where(Ydiff != 0)[0]) / len(Yr)
falseRate.append(error)
rejectSample.append(rejectRate)
plt.figure()
plt.plot(epsList, falseRate)
plt.xlabel("Confidence score prediction")
plt.ylabel("FN+FP (ratio)")
# plot the number of rejected samples
plt.figure()
plt.plot(epsList, rejectSample)
plt.xlabel("Confidence score prediction")
plt.ylabel(" Reject samples (ratio) ")
return np.array(falseRate)
# ==============================================================================
def predict_FISTA(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 2)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
return Ytest
def normest(X, tol=1.0e-6, maxiter=100):
# import necessary modules
import scipy.sparse
import numpy as np
import warnings
if scipy.sparse.issparse(X):
x = np.array(np.sum(np.abs(X), axis=0))
x = np.reshape(x, max(x.shape))
elif type(X) == np.matrix:
x = np.sum(np.abs(np.asarray(X)), axis=0)
x = np.reshape(x, max(x.shape))
else:
x = np.sum(np.abs(X), axis=0)
norm_e = np.linalg.norm(x)
if norm_e == 0:
return norm_e
x = x / norm_e
norm_e0 = 0
count = 0
while np.abs(norm_e - norm_e0) > tol * norm_e:
norm_e0 = norm_e
Xx = np.matmul(X, x)
if np.count_nonzero(Xx) == 0:
Xx = np.random.rand(Xx.shape[0])
x = np.matmul(X.T, Xx)
normx = np.linalg.norm(x)
norm_e = normx / np.linalg.norm(Xx)
x = x / normx
count += 1
if count > maxiter:
warnings.warn(
"Normest::NotConverge:the number of iterations exceeds {} times.\nThe error is {}, the tolerance is {}".format(
maxiter, np.abs(norm_e - norm_e0), tol
),
RuntimeWarning,
)
break
return norm_e
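# A minimal, hedged check of normest: the power-iteration estimate should agree
# (up to the tolerance) with the exact 2-norm computed by numpy on a small
# dense matrix.
def _demo_normest():
    rng = np.random.RandomState(0)
    A = rng.rand(20, 5)
    print(normest(A), np.linalg.norm(A, 2))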
def merge_topGene_norm(topGenes, normW, clusternames):
"""
# =====================================================================
    # It merges the two outputs of the function select_feature_w into a new
    # pandas.DataFrame whose columns are the elements of clusternames,
    # each column having two subcolumns: topGenes and Weights
    #
    #----- INPUT
    # topGenes      : ndarray of the top genes chosen by select_feature_w
    # normW         : norm of the weight of each gene, given by select_feature_w
# clusternames : A list of the names of each class.
#----- OUTPUT
    # df_res : A DataFrame where, for each column, the first subcolumn holds
    #          the genes and the second subcolumn their weight norms
# =====================================================================
"""
if topGenes.shape != normW.shape:
raise ValueError("The dimension of the two input should be the same")
m, n = topGenes.shape
nbC = len(clusternames)
res = np.dstack((topGenes, normW))
res = res.reshape(m, 2 * n)
lst_col = []
for i in range(nbC):
lst_col.append((clusternames[i], "topGenes"))
lst_col.append((clusternames[i], "Weights"))
df_res = pd.DataFrame(res, columns=lst_col)
df_res.columns = pd.MultiIndex.from_tuples(
df_res.columns, names=["CluserNames", "Attributes"]
)
return df_res
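# A minimal, hedged example of merge_topGene_norm on tiny hand-made arrays,
# just to show the resulting two-level column layout (one topGenes/Weights
# pair per class). The gene and class names are placeholders.
def _demo_merge_topGene_norm():
    topGenes = np.array([["gene1", "gene3"], ["gene2", "gene4"]])
    normW = np.array([[0.9, 0.7], [0.4, 0.2]])
    print(merge_topGene_norm(topGenes, normW, ["ClassA", "ClassB"]))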
def merge_topGene_norm_acc(
topGenes,
normW,
clusternames,
acctest,
nbr_features=30,
saveres=False,
file_tag=None,
outputPath="../results/",
):
"""
# =============================================================================================== \n
    # Based on the function merge_topGene_norm, but replaces the column name for \n
    # normW by the corresponding test accuracy \n
#----- INPUT \n
# topGenes (ndarray or DataFrame) : Top Genes chosen by select_features_w \n
# normW (ndarray or DataFrame) : The normWeight of each genes given by select_features_w \n
# clusternames (list or array) : A list of the names of each class \n
# acctest (list or array) : The list of the test accuracy \n
# saveres (optional, boolean) : True if we want to save the result to local \n
# file_tag (optional, string) : A file tag which will be the prefix of the file name \n
# outputPath (optional, string) : The output Path of the file \n
# ----- OUTPUT \n
    # df_res : A DataFrame where, for each column, the first subcolumn holds the genes \n
    # and the second subcolumn their weight norms \n
# =============================================================================================== \n
"""
if type(topGenes) is pd.DataFrame:
topGenes = topGenes.values
if type(normW) is pd.DataFrame:
normW = normW.values
if topGenes.shape != normW.shape:
raise ValueError("The dimension of the two input should be the same")
m, n = topGenes.shape
nbC = len(clusternames)
res = np.dstack((topGenes, normW))
res = res.reshape(m, 2 * n)
lst_col = []
acctest_mean = acctest.values.tolist()[4]
for i in range(nbC):
lst_col.append((clusternames[i], "topGenes"))
astr = str(acctest_mean[i])
lst_col.append((astr, "Weights"))
df_res = pd.DataFrame(res[0:nbr_features, :], columns=lst_col)
df_res.columns = pd.MultiIndex.from_tuples(
df_res.columns, names=["CluserNames", "Attributes"]
)
if saveres:
df_res.to_csv(
"{}{}_Heatmap of Acc_normW_Topgenes.csv".format(outputPath, file_tag),
sep=";",
)
return df_res
def compare_2topGenes(
topGenes1,
topGenes2,
normW1=None,
normW2=None,
lst_col=None,
nbr_limit=30,
printOut=False,
):
"""
#=======================================================================================
    # Compare, column by column, the elements of the two topGenes; for each
    # column only the first "nbr_limit" elements are checked.
    # The two topGenes should have the same number of columns
# ----- INPUT
# topGenes1, topGenes2 (DataFrame) : Two topGenes to be compared
# normW1, normW2 (DataFrame,optional): Two matrix of weights correspondent. Default: None
# lst_col (list, optional) : If given, only the chosen column will be compared. Default: None
# nbr_limit (scalar, optional) : Number of the lines to be compared. Default: 30
# printOut (boolean, optional) : If True, the comparison result will be shown on screen. Default: False
# ----- OUTPUT
# out (string) : It returns a string of the comparing result as output.
#=======================================================================================
"""
import pandas as pd
import numpy as np
if type(topGenes1) != type(topGenes2):
raise ValueError("The two topGenes to be compared should be of the same type.")
if type(topGenes1) is not pd.DataFrame:
col = ["C" + str(i) for i in topGenes1.shape[1]]
topGenes1 = pd.DataFrame(topGenes1, columns=col)
topGenes2 = pd.DataFrame(topGenes2, columns=col)
out = []
out.append("Comparing the two TopGenes:\n")
    # After benchmarking, appending to a list and joining it into a single string at the end seems to be the least time-consuming approach
list_name = list(topGenes1.columns)
if lst_col is not None:
list_name = [list_name[ind] for ind in lst_col]
for name in list_name:
out.append(
"{0:{fill}{align}40}\n".format(" Class %s " % name, fill="=", align="^")
)
col_1 = np.array(topGenes1[[name]], dtype=str)
col_2 = np.array(topGenes2[[name]], dtype=str)
        # Here np.nonzero returns a tuple of 2 arrays corresponding to the first
        # and the second dimension, where the values of the second dimension
        # are always 0. So the last location in the first dimension + 1 is the
        # length of the nonzero part, i.e. the location of the first zero
        # element
length_nonzero_1 = np.nonzero(col_1)[0][-1] + 1
length_nonzero_2 = np.nonzero(col_2)[0][-1] + 1
# np.nonzero will not detect '0.0' as zero type
if all(col_1 == "0.0"):
length_nonzero_1 = 0
if all(col_2 == "0.0"):
length_nonzero_2 = 0
length_min = min(length_nonzero_1, length_nonzero_2)
# Check if at least one of the classes contains only zero and avoid the error
if length_min == 0 and length_nonzero_1 == length_nonzero_2:
out.append(
"* Warning: No feature is selected for both two class\n Skipped for this class"
)
continue
elif length_min == 0 and length_nonzero_1 > 0:
out.append(
"* Warning: No feature is selected for this class in TopGenes2\n"
)
out.append(
"* All {} elements are included only in topGenes1:\n".format(
min(length_nonzero_1, nbr_limit)
)
)
for k in range(min(length_nonzero_1, nbr_limit)):
if normW1 is None:
out.append(" (%s)\n" % (str(col_1[k, 0])))
else:
out.append(
" (%s, %s)\n" % (str(col_1[k, 0]), normW1[[name]].iloc[k, 0])
)
continue
elif length_min == 0 and length_nonzero_2 > 0:
out.append(
"* Warning: No feature is selected for this class in TopGenes1\n"
)
out.append(
"* All {} elements are included only in topGenes2:\n".format(
min(length_nonzero_2, nbr_limit)
)
)
for k in range(min(length_nonzero_2, nbr_limit)):
if normW2 is None:
out.append(" (%s)\n" % (str(col_2[k, 0])))
else:
out.append(
" (%s, %s)\n" % (str(col_2[k, 0]), normW2[[name]].iloc[k, 0])
)
continue
if length_min < nbr_limit:
length = length_min
out.append(
"* Warning: In this column, the 1st topGenes has {} nozero elements\n* while the 2nd one has {} nonzero elements\n".format(
length_nonzero_1, length_nonzero_2
)
)
out.append("* So only first %d elements are compared\n\n" % length_min)
else:
length = nbr_limit
set_1 = col_1[0:length]
set_2 = col_2[0:length]
set_common = np.intersect1d(set_1, set_2) # Have in common
set_o1 = np.setdiff1d(set_1, set_2) # Exclusively in topGenes1
set_o2 = np.setdiff1d(set_2, set_1) # Exclusively in topGenes2
lc = len(set_common)
# print exclusively in topGenes1
out.append(
"Included exclusively in first topGenes: {} elements in total.\n".format(
length - lc
)
)
if length - lc > 0:
if normW1 is None:
out.append("Details:(Name)\n")
else:
out.append("Details:(Name,Weight)\n")
idx_i, idx_j = np.where(topGenes1[[name]].isin(set_o1))
for i, j in zip(idx_i, idx_j):
if normW1 is None:
out.append(" (%s)\n" % str(set_1[i, j]))
else:
out.append(
" (%s, %s)\n"
% (str(set_1[i, j]), str(normW1[[name]].iloc[i, j]))
)
out.append("\nNumber of elements in common:{}\n".format(lc))
        # print exclusively in topGenes2
out.append(
"\nIncluded exclusively in second topGenes: {} elements in total.\n".format(
length - lc
)
)
if length - lc > 0:
if normW2 is None:
out.append("Details:(Name)\n")
else:
out.append("Details:(Name,Weight)\n")
idx_i, idx_j = np.where(topGenes2[[name]].isin(set_o2))
for i, j in zip(idx_i, idx_j):
if normW2 is None:
out.append(" (%s)\n" % str(set_2[i, j]))
else:
out.append(
" (%s, %s)\n"
% (str(set_2[i, j]), str(normW2[[name]].iloc[i, j]))
)
out.append("{:-<40}\n".format(""))
out = "".join(out)
if printOut == True:
print(out)
return out
def heatmap_classification(
Ytest,
YR,
clusternames,
rotate=45,
draw_fig=False,
save_fig=False,
func_tag=None,
outputPath="../results/",
):
"""
#=====================================================
# It takes the predicted labels (Ytest), true labels (YR)
# and a list of the names of clusters (clusternames)
# as input and provide the heatmap matrix as the output
#=====================================================
"""
k = len(np.unique(YR)) # If we need to automatically find a k
Heatmap_matrix = np.zeros((k, k))
for i in np.arange(k) + 1:
for j in np.arange(k) + 1:
a = np.where(
Ytest[YR == i] == j, 1, 0
).sum() # number Ytest ==j where YR==i
b = np.where(YR == i, 1, 0).sum()
Heatmap_matrix[i - 1, j - 1] = a / b
# Plotting
if draw_fig == True:
plt.figure(figsize=(10, 6))
        annot = True
        if k > 10:
            annot = False
if clusternames is not None:
axes = sns.heatmap(
Heatmap_matrix,
cmap="jet",
annot=annot,
fmt=".2f",
xticklabels=clusternames,
yticklabels=clusternames,
)
else:
axes = sns.heatmap(Heatmap_matrix, cmap="jet", annot=annot, fmt=".2f")
axes.set_xlabel("Predicted true positive", fontsize=14)
axes.set_ylabel("Ground true", fontsize=14)
axes.tick_params(labelsize=7)
plt.xticks(rotation=rotate)
axes.set_title("Heatmap of confusion Matrix", fontsize=14)
plt.tight_layout()
if save_fig == True:
plt.savefig(
"{}{}_Heatmap_of_confusion_Matrix.png".format(outputPath, func_tag)
)
return Heatmap_matrix
def heatmap_normW(
normW,
clusternames=None,
nbr_l=10,
rotate=45,
draw_fig=False,
save_fig=False,
func_tag=None,
outputPath="../results/",
):
"""
#=====================================================
# It takes the predicted labels (Ytest), true labels (YR)
# and the number of clusters (k) as input and provide the
# heatmap matrix as the output
#=====================================================
"""
A = np.abs(normW)
AN = A / A[0, :]
if normW.shape[0] < nbr_l:
nbr_l = normW.shape[0]
ANR = AN[0:nbr_l, :]
annot = False
if draw_fig == True:
plt.figure(figsize=(10, 6))
# axes2=sns.heatmap(ANR,cmap='jet',annot=annot,fmt='.3f')
if clusternames is None:
axes2 = sns.heatmap(
ANR,
cmap="jet",
annot=annot,
fmt=".3f",
yticklabels=np.linspace(1, nbr_l, num=nbr_l, endpoint=True, dtype=int),
)
else:
axes2 = sns.heatmap(
ANR,
cmap="jet",
annot=annot,
fmt=".3f",
xticklabels=clusternames,
yticklabels=np.linspace(1, nbr_l, num=nbr_l, endpoint=True, dtype=int),
)
plt.xticks(rotation=rotate)
axes2.set_ylabel("Features", fontsize=14)
axes2.set_xlabel("Clusters", fontsize=14)
axes2.tick_params(labelsize=7)
axes2.set_title("Heatmap of Matrix W", fontsize=14)
plt.tight_layout()
if save_fig == True:
plt.savefig("{}{}_Heatmap_of_signature.png".format(outputPath, func_tag))
return ANR
def drop_cells_with_ID(X, Y, ID, n_fold):
"""
# ====================================================================
# This function will detect whether the size of the first dimension of
# X is divisible by n_fold. If not, it will remove the n_diff rows from
# the biggest class(with the largest size in Y) where n_diff=len(Y)%n_fold
#
# ---- Input
# X : The data
# Y : The label
# n_fold : The number of fold
# --- Output
# X_new, Y_new : The new data and the new label
# =====================================================================
"""
m, d = X.shape
if m % n_fold == 0:
return X, Y, ID
n_diff = m % n_fold
# choose in the biggest class to delete
# Find the biggest class
lst_count = []
for i in np.unique(Y):
lst_count.append(np.where(Y == i, 1, 0).sum())
ind_max = np.unique(Y)[np.argmax(lst_count)]
lst_inds = np.where(Y == ind_max)[0]
# Delete n_diff elements in the biggest class
    lst_del = np.random.choice(lst_inds, n_diff, replace=False)
X_new = np.delete(X, lst_del, 0)
Y_new = np.delete(Y, lst_del, 0)
ID_new = np.delete(ID, lst_del, 0)
return X_new, Y_new, ID_new
def drop_cells(X, Y, n_fold):
"""
# ====================================================================
# This function will detect whether the size of the first dimension of
# X is divisible by n_fold. If not, it will remove the n_diff rows from
# the biggest class(with the largest size in Y) where n_diff=len(Y)%n_fold
#
# ---- Input
# X : The data
# Y : The label
# n_fold : The number of fold
# --- Output
# X_new, Y_new : The new data and the new label
# =====================================================================
"""
m, d = X.shape
if m % n_fold == 0:
return X, Y
n_diff = m % n_fold
# choose in the biggest class to delete
# Find the biggest class
lst_count = []
for i in np.unique(Y):
lst_count.append(np.where(Y == i, 1, 0).sum())
ind_max = np.unique(Y)[np.argmax(lst_count)]
lst_inds = np.where(Y == ind_max)[0]
# Delete n_diff elements in the biggest class
    lst_del = np.random.choice(lst_inds, n_diff, replace=False)
X_new = np.delete(X, lst_del, 0)
Y_new = np.delete(Y, lst_del, 0)
return X_new, Y_new
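# A minimal, hedged example of drop_cells: with 10 samples and n_fold=4, two
# samples of the largest class are removed so the count becomes divisible by 4.
def _demo_drop_cells():
    X = np.arange(20).reshape(10, 2).astype(float)
    Y = np.array([1] * 6 + [2] * 4).reshape(-1, 1)
    X_new, Y_new = drop_cells(X, Y, n_fold=4)
    print(X_new.shape, Y_new.shape)  # (8, 2) (8, 1)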
# ===================== Algorithms =======================================
def FISTA_Primal(X, YR, k, param):
"""
# ====================================================================
# ---- Input
# X : The data
# YR : The label. Note that this should be an 2D array.
# k : The number of class
# niter : The number of iterations
# gamma : The hyper parameter gamma
# eta : The eta to calculate the projection on l1 ball
# * isEpsilon is not used in the original file in Matlab
# --- Output
# w : The projection matrix
# mu : The centers
# nbGenes_fin : The number of genes of the final step
# loss : The loss for each iteration
# ====================================================================
"""
# === Check the validness of param and the initialization of the params ===
if type(param) is not dict:
raise TypeError("Wrong type of input argument param", type(param))
lst_params = ["niter", "eta", "gamma"] # necessary params
if any(x not in param.keys() for x in lst_params):
raise ValueError(
"Missing parameter in param.\n Need {}.\n Got {} ".format(
lst_params, list(param.keys())
)
)
niter = param["niter"]
eta = param["eta"]
gamma = param["gamma"]
n, d = X.shape
# === With class2indicator():
# Y = class2indicator(YR,k)
# === With Onehotencoder:
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
loss = np.zeros(niter)
XtX = np.matmul(X.T, X)
XtY = np.matmul(X.T, Y)
w_old = np.ones((d, k))
w_loc = w_old
t_old = 1
for i in range(niter):
grad_w = np.matmul(XtX, w_loc) - XtY
# gradient step
V = w_loc - gamma * grad_w
V = np.reshape(V, d * k)
# Projection on the l1 ball
V = proj_l1ball(V, eta)
# Reshape back
w_new = np.reshape(V, (d, k))
# Chambolle method
        t_new = (i + 6) / 4  # or i+6, since Python starts from 0?
w_loc_new = w_new + ((t_old - 1) / t_new) * (w_new - w_old)
w_old = w_new
w_loc = w_loc_new
t_old = t_new
loss[i] = np.linalg.norm(Y - np.matmul(X, w_loc), "fro") ** 2
# end iteratons
w = w_loc
mu = centroids(np.matmul(X, w), YR, k)
nbGenes_fin = nb_Genes(w)[0]
loss = loss / loss[0]
return w, mu, nbGenes_fin, loss
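# A hedged usage sketch for FISTA_Primal on purely synthetic data. The
# hyper-parameter values below (niter/eta/gamma) are illustrative guesses,
# not tuned settings from the original experiments.
def _demo_FISTA_Primal():
    rng = np.random.RandomState(0)
    X = rng.rand(40, 12)
    YR = rng.randint(1, 3, size=(40, 1))  # labels in {1, 2}, 2D as expected
    param = {"niter": 20, "eta": 50, "gamma": 1e-3}
    w, mu, nbGenes, loss = FISTA_Primal(X, YR, k=2, param=param)
    print(w.shape, mu.shape, nbGenes, loss[-1])  # (12, 2) (2, 2) ...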
def primal_dual_L1N(X, YR, k, param):
"""
# ====================================================================
# ---- Input
# X : The data
# YR : The label. Note that this should be an 2D array.
# k : The number of class
# param : A type dict paramter which must have keys:
# 'niter', 'eta', 'tau', 'rho','sigma', 'beta', 'tau2' and 'delta'
# Normally speaking:
# (The default value for beta is 0.25.)
# (IF not given, the value of the 'tau2' will be calculated by
# tau2 = 0.25*(1/(np.sqrt(m)*normY)). Note that this normY is
# the 2-norm of the OneHotEncode of the YR given.)
# (Default value of the 'delta' is 1.0)
# --- Output
# w : The projection matrix of size (d,k)
# mu : The centers of classes
# nbGenes_fin : The number of genes of the final result
# loss : The loss for each iteration
# Z : The dual matrix of size (m,k)
# =====================================================================
"""
m, d = X.shape
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
# normY = np.linalg.norm(Y,2)
# === Check the validness of param and the initialization of the params ===
if type(param) is not dict:
raise TypeError("Wrong type of input argument param", type(param))
lst_params = [
"niter",
"eta",
"tau",
"rho",
"sigma",
"delta",
"tau2",
"beta",
] # necessary params
if any(x not in param.keys() for x in lst_params):
raise ValueError(
"Missing parameter in param.\n Need {}.\n Got {} ".format(
lst_params, list(param.keys())
)
)
niter = param["niter"]
eta = param["eta"]
tau = param["tau"]
rho = param["rho"]
sigma = param["sigma"]
delta = param["delta"]
tau2 = param["tau2"]
# beta = param['beta']
# === END check block ===
# Initialization
w_old = np.ones((d, k))
Z_old = np.ones((m, k))
mu_old = np.eye(k, k)
Ik = np.eye(k, k)
loss = np.zeros(niter)
# Main Block
for i in range(niter):
V = w_old + tau * np.matmul(X.T, Z_old)
# Reshape
V = np.reshape(V, d * k)
V = proj_l1ball(V, eta)
V[np.where(np.abs(V) < 0.001)] = 0
# Reshape back
w_new = np.reshape(V, (d, k))
# no gamma here
# w_new = w_new + gamma*(w_new - w_old) =>
w = 2 * w_new - w_old
mu_new = (mu_old + rho * tau2 * Ik - tau2 * np.matmul(Y.T, Z_old)) / (
1 + tau2 * rho
)
# mu = mu_new + gamma*(mu_new - mu_old) =>
mu = 2 * mu_new - mu_old
Z = (Z_old + sigma * (np.matmul(Y, mu) - np.matmul(X, w))) / (1 + sigma * delta)
Z_new = np.maximum(np.minimum(Z, 1), -1)
mu_old = mu_new
w_old = w_new
Z_old = Z_new
loss[i] = np.linalg.norm(
np.matmul(Y, mu_new) - np.matmul(X, w_new), 1
) + 0.5 * (np.linalg.norm(Ik - mu_new, "fro") ** 2)
# End loop
Z = Z_old
w = w_new
mu = mu_new
nbGenes_fin = nb_Genes(w)[0]
loss = loss / loss[0]
return w, mu, nbGenes_fin, loss, Z
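# A hedged usage sketch for primal_dual_L1N on synthetic data. In the launch
# scripts further below, sigma and tau2 are derived from the data; here they
# are set to small illustrative constants just to exercise the function.
def _demo_primal_dual_L1N():
    rng = np.random.RandomState(1)
    X = rng.rand(40, 12)
    YR = rng.randint(1, 3, size=(40, 1))
    param = {"niter": 20, "eta": 50, "tau": 4, "rho": 1,
             "sigma": 0.1, "delta": 1.0, "tau2": 0.05, "beta": 0.25}
    w, mu, nbGenes, loss, Z = primal_dual_L1N(X, YR, k=2, param=param)
    print(w.shape, mu.shape, Z.shape, nbGenes)  # (12, 2) (2, 2) (40, 2) ...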
def primal_dual_Nuclear(X, YR, k, param):
"""
# ====================================================================
# ---- Input
# X : The data
# YR : The label. Note that this should be an 2D array.
# k : The number of class
# param : A type dict paramter which must have keys:
# 'niter', 'eta_star', 'tau', 'rho','sigma', 'tau2','delta'
# and 'gamma'
# Normally speaking:
# (The default value for beta is 0.25.)
# (IF not given, the value of the 'tau2' will be calculated by
# tau2 = 0.25*(1/(np.sqrt(m)*normY)). Note that this normY is
# the 2-norm of the OneHotEncode of the YR given.)
# (Default value of the 'delta' is 1.0)
# --- Output
# w : The projection matrix of size (d,k)
# mu : The centers of classes
# nbGenes_fin : The number of genes of the final result
# loss : The loss for each iteration
# Z : The dual matrix of size (m,k)
# =====================================================================
"""
m, d = X.shape
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
# === Check the validness of param and the initialization of the params ===
if type(param) is not dict:
raise TypeError("Wrong type of input argument param", type(param))
lst_params = [
"niter",
"eta_star",
"tau",
"rho",
"sigma",
"tau2",
"beta",
] # necessary params
if any(x not in param.keys() for x in lst_params):
raise ValueError(
"Missing parameter in param.\n Need {}.\n Got {} ".format(
lst_params, list(param.keys())
)
)
niter = param["niter"]
eta_star = param["eta_star"]
delta = param["delta"]
tau = param["tau"]
rho = param["rho"]
sigma = param["sigma"]
tau2 = param["tau2"]
# === END check block ===
# Initialization
w_old = np.ones((d, k))
Z_old = np.ones((m, k))
mu_old = np.eye(k, k)
Ik = np.eye(k, k)
loss = np.zeros(niter)
# Main Block
for i in range(niter):
V = w_old + tau * np.matmul(X.T, Z_old)
# Nuclear constraint
L, S0, R = np.linalg.svd(V, full_matrices=False)
norm_nuclear = S0.sum()
vs1 = proj_l1ball(S0.reshape((-1,)), eta_star)
S1 = vs1.reshape(S0.shape)
w = np.matmul(L, S1[..., None] * R)
w = 2 * w - w_old
mu_new = (mu_old + rho * tau2 * Ik - tau2 * np.matmul(Y.T, Z_old)) / (
1 + tau2 * rho
)
mu = 2 * mu_new - mu_old
Z = (Z_old + sigma * (np.matmul(Y, mu) - np.matmul(X, w))) / (1 + sigma * delta)
Z_new = np.maximum(np.minimum(Z, 1), -1)
mu_old = mu_new
w_old = w
Z_old = Z_new
loss[i] = np.linalg.norm(np.matmul(Y, mu_new) - np.matmul(X, w), 1) + 0.5 * (
np.linalg.norm(Ik - mu_new, "fro") ** 2
)
# End loop
Z = Z_old
mu = mu_new
nbGenes_fin, _ = nb_Genes(w)
loss = loss / loss[0]
return w, mu, nbGenes_fin, loss, Z
# ================================== Part 2 ====================================
# ===================== Base Launch functions (scripts) ========================
def basic_run_eta(
func_algo,
func_predict,
X,
YR,
k,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=4,
beta=0.25,
delta=1.0,
eta=None,
eta_star=None,
gamma=1,
nfold=4,
rng=1,
showres=True,
keepfig=False,
saveres=False,
outputPath="../results/",
):
"""
# =====================================================================
# Basic function to launch the algorithm of some specific parameters.
# - Input:
# - func_algo (necessary) : The function of the algorithm
# - func_predict (necessary) : The function to predict
# - X (necessary) : The data
# - YR (necessary) : The labels for the data
# - k (necessary) : The number of the clusters
#
# - genenames (optional) : The names of the features of the data
# if not given, it will be
# ['Gene 1','Gene 2',...]
#
# - clusternames (optional) : The clusternames of the data
# if not given, it will be
# ['Class 1', 'Class 2',...]
#
# - niter (optional) : The number of iterations
#
# - rho, tau, beta, delta, : The hyper-parameters for the algo
# eta, gamma, etc (optional)
#
# - nfold (optional) : The number of the folds of the cross validation
#
    # - rng (optional) : The seed to control the random function
#
# - showres (optional) : Boolean value. True if we want to show
# the results, plot the figures etc.
#
# - saveres (optional) : Boolean value. True to save the results
#
# - outputPath (optional) : String value. The output path.
#
# - Output:
# - mu : The centroids
# - nbm : Number of genes
# - accG : Global accuracy
# - loss : Loss for each iterations
# - W_mean : Mean weight matrix for all folds
# - timeElapsed : Time elapsed for one fold
# - (And the tables) : df_topGenes, df_normW, df_topG_normW,
# df_topGenes_mean, df_normW_mean,
# df_topG_normW_mean, df_acctest
# ======================================================================
"""
np.random.seed(rng) # reproducible
if not os.path.exists(outputPath): # make the directory if it does not exist
os.makedirs(outputPath)
n, d = X.shape
# parameter checking
if genenames is None:
genenames = ["Gene {}".format(i + 1) for i in range(d)]
if clusternames is None:
clusternames = ["Class {}".format(i + 1) for i in range(k)]
    # Normalize the mean of the data (deprecated)
# m = np.mean(X,axis=0)
# X = X-m
# normX = normest(X)
# X = X/normX
# YR = np.array(YR).reshape(-1,1)
if YR.ndim == 1: # In case that OneHotEncoder get 1D array and raise a TypeError
YR = YR.reshape(-1, 1)
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
normY = normest(Y)
normY2 = normY ** 2
    # Dropping cells randomly if n % nfold is not zero
# For more details please see instructions in drop_cells
X, YR = drop_cells(X, YR, nfold)
param = {}
param["niter"] = niter
param["rho"] = rho
param["tau"] = tau
tau2 = beta * (1 / (np.sqrt(n) * normY))
param["tau2"] = tau2
eps = 1 / (1 + tau2 * rho * 0.25)
sigma = 1.0 / (tau + (tau2 * eps * normY2)) # Converge until 2.6 for L1Nel
param["sigma"] = sigma
param["delta"] = delta
param["beta"] = beta
param["eta"] = eta
param["eta_star"] = eta_star
param["gamma"] = gamma
# Initialization
nbG = np.zeros(nfold, dtype=int) # Number of genes for each fold
accuracy_train = np.zeros((nfold, k + 1))
accuracy_test = np.zeros((nfold, k + 1))
W0 = np.zeros((d, k, nfold)) # w in each fold
mu0 = np.zeros((k, k, nfold))
W_mean = np.zeros((d, k))
# Z0 = np.zeros((int((nfold-1)*n/nfold),k,nfold))
# Z_mean = np.zeros((int((nfold-1)*n/nfold),k))
loss_iter0 = np.zeros((nfold, niter)) # loss for each iteration of each fold
# W_mean stores w for each eta, where w is the mean of W0 along its third axis
nbG = np.zeros(nfold)
# Parameters printing
print("\nStarts trainning for")
print("{:>6}:{:<6}".format("niter", niter))
if "fista" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("gamma", delta))
elif "or" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
print("{:>6}:{:<6}".format("gamma", delta))
elif "_l2" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
elif "nuclear" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta_star", eta_star))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
else:
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
Y_PDS = np.zeros(YR.shape)
meanclassi = np.zeros(nfold)
kf = KFold(n_splits=nfold, random_state=rng, shuffle=True)
w_all, mu_all, nbGenes_all, loss_all = func_algo(X, YR, k, param)[0:4]
for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
print("{:-<30}".format(""))
print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
print("-> {} classification...".format(func_algo.__name__))
# ========== Training =========
Xtrain = X[train_ind]
Xtest = X[test_ind]
Ytrain = YR[train_ind]
Ytest = YR[test_ind]
startTime = time.perf_counter()
w, mu, nbGenes, loss = func_algo(Xtrain, Ytrain, k, param)[0:4]
endTime = time.perf_counter()
timeElapsed = endTime - startTime
print("-> Completed.\n-> Time Elapsed:{:.4}s".format(timeElapsed))
W0[:, :, i] = w
mu0[:, :, i] = mu
# Z0[:,:,i] = Z
loss_iter0[i, :] = loss
# ========== Accuracy =========
Ytrain_pred = func_predict(Xtrain, w, mu)
Ytest_pred = func_predict(Xtest, w, mu)
accuracy_train[i, 0], accuracy_train[i, 1 : k + 1] = compute_accuracy(
Ytrain, Ytrain_pred, k
)
accuracy_test[i, 0], accuracy_test[i, 1 : k + 1] = compute_accuracy(
Ytest, Ytest_pred, k
)
meanclassi[i] = np.mean(accuracy_test[i, 1 : k + 1])
nbG[i] = nbGenes
Y_PDS[test_ind] = Ytest_pred
print("{:-<30}".format(""))
# end kfold loop
nbm = int(nbG.mean())
accG = np.mean(accuracy_test[:, 0], axis=0)
Meanclass = meanclassi.mean()
W_mean = np.mean(W0, axis=2)
mu_mean = np.mean(mu0, axis=2)
# Z_mean= np.mean(Z0,axis=2)
normfro = np.linalg.norm(w, "fro")
print("Training step ends.\n")
# Class size
Ctab = []
size_class = np.zeros(k) # Size of each class (real)
size_class_est = np.zeros(k) # Size of each class (estimated)
for j in range(k):
size_class[j] = (YR == (j + 1)).sum()
size_class_est[j] = (Y_PDS == (j + 1)).sum()
Ctab.append("Class {}".format(j + 1))
df_szclass = pd.DataFrame(size_class, index=Ctab, columns=["Class Size"])
df_szclass_est = pd.DataFrame(size_class_est, index=Ctab, columns=["Class Size"])
# Data accuracy
accuracy_train = np.vstack((accuracy_train, np.mean(accuracy_train, axis=0)))
accuracy_test = np.vstack((accuracy_test, np.mean(accuracy_test, axis=0)))
ind_df = []
for i_fold in range(nfold):
ind_df.append("Fold {}".format(i_fold + 1))
ind_df.append("Mean")
columns = ["Global"]
if clusternames is None:
columns += Ctab
else:
columns += clusternames
df_accTrain = pd.DataFrame(accuracy_train, index=ind_df, columns=columns)
df_acctest = pd.DataFrame(accuracy_test, index=ind_df, columns=columns)
# Feature selection
print("Selecting features from whole dataset...", end="")
w, mu, nbGenes, loss = func_algo(X, YR, k, param)[0:4]
topGenes, normW = select_feature_w(w, genenames)
topGenes_mean, normW_mean = select_feature_w(W_mean, genenames)
# Mean of each fold
df_topGenes_mean = pd.DataFrame(topGenes_mean, columns=clusternames)
df_normW_mean = pd.DataFrame(normW_mean, columns=clusternames)
df_topG_normW_mean = merge_topGene_norm(topGenes_mean, normW_mean, clusternames)
# All data
df_topGenes = pd.DataFrame(topGenes, columns=clusternames)
df_normW = pd.DataFrame(normW, columns=clusternames)
df_topG_normW = merge_topGene_norm(topGenes, normW, clusternames)
print("Completed.\n")
# Two heatmaps
M_heatmap_classification = heatmap_classification(
Y_PDS, YR, clusternames, rotate=60
)
M_heatmap_signature = heatmap_normW(normW, clusternames, nbr_l=30, rotate=60)
# Results
if showres == True:
print("Size class (real):")
print(df_szclass)
print("\nSize class (estimated):")
print(df_szclass_est)
print("\nAccuracy Train")
print(df_accTrain)
print("\nAccuracy Test")
print(df_acctest)
if keepfig == False:
plt.close("all")
fig_lossIter = plt.figure(figsize=(8, 6))
plt.plot(np.arange(niter, dtype=int) + 1, loss)
msg_eta = "$\eta$:%d" % eta if eta is not None else ""
msg_etaS = "$\eta*$:%d" % eta_star if eta_star is not None else ""
plt.title(
"loss for each iteration {} {}\n ({})".format(
msg_eta, msg_etaS, func_algo.__name__
),
fontsize=18,
)
plt.ylabel("Loss", fontsize=18)
plt.xlabel("Iteration", fontsize=18)
plt.xticks(np.linspace(1, niter, num=6, endpoint=True, dtype=int))
plt.xlim(left=1, right=niter)
plt.ylim((0, 1))
# Saving Result
if saveres == True:
# define two nametags
nametag_eta = "_eta-%d" % eta if eta is not None else ""
nametag_etaS = "_etaStar-%d" % eta_star if eta_star is not None else ""
# save loss
filename_loss = "loss_{}_beta-{}_delta-{}{}{}_niter-{}.txt".format(
func_algo.__name__, beta, delta, nametag_eta, nametag_etaS, niter
)
np.savetxt(outputPath + filename_loss, loss)
# define function name tag for two heatmaps
func_tag = func_algo.__name__ + nametag_eta + nametag_etaS
# Save heatmaps
filename_heat = "{}{}_Heatmap_of_confusion_Matrix.npy".format(
outputPath, func_tag
)
np.save(filename_heat, M_heatmap_classification)
filename_heat = "{}{}_Heatmap_of_signature_Matrix.npy".format(
outputPath, func_tag
)
np.save(filename_heat, M_heatmap_signature)
df_acctest.to_csv(
"{}{}{}{}_AccuracyTest.csv".format(
outputPath, func_algo.__name__, nametag_eta, nametag_etaS
),
sep=";",
)
df_topG_normW.to_csv(
"{}{}{}{}_TopGenesAndNormW.csv".format(
outputPath, func_algo.__name__, nametag_eta, nametag_etaS
),
sep=";",
)
# Other possiblilities to save
# fig_lossIter.savefig('{}{}{}{}_niter-{}_loss_iters.png'.format(outputPath,func_algo.__name__,nametag_eta,nametag_etaS,niter))
# All data
# df_topGenes.to_csv('{}{}_TopGenes.csv'.format(outputPath,func_algo.__name__),sep=';')
# df_normW.to_csv('{}{}_NormW.csv'.format(outputPath,func_algo.__name__),sep=';')
# Mean of each fold
# df_topGenes_mean.to_csv('{}{}_TopGenes_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
# df_normW_mean.to_csv('{}{}_NormW_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
# df_topG_normW_mean.to_csv('{}{}_TopGenesAndNormW_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
return (
mu_mean,
nbm,
accG,
loss,
W_mean,
timeElapsed,
df_topGenes,
df_normW,
df_topG_normW,
df_topGenes_mean,
df_normW_mean,
df_topG_normW_mean,
df_acctest,
w_all,
)
# ===================== ========================================================
def getPredLabel(Ypred):
for i in range(Ypred.shape[0]):
if Ypred[i] > 1.5:
Ypred[i] = 2
if Ypred[i] <= 1.5:
Ypred[i] = 1
return Ypred
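# A hedged, equivalent vectorised form of getPredLabel, kept as a separate
# helper so the original loop above stays untouched.
def getPredLabel_vectorised(Ypred):
    return np.where(Ypred > 1.5, 2, 1)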
# =====================Functions used to compare different algorithms========================================================
def getCoefs(alg, model):
if alg == "RF":
coef = model.feature_importances_
if alg == "svm":
coef = model.coef_.transpose()
if alg == "plsda":
coef = model.coef_
return coef
# =====================Functions used to compute the ranked features and their weights=======================
def TopGenbinary(w, feature_names):
n = len(w)
difference = np.zeros(n)
for i in range(n):
difference[i] = w[i][0] - w[i][1]
df1 = pd.DataFrame(feature_names, columns=["pd"])
df1["weights"] = difference
# =====Sort the difference based on the absolute value=========
df1["sort_helper"] = df1["weights"].abs()
df2 = df1.sort_values(by="sort_helper", ascending=False).drop("sort_helper", axis=1)
# ==== end_sort=============
return df2
def rankFeatureHelper(alg, coef, feature_names):
df1 = pd.DataFrame(feature_names, columns=[alg])
df1["weights"] = coef
df1["sort_helper"] = df1["weights"].abs()
df2 = df1.sort_values(by="sort_helper", ascending=False).drop("sort_helper", axis=1)
return df2
def rankFeatures(X, Yr, algList, feature_names):
# flag=0
featureList = []
for alg in algList:
if alg == "svm":
clf = SVC(probability=True, kernel="linear")
model = clf.fit(X, Yr.ravel())
coef = model.coef_.transpose()
df_rankFeature = rankFeatureHelper(alg, coef, feature_names)
featureList.append(df_rankFeature)
if alg == "RF":
clf = RandomForestClassifier(n_estimators=400, random_state=10, max_depth=3)
model = clf.fit(X, Yr.ravel())
coef = model.feature_importances_
df_rankFeature = rankFeatureHelper(alg, coef, feature_names)
featureList.append(df_rankFeature)
if alg == "plsda":
clf = PLSRegression(n_components=4, scale=False)
model = clf.fit(X, Yr.ravel())
coef = model.coef_
df_rankFeature = rankFeatureHelper(alg, coef, feature_names)
featureList.append(df_rankFeature)
# if flag == 0:
# df_rankFeature = TopGenbinary(coef, feature_names)
# flag =1
# else:
# df_feature = TopGenbinary(coef, feature_names)
# df_rankFeature
return featureList
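# A minimal, hedged usage sketch for rankFeatures on synthetic binary data;
# only the 'RF' tag is exercised here ('svm' and 'plsda' work the same way),
# and the feature names are placeholders.
def _demo_rankFeatures():
    rng = np.random.RandomState(0)
    X = rng.rand(30, 5)
    Yr = rng.randint(1, 3, size=30)
    names = ["f{}".format(i) for i in range(5)]
    for df in rankFeatures(X, Yr, ["RF"], names):
        print(df.head())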
# ===============================Compute the \rho==============================
def basic_run_eta_molecule(
X,
YR,
ID,
k,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=4,
beta=0.25,
delta=1.0,
eta=500,
gamma=1,
nfold=4,
random_seed=1,
):
"""
# =====================================================================
# This function is used to compute the df_confidence
# Basic function to launch the algorithm of some specific parameters.
# - Input:
# The function of the algorithm: primal_dual_L1N
# The function to predict: predict_L1_molecule
# - X (necessary) : The data
# - YR (necessary) : The labels for the data
# - k (necessary) : The number of the clusters
#
# - genenames (optional) : The names of the features of the data
# if not given, it will be
# ['Gene 1','Gene 2',...]
#
# - clusternames (optional) : The clusternames of the data
# if not given, it will be
# ['Class 1', 'Class 2',...]
#
# - niter (optional) : The number of iterations
#
# - rho, tau, beta, delta, : The hyper-parameters for the algo
# eta, gamma (optional)
#
# - nfold (optional) : The number of the folds of the cross validation
#
    # - rng (optional) : The seed to control the random function
#
# - Output:
# - Yprediction : list of Predicted labels
# ======================================================================
"""
np.random.seed(random_seed) # reproducible
n, d = X.shape
# parameter checking
if genenames is None:
genenames = ["Gene {}".format(i + 1) for i in range(d)]
if clusternames is None:
clusternames = ["Class {}".format(i + 1) for i in range(k)]
if YR.ndim == 1: # In case that OneHotEncoder get 1D array and raise a TypeError
YR = YR.reshape(-1, 1)
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
normY = normest(Y)
normY2 = normY ** 2
    # Dropping cells randomly if n % nfold is not zero
# See more details in drop_cells
X, YR, Ident = drop_cells_with_ID(X, YR, ID, nfold)
dico = dict(list(enumerate(Ident)))
ref = pd.DataFrame.from_dict(dico, orient="index")
param = {}
param["niter"] = niter
param["rho"] = rho
param["tau"] = tau
tau2 = beta * (1 / (np.sqrt(n) * normY))
param["tau2"] = tau2
eps = 1 / (1 + tau2 * rho * 0.25)
sigma = 1.0 / (tau + (tau2 * eps * normY2)) # Converge until 2.6 for L1Nel
param["sigma"] = sigma
param["delta"] = delta
param["beta"] = beta
param["eta"] = eta
param["gamma"] = gamma
# Initialization
nbG = np.zeros(nfold, dtype=int) # Number of genes for each fold
W0 = np.zeros((d, k, nfold)) # w in each fold
mu0 = np.zeros((k, k, nfold))
# Z0 = np.zeros((int((nfold-1)*n/nfold),k,nfold))
# Z_mean = np.zeros((int((nfold-1)*n/nfold),k))
loss_iter0 = np.zeros((nfold, niter)) # loss for each iteration of each fold
# W_mean stores w for each eta, where w is the mean of W0 along its third axis
nbG = np.zeros(nfold)
# Parameters printing
print("\nStarts trainning for")
print("{:>6}:{:<6}".format("niter", niter))
print("{:>6}:{:<6}".format("eta", eta))
if "fista" in primal_dual_L1N.__name__.lower():
print("{:>6}:{:<6}".format("gamma", delta))
elif "or" in primal_dual_L1N.__name__.lower():
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
print("{:>6}:{:<6}".format("gamma", delta))
elif "_l2" in primal_dual_L1N.__name__.lower():
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
else:
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
Yprediction = []
Confidence = []
# accuracy_train = np.zeros((nfold,k+1))
# accuracy_test = np.zeros((nfold,k+1))
ID = []
Ident = []
kf = KFold(n_splits=nfold, random_state=random_seed, shuffle=True)
w_all, mu_all, nbGenes_all, loss_all = primal_dual_L1N(X, YR, k, param)[0:4]
for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
print("{:-<30}".format(""))
print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
print("-> {} classification...".format(primal_dual_L1N.__name__))
# ========== Training =========
dico = dico
Xtrain = X[train_ind]
Ytrain = YR[train_ind]
Xtest = X[test_ind]
startTime = time.perf_counter()
w, mu, nbGenes, loss = primal_dual_L1N(Xtrain, Ytrain, k, param)[0:4]
endTime = time.perf_counter()
timeElapsed = endTime - startTime
print("-> Completed.\n-> Time Elapsed:{:.4}s".format(timeElapsed))
W0[:, :, i] = w
mu0[:, :, i] = mu
loss_iter0[i, :] = loss
# ========== Prediction =========
Ypred, conf = predict_L1_molecule(Xtest, w, mu)
Yprediction.append(Ypred)
Confidence.append(conf)
ID.append(test_ind)
Ident.append(ref.iloc[test_ind])
nbG[i] = nbGenes
print("{:-<30}".format(""))
# end kfold loop
return Yprediction, Confidence, ID, Ident, YR, ref
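# Hedged call sketch (shapes are assumptions: X is an n x d feature matrix, YR holds
# the n integer class labels, ID the n sample identifiers):
#   Yp, conf, test_idx, ident, YR_used, ref = basic_run_eta_molecule(
#       X, YR, ID, k=4, niter=30, eta=500, nfold=4, random_seed=1)
# Yp[i] and conf[i] are the predictions and confidences for fold i.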
# ===================== Base Launch functions (scripts) ========================
def basic_run_eta_compare(
func_algo,
func_predict,
X,
YR,
k,
alglist,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=4,
beta=0.25,
delta=1.0,
eta=None,
eta_star=None,
gamma=1,
nfold=4,
rng=1,
showres=False,
keepfig=False,
saveres=False,
outputPath="../results/",
):
"""
# =====================================================================
# Basic function to launch the algorithm of some specific parameters.
# - Input:
# - func_algo (necessary) : The function of the algorithm
# - func_predict (necessary) : The function to predict
# - X (necessary) : The data
# - YR (necessary) : The labels for the data
# - k (necessary) : The number of the clusters
#
# - genenames (optional) : The names of the features of the data
# if not given, it will be
# ['Gene 1','Gene 2',...]
#
# - clusternames (optional) : The clusternames of the data
# if not given, it will be
# ['Class 1', 'Class 2',...]
#
# - niter (optional) : The number of iterations
#
# - rho, tau, beta, delta, : The hyper-parameters for the algo
# eta, gamma, etc (optional)
#
# - nfold (optional) : The number of the folds of the cross validation
#
    # - rng (optional)  : The seed to control the random function
#
# - showres (optional) : Boolean value. True if we want to show
# the results, plot the figures etc.
#
# - saveres (optional) : Boolean value. True to save the results
#
    # - alglist (necessary)  : The list of baseline classifiers to compare, e.g. ['svm', 'RF', 'plsda']
#
# - outputPath (optional) : String value. The output path.
#
#
# - Output:
# - mu : The centroids
# - nbm : Number of genes
# - accG : Global accuracy
# - loss : Loss for each iterations
# - W_mean : Mean weight matrix for all folds
# - timeElapsed : Time elapsed for one fold
# - (And the tables) : df_topGenes, df_normW, df_topG_normW,
# df_topGenes_mean, df_normW_mean,
# df_topG_normW_mean, df_acctest
# ======================================================================
"""
np.random.seed(rng) # reproducible
if not os.path.exists(outputPath): # make the directory if it does not exist
os.makedirs(outputPath)
n, d = X.shape
# parameter checking
if genenames is None:
genenames = ["Gene {}".format(i + 1) for i in range(d)]
if clusternames is None:
clusternames = ["Class {}".format(i + 1) for i in range(k)]
    # Normalize the mean of the data (deprecated)
# m = np.mean(X,axis=0)
# X = X-m
# normX = normest(X)
# X = X/normX
# YR = np.array(YR).reshape(-1,1)
    if YR.ndim == 1:  # In case OneHotEncoder gets a 1D array and raises a TypeError
YR = YR.reshape(-1, 1)
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
normY = normest(Y)
normY2 = normY ** 2
# Dropping the cells randomly if the n%d is not zero
# For more details please see instructions in drop_cells
X, YR = drop_cells(X, YR, nfold)
param = {}
param["niter"] = niter
param["rho"] = rho
param["tau"] = tau
tau2 = beta * (1 / (np.sqrt(n) * normY))
param["tau2"] = tau2
eps = 1 / (1 + tau2 * rho * 0.25)
sigma = 1.0 / (tau + (tau2 * eps * normY2)) # Converge until 2.6 for L1Nel
param["sigma"] = sigma
param["delta"] = delta
param["beta"] = beta
param["eta"] = eta
param["eta_star"] = eta_star
param["gamma"] = gamma
# Initialization
nbG = np.zeros(nfold, dtype=int) # Number of genes for each fold
accuracy_train = np.zeros((nfold, k + 1))
accuracy_test = np.zeros((nfold, k + 1))
auc_train = np.zeros((nfold))
auc_test = np.zeros((nfold))
sil_train = np.zeros((nfold))
W0 = np.zeros((d, k, nfold)) # w in each fold
mu0 = np.zeros((k, k, nfold))
W_mean = np.zeros((d, k))
# Z0 = np.zeros((int((nfold-1)*n/nfold),k,nfold))
# Z_mean = np.zeros((int((nfold-1)*n/nfold),k))
loss_iter0 = np.zeros((nfold, niter)) # loss for each iteration of each fold
# W_mean stores w for each eta, where w is the mean of W0 along its third axis
nbG = np.zeros(nfold)
# Parameters printing
# print('\nStarts trainning for')
# print('{:>6}:{:<6}'.format('niter',niter))
Y_PDS = np.zeros(YR.shape)
meanclassi = np.zeros(nfold)
kf = KFold(n_splits=nfold, random_state=rng, shuffle=True)
numalg = len(alglist)
accuracy_train_comp = np.zeros((nfold, numalg))
accuracy_test_comp = np.zeros((nfold, numalg))
AUC_train_comp = np.zeros((nfold, numalg * 4))
AUC_test_comp = np.zeros((nfold, numalg * 4))
timeElapsedMatrix = np.zeros((nfold, numalg + 1))
w_all, mu_all, nbGenes_all, loss_all = func_algo(X, YR, k, param)[0:4]
    # k-fold cross validation (nfold folds)
for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
print("{:-<30}".format(""))
print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
# ========== Training =========
Xtrain = X[train_ind]
Xtest = X[test_ind]
Ytrain = YR[train_ind]
Ytest = YR[test_ind]
        Ytr = pd.get_dummies(Ytrain.ravel()).values  # one-hot targets (kept for the commented-out fits below)
Yte = pd.get_dummies(Ytest.ravel())
startTime = time.perf_counter()
w, mu, nbGenes, loss = func_algo(Xtrain, Ytrain, k, param)[0:4]
endTime = time.perf_counter()
timeElapsed = endTime - startTime
timeElapsedMatrix[i][numalg] = timeElapsed
print("-> Time Elapsed:{:.4}s".format(timeElapsed))
W0[:, :, i] = w
mu0[:, :, i] = mu
# Z0[:,:,i] = Z
loss_iter0[i, :] = loss
# ========== Accuracy =========
Ytrain_pred = func_predict(Xtrain, w, mu)
Ytest_pred = func_predict(Xtest, w, mu)
accuracy_train[i, 0], accuracy_train[i, 1 : k + 1] = compute_accuracy(
Ytrain, Ytrain_pred, k
)
accuracy_test[i, 0], accuracy_test[i, 1 : k + 1] = compute_accuracy(
Ytest, Ytest_pred, k
)
if (
np.unique(Ytest).shape[0] == 2
and np.unique(Ytest_pred.astype("int64")).shape[0] == 2
):
auc_test[i] = roc_auc_score(Ytest_pred.astype("int64"), Ytest)
auc_train[i] = roc_auc_score(Ytrain_pred.astype("int64"), Ytrain)
meanclassi[i] = np.mean(accuracy_test[i, 1 : k + 1])
nbG[i] = nbGenes
Y_PDS[test_ind] = Ytest_pred
# start loop of other algorithms' comparison
for j in range(numalg):
alg = alglist[j]
if alg == "svm":
tuned_parameters = [
{"kernel": ["rbf"], "gamma": [1e-3, 1e-4], "C": [1, 10, 100, 1000]},
{"kernel": ["linear"], "C": [1, 10, 100, 1000]},
]
clf = GridSearchCV(SVC(), tuned_parameters)
# clf = SVC(probability=True,kernel='linear')
if alg == "RF":
clf = RandomForestClassifier(
n_estimators=400, random_state=10, max_depth=3
)
if alg == "plsda":
clf = PLSRegression(n_components=4, scale=False)
# build the model
startTime = time.perf_counter()
# clf = OneVsRestClassifier(clf)
model = clf.fit(Xtrain, Ytrain.ravel())
# model = clf.fit(X,Ytr)
# if (alg == 'svm'):
# print(clf.best_params_)
endTime = time.perf_counter()
timeElapsedMatrix[i][j] = endTime - startTime
if k > 2:
Ypred_test = np.around(
model.predict(Xtest)
).ravel() # getPredLabel(model.predict(Xtest))
Ypred_train = np.around(
model.predict(Xtrain)
).ravel() # getPredLabel(model.predict(Xtrain))
else:
Ypred_test = getPredLabel(model.predict(Xtest))
Ypred_train = getPredLabel(model.predict(Xtrain))
accuracy_test_comp[i][j] = accuracy_score(Ypred_test.astype("int64"), Ytest)
accuracy_train_comp[i][j] = accuracy_score(
Ypred_train.astype("int64"), Ytrain
)
# print("sil = ", metrics.silhouette_score(model.x_scores_, Ypred_train) )
if alg == "plsda":
sil_train[i] = metrics.silhouette_score(model.x_scores_, Ypred_train)
if (
np.unique(Ytest).shape[0] == 2
and np.unique(Ypred_test.astype("int64")).shape[0] == 2
):
AUC_test_comp[i][j * 4] = roc_auc_score(
Ypred_test.astype("int64"), Ytest
)
AUC_train_comp[i][j * 4] = roc_auc_score(
Ypred_train.astype("int64"), Ytrain
)
# F1 precision recal
AUC_train_comp[i][
j * 4 + 1 : j * 4 + 4
] = metrics.precision_recall_fscore_support(
Ytrain, Ypred_train.astype("int64"), average="macro"
)[
:-1
]
AUC_test_comp[i][
j * 4 + 1 : j * 4 + 4
] = metrics.precision_recall_fscore_support(
Ytest, Ypred_test.astype("int64"), average="macro"
)[
:-1
]
# end kfold loop
nbm = int(nbG.mean())
accG = np.mean(accuracy_test[:, 0], axis=0)
Meanclass = meanclassi.mean()
W_mean = np.mean(W0, axis=2)
mu_mean = np.mean(mu0, axis=2)
# Z_mean= np.mean(Z0,axis=2)
normfro = np.linalg.norm(w, "fro")
# Class size
Ctab = []
size_class = np.zeros(k) # Size of each class (real)
size_class_est = np.zeros(k) # Size of each class (estimated)
for j in range(k):
size_class[j] = (YR == (j + 1)).sum()
size_class_est[j] = (Y_PDS == (j + 1)).sum()
Ctab.append("Class {}".format(j + 1))
df_szclass = pd.DataFrame(size_class, index=Ctab, columns=["Class Size"])
df_szclass_est = pd.DataFrame(size_class_est, index=Ctab, columns=["Class Size"])
# Data accuracy
accuracy_train = np.vstack((accuracy_train, np.mean(accuracy_train, axis=0)))
accuracy_test = np.vstack((accuracy_test, np.mean(accuracy_test, axis=0)))
# auc_train = np.vstack((auc_train,np.mean(auc_train,axis=0)))
# auc_test = np.vstack((auc_test,np.mean(auc_test,axis=0)))
ind_df = []
for i_fold in range(nfold):
ind_df.append("Fold {}".format(i_fold + 1))
ind_df.append("Mean")
columns = ["Global"]
if clusternames is None:
columns += Ctab
else:
columns += clusternames
df_accTrain = pd.DataFrame(accuracy_train, index=ind_df, columns=columns)
df_acctest = pd.DataFrame(accuracy_test, index=ind_df, columns=columns)
# Data accuracy1
ind_df_comp = []
for i_fold in range(nfold):
ind_df_comp.append("Fold {}".format(i_fold + 1))
df_comp = | pd.DataFrame(accuracy_test_comp, index=ind_df_comp, columns=alglist) | pandas.DataFrame |
import plotly.express as px
import pandas as pd
import sys
from functools import reduce
data = | pd.read_csv("../data/RKI_COVID19.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 14:00:18 2020
updated on Thu Oct 15 18:07:45 2020
@author: <NAME>
"""
# reproducibility
from numpy.random import seed
seed(1)
import tensorflow as tf
tf.random.set_seed(1)
import numpy as np
from bayes_opt import BayesianOptimization
from bayes_opt.logger import JSONLogger
from bayes_opt.event import Events
from bayes_opt.util import load_logs
import os
import glob
import pandas as pd
import keras as ks
import datetime
from scipy import stats
from matplotlib import pyplot
from sklearn.preprocessing import MinMaxScaler
gpus = tf.config.experimental.list_physical_devices('GPU')
def load_RM_GW_and_HYRAS_Data(i):
pathGW = "./GWData"
pathHYRAS = "./MeteoData"
pathconnect = "/"
GWData_list = glob.glob(pathGW+pathconnect+'*.csv');
Well_ID = GWData_list[i]
Well_ID = Well_ID.replace(pathGW+'\\', '')
Well_ID = Well_ID.replace('_GWdata.csv', '')
GWData = pd.read_csv(pathGW+pathconnect+Well_ID+'_GWdata.csv',
parse_dates=['Date'],index_col=0, dayfirst = True,
decimal = '.', sep=',')
HYRASData = pd.read_csv(pathHYRAS+pathconnect+Well_ID+'_HYRASdata.csv',
parse_dates=['Date'],index_col=0, dayfirst = True,
decimal = '.', sep=',')
data = pd.merge(GWData, HYRASData, how='inner', left_index = True, right_index = True)
#introduce GWL t-1 as additional Input
GWData_shift1 = GWData
GWData_shift1.index = GWData_shift1.index.shift(periods = 7, freq = 'D')
GWData_shift1.rename(columns={"GWL": "GWLt-1"},inplace=True)
data = pd.merge(data, GWData_shift1, how='inner', left_index = True, right_index = True)
return data, Well_ID
def split_data(data, GLOBAL_SETTINGS):
dataset = data[(data.index < GLOBAL_SETTINGS["test_start"])] #separate testdata
TrainingData = dataset[0:round(0.8 * len(dataset))]
StopData = dataset[round(0.8 * len(dataset))+1:round(0.9 * len(dataset))]
    StopData_ext = dataset[round(0.8 * len(dataset))+1-GLOBAL_SETTINGS["seq_length"]:round(0.9 * len(dataset))] # extend data according to delays/sequence length
    OptData = dataset[round(0.9 * len(dataset))+1:]
    OptData_ext = dataset[round(0.9 * len(dataset))+1-GLOBAL_SETTINGS["seq_length"]:] # extend data according to delays/sequence length
    TestData = data[(data.index >= GLOBAL_SETTINGS["test_start"]) & (data.index <= GLOBAL_SETTINGS["test_end"])] # test data according to the specified test period
TestData_ext = pd.concat([dataset.iloc[-GLOBAL_SETTINGS["seq_length"]:], TestData], axis=0) # extend Testdata to be able to fill sequence later
return TrainingData, StopData, StopData_ext, OptData, OptData_ext, TestData, TestData_ext
def extract_PI1_testdata(data, GLOBAL_SETTINGS):
dataset = data[(data.index < GLOBAL_SETTINGS["test_start"])] #separate testdata
start = dataset.shape[0]-1
Testdata_PI1 = data['GWL'][start:-1]
return Testdata_PI1
# split a multivariate sequence into samples
def split_sequences(data, GLOBAL_SETTINGS):
X, y = list(), list()
for i in range(len(data)):
# find the end of this pattern
end_ix = i + GLOBAL_SETTINGS["seq_length"]
out_end_ix = end_ix + GLOBAL_SETTINGS["output_seq_length"]
# check if we are beyond the dataset
if out_end_ix > len(data):
break
# gather input and output parts of the pattern
seq_x, seq_y = data[i:end_ix, 1:], data[end_ix:out_end_ix, 0]
X.append(seq_x)
y.append(seq_y)
return np.array(X), np.array(y)
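# Illustrative sketch (not part of the original pipeline): shows how split_sequences
# windows a toy series. The settings below are assumptions chosen only for this
# demonstration, not the values used in the study.
def _demo_split_sequences():
    demo_settings = {"seq_length": 3, "output_seq_length": 2}
    demo = np.arange(20, dtype=float).reshape(10, 2)  # column 0 = target, column 1 = feature
    X_demo, y_demo = split_sequences(demo, demo_settings)
    # X_demo.shape == (6, 3, 1): 6 windows of 3 time steps of the feature column
    # y_demo.shape == (6, 2): the next 2 target values after each window
    return X_demo.shape, y_demo.shape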
def gwmodel(ini,GLOBAL_SETTINGS,X_train, Y_train,X_stop, Y_stop):
# define model
seed(ini)
tf.random.set_seed(ini)
model = ks.models.Sequential()
model.add(ks.layers.LSTM(GLOBAL_SETTINGS["hidden_size"], unit_forget_bias = True,
dropout = GLOBAL_SETTINGS["dropout"]))
model.add(ks.layers.Dense(GLOBAL_SETTINGS["output_seq_length"], activation='linear'))
optimizer = ks.optimizers.Adam(lr=GLOBAL_SETTINGS["learning_rate"], epsilon=10E-3, clipnorm=GLOBAL_SETTINGS["clip_norm"], clipvalue=GLOBAL_SETTINGS["clip_value"])
model.compile(loss='mse', optimizer=optimizer, metrics=['mse'])
# early stopping
es = ks.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=0, patience=5)
# fit network
model.fit(X_train, Y_train, validation_data=(X_stop, Y_stop), epochs=GLOBAL_SETTINGS["epochs"], verbose=1,
batch_size=GLOBAL_SETTINGS["batch_size"], callbacks=[es])
return model
# this is the optimizer function but checks only if paramters are integers and calls real optimizer function
def bayesOpt_function(pp,hiddensize, seqlength, batchsize,rH,T,Tsin):
    # cast the continuous samples to the discrete grid (makes the objective step-like in these parameters)
hiddensize_int = int(hiddensize)
seqlength_int = int(seqlength)
batchsize_int = int(batchsize)
pp = int(pp)
rH = int(round(rH))
T = int(round(T))
Tsin = int(round(Tsin))
return bayesOpt_function_with_discrete_params(pp, hiddensize_int, seqlength_int, batchsize_int, rH, T, Tsin)
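# Hedged sketch of how the wrapper above is typically handed to bayes_opt.
# The parameter bounds are illustrative assumptions, not the values used here;
# bayes_opt samples continuous values and bayesOpt_function casts them back to
# the discrete grid before training.
def _sketch_build_optimizer():
    pbounds = {"pp": (0, 1), "hiddensize": (8, 128), "seqlength": (4, 52),
               "batchsize": (16, 128), "rH": (0, 1), "T": (0, 1), "Tsin": (0, 1)}
    return BayesianOptimization(f=bayesOpt_function, pbounds=pbounds, random_state=1)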
#this is the real optimizer function
def bayesOpt_function_with_discrete_params(pp,hiddensize_int, seqlength_int, batchsize_int, rH, T, Tsin):
assert type(hiddensize_int) == int
assert type(seqlength_int) == int
assert type(batchsize_int) == int
assert type(rH) == int
assert type(T) == int
assert type(Tsin) == int
#[...]
# fixed settings for all experiments
GLOBAL_SETTINGS = {
'pp': pp,
'batch_size': batchsize_int,
'clip_norm': True,
'clip_value': 1,
'dropout': 0,
'epochs': 30,
'hidden_size': hiddensize_int,
'learning_rate': 1e-3,
'seq_length': seqlength_int,
'output_seq_length': 12,
'test_start': pd.to_datetime('02012012', format='%d%m%Y'),
'test_end': pd.to_datetime('28122015', format='%d%m%Y')
}
## load data
data, Well_ID = load_RM_GW_and_HYRAS_Data(GLOBAL_SETTINGS["pp"])
# inputs
if rH == 0:
data = data.drop(columns='rH')
if T == 0:
data = data.drop(columns='T')
if Tsin == 0:
data = data.drop(columns='Tsin')
#scale data
scaler = MinMaxScaler(feature_range=(-1, 1))
# scaler = StandardScaler()
scaler_gwl = MinMaxScaler(feature_range=(-1, 1))
scaler_gwl.fit(pd.DataFrame(data['GWL']))
data_n = pd.DataFrame(scaler.fit_transform(data), index=data.index, columns=data.columns)
#split data
TrainingData, StopData, StopData_ext, OptData, OptData_ext, TestData, TestData_ext = split_data(data, GLOBAL_SETTINGS)
TrainingData_n, StopData_n, StopData_ext_n, OptData_n, OptData_ext_n, TestData_n, TestData_ext_n = split_data(data_n, GLOBAL_SETTINGS)
# #sequence data
# X_train, Y_train = to_supervised(TrainingData_n.values, GLOBAL_SETTINGS)
# X_stop, Y_stop = to_supervised(StopData_ext_n.values, GLOBAL_SETTINGS)
# X_opt, Y_opt = to_supervised(OptData_ext_n.values, GLOBAL_SETTINGS)
# X_test, Y_test = to_supervised(TestData_ext_n.values, GLOBAL_SETTINGS)
X_train, Y_train = split_sequences(TrainingData_n.values, GLOBAL_SETTINGS)
X_stop, Y_stop = split_sequences(StopData_ext_n.values, GLOBAL_SETTINGS)
X_opt, Y_opt = split_sequences(OptData_ext_n.values, GLOBAL_SETTINGS)
X_test, Y_test= split_sequences(TestData_ext_n.values, GLOBAL_SETTINGS)
    # build and train the model with different initializations
inimax = 5
forecast_idx = OptData_ext_n.index.day < 8
forecast_idx = forecast_idx[GLOBAL_SETTINGS["seq_length"]:len(OptData_ext_n)-GLOBAL_SETTINGS["output_seq_length"]+1]
X_opt_reduced = X_opt[forecast_idx]
Y_opt_reduced = Y_opt[forecast_idx]
optresults_members = np.zeros((len(OptData_n), len(X_opt_reduced), inimax))
optresults_members[:] = np.nan
for ini in range(inimax):
print("BayesOpt-Iteration {} - ini-Ensemblemember {}".format(len(optimizer.res)+1, ini+1))
# f = open('log_full.txt', "a")
# print("BayesOpt-Iteration {} - ini-Ensemblemember {}".format(len(optimizer.res)+1, ini+1), file = f)
# f.close()
model = gwmodel(ini,GLOBAL_SETTINGS,X_train, Y_train, X_stop, Y_stop)
idx = 0
for i in range(0,len(X_opt_reduced)):
opt_sim_n = model.predict(X_opt_reduced[i,:,:].reshape(1,X_opt_reduced.shape[1],X_opt_reduced.shape[2]))
opt_sim = scaler_gwl.inverse_transform(opt_sim_n)
while forecast_idx[idx] == False:
idx = idx + 1
optresults_members[idx:idx+GLOBAL_SETTINGS["output_seq_length"], i, ini] = opt_sim.reshape(-1,)
idx = idx+1
opt_sim_median = np.nanmedian(optresults_members,axis = 2)
# get scores
errors = np.zeros((opt_sim_median.shape[1],6))
errors[:] = np.nan
for i in range(0,opt_sim_median.shape[1]):
sim = np.asarray(opt_sim_median[:,i].reshape(-1,1))
sim = sim[~np.isnan(sim)].reshape(-1,1)
obs = np.asarray(scaler_gwl.inverse_transform(Y_opt_reduced[i,:].reshape(-1,1)))
err = sim-obs
err_rel = (sim-obs)/(np.max(data['GWL'])-np.min(data['GWL']))
err_nash = obs - np.mean(np.asarray(data['GWL'][(data.index < GLOBAL_SETTINGS["test_start"])]))
errors[i,0] = 1 - ((np.sum(err ** 2)) / (np.sum((err_nash) ** 2))) #NSE
r = stats.linregress(sim[:,0], obs[:,0])
errors[i,1] = r.rvalue ** 2 #R2
errors[i,2] = np.sqrt(np.mean(err ** 2)) #RMSE
errors[i,3] = np.sqrt(np.mean(err_rel ** 2)) * 100 #rRMSE
errors[i,4] = np.mean(err) #Bias
errors[i,5] = np.mean(err_rel) * 100 #rBias
m_error = np.median(errors,axis = 0).reshape(1,-1)
print("total elapsed time = {}".format(datetime.datetime.now()-time1))
print("(pp) elapsed time = {}".format(datetime.datetime.now()-time_single))
# f = open('log_full.txt', "a")
# print("elapsed time = {}".format(datetime.datetime.now()-time1), file = f)
# f.close()
return m_error[0,0]+m_error[0,1]
def simulate_testset(pp,hiddensize_int, seqlength_int, batchsize_int, rH, T, Tsin):
# fixed settings for all experiments
GLOBAL_SETTINGS = {
'pp': pp,
'batch_size': batchsize_int,
'clip_norm': True,
'clip_value': 1,
'dropout': 0,
'epochs': 30,
'hidden_size': hiddensize_int,
'learning_rate': 1e-3,
'seq_length': seqlength_int,
'output_seq_length': 12,
'test_start': pd.to_datetime('02012012', format='%d%m%Y'),
'test_end': pd.to_datetime('28122015', format='%d%m%Y')
}
## load data
data, Well_ID = load_RM_GW_and_HYRAS_Data(GLOBAL_SETTINGS["pp"])
# inputs
if rH == 0:
data = data.drop(columns='rH')
if T == 0:
data = data.drop(columns='T')
if Tsin == 0:
data = data.drop(columns='Tsin')
#scale data
scaler = MinMaxScaler(feature_range=(-1, 1))
# scaler = StandardScaler()
scaler_gwl = MinMaxScaler(feature_range=(-1, 1))
scaler_gwl.fit( | pd.DataFrame(data['GWL']) | pandas.DataFrame |
import datetime
import os
from typing import List, Dict, Optional
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import pandas as pd
from pydantic import BaseModel
API_URL = os.environ.get("API_URL", None)
if API_URL is None:
raise ValueError("API_URL not known")
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["GET"],
allow_headers=["*"],
)
class CountyReport(BaseModel):
name: str
fips: Optional[int]
sources: Dict[str, List[str]] # {url: [var1, var2]}
class StateReport(CountyReport):
counties: List[CountyReport]
class Reports(BaseModel):
week_end: datetime.datetime
week_start: datetime.datetime
updates: List[StateReport]
def aggregate_week_fips_updates(x):
return pd.Series(
{
"state": x["state"].iloc[0],
"name": x["fullname"].iloc[0],
"sources": (
x.groupby("source").apply(lambda y: list(y["variable_name"])).to_dict()
),
}
)
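# Illustrative shape of one aggregated (week, fips) group -- every value below is
# made up purely to show the structure of the returned Series:
#   state      "Texas"
#   name       "Travis County, Texas"
#   sources    {"https://example.gov/weekly-report": ["cases_total", "deaths_total"]}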
def aggregate_week_updates(week_state):
out = {}
out["name"] = state = week_state.name[1]
state_only = week_state.query("name == @state")
out["sources"] = {}
for ss in list(state_only["sources"]):
out["sources"].update(ss)
non_state: pd.DataFrame = week_state.query("name != @state")
out["counties"] = []
def group_county_sources(c_df: pd.DataFrame):
c_out = {
"name": c_df["name"].iloc[0],
"fips": c_df["fips"].iloc[0],
"sources": {},
}
for c_src in list(c_df["sources"]):
c_out["sources"].update(c_src)
return c_out
out["counties"] = list(non_state.groupby("fips").apply(group_county_sources))
return pd.Series(out)
def flatten_by_date(x):
return x.drop(["start_date", "state"], axis=1).to_dict(orient="records")
def get_reports():
df = pd.read_json(f"{API_URL}/us_covid_variable_start_date")
df["start_date"] = | pd.to_datetime(df["start_date"]) | pandas.to_datetime |
from pippin.classifiers.classifier import Classifier
from pippin.config import mkdirs
from pippin.dataprep import DataPrep
from pippin.snana_fit import SNANALightCurveFit
from pippin.snana_sim import SNANASimulation
from pippin.task import Task
import pandas as pd
import os
from astropy.io import fits
import numpy as np
class Aggregator(Task):
def __init__(self, name, output_dir, dependencies, options):
super().__init__(name, output_dir, dependencies=dependencies)
self.passed = False
self.classifiers = [d for d in dependencies if isinstance(d, Classifier)]
self.output_df = os.path.join(self.output_dir, "merged.csv")
self.output_df_key = os.path.join(self.output_dir, "merged.key.gz")
self.id = "SNID"
self.type_name = "SNTYPE"
self.options = options
self.include_type = bool(options.get("INCLUDE_TYPE", False))
self.plot = bool(options.get("PLOT", False))
self.colours = ['#f95b4a', '#3d9fe2', '#ffa847', '#c4ef7a', '#e195e2', '#ced9ed', '#fff29b']
def _check_completion(self, squeue):
return Task.FINISHED_SUCCESS if self.passed else Task.FINISHED_FAILURE
def check_regenerate(self, force_refresh):
new_hash = self.get_hash_from_string(self.name + str(self.include_type) + str(self.plot))
old_hash = self.get_old_hash(quiet=True)
if new_hash != old_hash:
self.logger.info("Hash check failed, regenerating")
return new_hash
elif force_refresh:
self.logger.debug("Force refresh deteted")
return new_hash
else:
self.logger.info("Hash check passed, not rerunning")
return False
def get_underlying_sim_task(self):
check = []
for task in self.dependencies:
for t in task.dependencies:
check.append(t)
if isinstance(task, SNANALightCurveFit):
check += task.dependencies
for task in check:
if isinstance(task, SNANASimulation) or isinstance(task, DataPrep):
return task
self.logger.error(f"Unable to find a simulation or data dependency for aggregator {self.name}")
return None
def load_prediction_file(self, filename):
df = pd.read_csv(filename, comment="#")
columns = df.columns
if len(columns) == 1 and "VARNAME" in columns[0]:
df = pd.read_csv(filename, comment="#", sep=r"\s+")
if "VARNAMES:" in df.columns:
df = df.drop(columns="VARNAMES:")
remove_columns = [c for i, c in enumerate(df.columns) if i != 0 and "PROB_" not in c]
df = df.drop(columns=remove_columns)
return df
def _run(self, force_refresh):
new_hash = self.check_regenerate(force_refresh)
if new_hash:
mkdirs(self.output_dir)
prediction_files = [d.output["predictions_filename"] for d in self.classifiers]
df = None
for f in prediction_files:
dataframe = self.load_prediction_file(f)
dataframe = dataframe.rename(columns={dataframe.columns[0]: self.id})
if df is None:
df = dataframe
self.logger.debug(f"Merging on column {self.id} for file {f}")
else:
self.logger.debug(f"Merging on column {self.id} for file {f}")
df = | pd.merge(df, dataframe, on=self.id, how="outer") | pandas.merge |
#!/usr/bin/env python
# coding: utf-8
import json
from typing import Optional
import pandas as pd
import plotly.graph_objs as go
from evidently import ColumnMapping
from evidently.analyzers.classification_performance_analyzer import ClassificationPerformanceAnalyzer
from evidently.model.widget import BaseWidgetInfo
from evidently.dashboard.widgets.widget import Widget, RED
class ClassSupportWidget(Widget):
def __init__(self, title: str, dataset: str = 'reference'):
super().__init__(title)
self.dataset = dataset # reference or current
def analyzers(self):
return [ClassificationPerformanceAnalyzer]
def calculate(self,
reference_data: pd.DataFrame,
current_data: Optional[pd.DataFrame],
column_mapping: ColumnMapping,
analyzers_results) -> Optional[BaseWidgetInfo]:
results = ClassificationPerformanceAnalyzer.get_results(analyzers_results)
target_name = results.columns.utility_columns.target
prediction_name = results.columns.utility_columns.prediction
if target_name is None or prediction_name is None:
if self.dataset == 'reference':
raise ValueError(f"Widget [{self.title}] requires 'target' and 'prediction' columns.")
return None
if self.dataset == 'current':
result_metrics = results.current_metrics
elif self.dataset == 'reference':
result_metrics = results.reference_metrics
if result_metrics is None:
raise ValueError(f"Widget [{self.title}] required 'reference' results from"
f" {ClassificationPerformanceAnalyzer.__name__} but no data found")
else:
raise ValueError(f"Widget [{self.title}] requires 'current' or 'reference' dataset value")
if result_metrics is None:
return None
# plot support bar
metrics_frame = | pd.DataFrame(result_metrics.metrics_matrix) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 18 21:53:21 2018
@author: jsulloa
"""
import numpy as np
from scipy.stats import randint, uniform
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.model_selection import GridSearchCV, StratifiedKFold, RandomizedSearchCV
from sklearn.naive_bayes import GaussianNB, ComplementNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn import metrics
def tune_clf_binary_grid(X, y_true, clf_name, n_splits_cv=5, refit_score='precision'):
"""
Tune a classifier using grid search cross validation
"""
scorers = {
'precision': metrics.make_scorer(metrics.precision_score),
'recall': metrics.make_scorer(metrics.recall_score),
'f1': metrics.make_scorer(metrics.f1_score),
'auc': metrics.make_scorer(metrics.roc_auc_score)}
if clf_name=='rf':
print("Tuning Random Forest")
clf = RandomForestClassifier(n_jobs=-1, class_weight='balanced_subsample')
param_grid = {'n_estimators' : [1, 5, 10, 100, 300, 500],
'max_features' : [2, 6, 10, 14, 18, 32]}
elif clf_name=='svm':
print("Tuning Support Vector Machine")
clf = svm.SVC(class_weight='balanced', probability=True)
        param_grid = [{'kernel': ['rbf'], 'gamma': [0.001, 0.01, 0.1, 1], 'C': [0.1, 1, 10, 100]}]
elif clf_name=='adb':
print("Tuning Ada Boost")
clf = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
param_grid = {'n_estimators':[50, 120],
'learning_rate':[0.1, 0.5, 1.],
'base_estimator__min_samples_split' : np.arange(2, 8, 2),
'base_estimator__max_depth' : np.arange(1, 4, 1)}
else:
print("Invalid option. Valid options are: 'rf', 'adb' and 'svm' ")
# Tune classifier with cross validation
skf = StratifiedKFold(n_splits=n_splits_cv)
grid_search = GridSearchCV(clf, param_grid, scoring=scorers,
refit=refit_score, cv=skf, return_train_score=True,
iid=True, n_jobs=-1, verbose=2)
    grid_search.fit(X, y_true)
    # print basic info
print('Best score:', grid_search.best_score_)
print('Best parameters:', grid_search.best_params_)
return grid_search
def tune_clf_multiclass_grid(X, y_true, clf_name, n_splits_cv=5,
score='f1_weighted', verbose=2,
max_features_rf=[2, 6, 10, 14, 18]):
"""
    Tune a classifier using grid search and cross validation
"""
if clf_name=='rf':
print("Tuning Random Forest")
clf = RandomForestClassifier(n_jobs=-1)
param_grid = {'n_estimators' : [1, 5, 10, 100, 300],
'max_features' : max_features_rf}
elif clf_name=='svm':
print("Tuning Support Vector Machine")
clf = svm.SVC()
param_grid = {'kernel': ['rbf'],
'gamma': [0.001, 0.01, 0.1, 1],
'C': [0.1, 1, 10, 100]}
elif clf_name=='adb':
print("Tuning Ada Boost")
clf = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
param_grid = {'n_estimators':[50, 120],
'learning_rate':[0.1, 0.5, 1.],
'base_estimator__min_samples_split' : np.arange(2, 8, 2),
'base_estimator__max_depth' : np.arange(1, 4, 1)}
else:
print("Invalid option. Valid options are: 'rf', 'svm' and 'adb'")
# Tune classifier with cross validation
skf = StratifiedKFold(n_splits=n_splits_cv)
grid_search = GridSearchCV(clf, param_grid, scoring=score,
refit=True, cv=skf, return_train_score=True,
iid=True, n_jobs=-1, verbose=2)
    grid_search.fit(X, y_true)
    # print basic info
print('Best score:', grid_search.best_score_)
print('Best parameters:', grid_search.best_params_)
return grid_search
## NOTE_ NAME WAS CHANGED FROM tune_clf_multiclass_rand
def tune_clf_rand(X, y_true, clf_name, n_splits_cv=5,
n_iter=10, score='f1_weighted', verbose=2):
"""
Tune a classifier using randomized search and cross validation
Parameters:
X: array-like, dtype=float64, size=[n_samples, n_features]
array with observations and features
y_true: array, dtype=float64, size=[n_samples]
array with labels for each observation
clf_name: str
name of the classifier to be tuned, 'rf', 'adb' or 'svm'.
n_splits_cv: int
Number of folds for cross validation
n_iter: int, default=10
Number of parameter settings that are sampled
score: string, callable, list/tuple, dict or None, default: None
Score to evaluate prediction on the test set
verbose: int
Controls the verbosity: the higher, the more messages
"""
if clf_name=='rf':
print("Tuning Random Forest")
clf = RandomForestClassifier(n_jobs=-1)
param_grid = {'max_depth' : [3, None],
'n_estimators' : randint(1,1000),
'max_features' : randint(1,X.shape[1]-1)}
elif clf_name=='svm':
print("Tuning Support Vector Machine")
clf = svm.SVC(class_weight='balanced', probability=True)
param_grid = {'kernel': ['rbf'],
'gamma': uniform(0.01, 1),
'C': uniform(1,100)}
elif clf_name=='adb':
print("Tuning Ada Boost")
clf = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
param_grid = {'n_estimators':randint(1,1000),
'learning_rate':uniform(0.01, 1),
'base_estimator__min_samples_split' : randint(2,20),
'base_estimator__max_depth' : [3, None]}
elif clf_name=='knn':
print('Tuning KNN')
clf = KNeighborsClassifier()
param_grid = { 'n_neighbors': randint(1,50)}
elif clf_name=='cnb':
print('Tuning Complement Naive Bayes')
clf = ComplementNB()
param_grid = { 'alpha': uniform(0, 10)}
elif clf_name=='gnb':
print('Tuning Gaussian Naive Bayes')
clf = GaussianNB()
param_grid = { 'var_smoothing': uniform(1e-10, 10)}
elif clf_name=='ann':
print('Tuning Artificial Neural Networks')
clf = MLPClassifier(solver='lbfgs', max_iter=500)
param_grid = {'hidden_layer_sizes' :randint(5,100),
'alpha': uniform(1e-5,1)}
else:
print("Invalid option. Valid options are: 'rf', 'svm' and 'adb'")
# Tune classifier with cross validation
skf = StratifiedKFold(n_splits=n_splits_cv)
rand_search = RandomizedSearchCV(clf, param_grid, scoring=score, n_iter=n_iter,
refit=True, cv=skf, return_train_score=True,
iid=True, n_jobs=-1, verbose=2)
rand_search.fit(X, y_true)
# print basic info
print('Best score:', rand_search.best_score_)
print('Best parameters:', rand_search.best_params_)
return rand_search
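# Hedged usage sketch (synthetic data; 'rf' and the fold/iteration counts are
# arbitrary choices for illustration):
#   X_demo = np.random.rand(60, 10)
#   y_demo = np.random.randint(0, 2, 60)
#   rs = tune_clf_rand(X_demo, y_demo, 'rf', n_splits_cv=3, n_iter=5)
#   best_rf = rs.best_estimator_   # refit on the full data with the best parameters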
def print_report_cv(clf_gs):
"""
Print report of GridSearch
Accepts only numerical y_true
"""
print("Grid scores on development set:")
print()
df_scores = pd.DataFrame.from_dict(clf_gs.cv_results_)
print(df_scores[['mean_fit_time',
'mean_test_f1',
'mean_test_recall',
'mean_test_precision',
'param_max_features',
'param_n_estimators']])
print()
print('Best parameters:')
print("\n".join("{}\t{}".format(k, v) for k, v in clf_gs.best_params_.items()))
print('Best score:', np.round(clf_gs.best_score_,3))
def plot_param_cv(clf_gs, scorer, param):
df_res = pd.DataFrame.from_dict(clf_gs.cv_results_)
score = 'mean_test_' + scorer
score_std = 'std_test_' + scorer
param = 'param_' + param
mean_value = df_res[score]
std_value = df_res[score_std]
# plot
plt.figure(figsize=(8, 6))
plt.errorbar(np.arange(len(mean_value)), mean_value, yerr=std_value, fmt='o')
plt.xticks(np.arange(len(mean_value)), df_res[param])
plt.xlabel(param)
plt.ylabel(score)
plt.box(on=None)
def print_report(y_true, y_pred, th=0.5, plot=True, curve_type='roc'):
"""
Print a report of binary classification performance
Parameters
----------
y_true: ndarray
Ground truth data
y_pred: ndarray
Predicted data
th: float
        Threshold to compute the precision, recall, accuracy and F1 score.
plot: bool, default True
Plot curves
curve_type: string, default 'precision_recall'
Type of curve to plot, 'precision_recall' or 'roc'
Returns
-------
model_eval: dict
Dictionary with multiple metrics for model evaluation
Note from Hands on Machine Learning with scikit-learn ():
Since the ROC curve is so similar to the precision/recall (or PR)
curve, you may wonder how to decide which one to use. As a rule of
thumb, you should prefer the PR curve whenever the positive class is
rare or when you care more about the false positives than the false
negatives, and the ROC curve otherwise.
"""
y_bin = y_pred>th
fpr, tpr, th_roc = metrics.roc_curve(y_true, y_pred, pos_label=1)
precisions, recalls, th_pr = metrics.precision_recall_curve(y_true, y_pred,
pos_label=1)
model_eval = {'auc' : metrics.auc(fpr, tpr),
'th' : th,
'confusion_matrix' : metrics.confusion_matrix(y_true, y_bin),
'precision': metrics.precision_score(y_true, y_bin),
'recall': metrics.recall_score(y_true, y_bin),
'accuracy': metrics.accuracy_score(y_true, y_bin),
'f1': metrics.f1_score(y_true, y_bin)}
print()
print('Area Under the Curve:',np.round(metrics.auc(fpr, tpr),decimals=4))
print('\nConfusion matrix for threshold =', th,':')
print(pd.DataFrame(metrics.confusion_matrix(y_true, y_bin),
columns=['pred_neg', 'pred_pos'], index=['neg', 'pos']))
print()
print(pd.DataFrame({
'Precision': [metrics.precision_score(y_true, y_bin)],
'Recall': [metrics.recall_score(y_true, y_bin)],
'Accuracy': [metrics.accuracy_score(y_true, y_bin)],
'F1 score': [metrics.f1_score(y_true, y_bin)]}))
# Trace ROC curve
if plot==True and curve_type=='roc':
plt.figure(figsize= (16,8))
plt.subplot(1,2,1) # left size
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('False postivie rate')
plt.ylabel('True positive rate')
plt.grid(1)
plt.subplot(1,2,2) # right size
plt.plot(th_roc, 1-fpr, label='1-False positive rate')
plt.plot(th_roc, tpr, label='True positive rate')
plt.xlim(-0.5,1.1)
plt.xlabel('Decision threshold')
plt.ylabel('Score')
plt.legend(loc='best')
plt.show()
elif plot==True and curve_type=='prc':
plt.figure(figsize= (16,8))
plt.subplot(1,2,1) # left size
plt.plot([1, 0], [0, 1], 'k--')
plt.plot(recalls, precisions)
plt.title('PR curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.grid(1)
plt.subplot(1,2,2) # right size
plt.plot(th_pr, precisions[:-1], label='Precision')
plt.plot(th_pr, recalls[:-1], label='Recall')
plt.xlim(-0.1,1.1)
plt.xlabel('Decision threshold')
plt.ylabel('Score')
plt.legend(loc='best')
plt.show()
    elif plot == True:
        print('Error plotting: curve_type should be \'roc\' or \'prc\' ')
return model_eval
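# Hedged usage sketch: the arrays below are synthetic and only illustrate the
# expected call signature; real use would pass held-out labels and the predicted
# probabilities of the positive class from a fitted classifier.
def _demo_print_report():
    rng = np.random.RandomState(0)
    y_true_demo = rng.randint(0, 2, 200)
    y_score_demo = np.clip(0.5 * y_true_demo + 0.6 * rng.rand(200), 0, 1)  # noisy scores
    return print_report(y_true_demo, y_score_demo, th=0.5, plot=False)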
def print_report_grid_search(clf, clf_name, X_test, y_test):
"""
Print report of classifier performance evaluated on new test data
Parameters
----------
clf : classifier previously tuned with GridSearchCV
clf_name: name of classifier. Valid options are 'svm', 'rf' ,'adb'
X_test: features for test data
y_test: labels for test data
Returns
-------
    None. The metrics are printed and the ROC curve is added to the current figure.
"""
# make the predictions
y_pred = clf.predict(X_test.values)
## Print report
print('Best params:')
print(clf.best_params_)
# confusion matrix on the test data.
print('\nConfusion matrix optimized on test data:')
print(pd.DataFrame(metrics.confusion_matrix(y_test, y_pred),
columns=['pred_neg', 'pred_pos'], index=['neg', 'pos']))
print('\nPrecision:', metrics.precision_score(y_test, y_pred),
'\nRecall:', metrics.recall_score(y_test, y_pred),
'\nAccuracy:', metrics.accuracy_score(y_test, y_pred))
# Compute performance at multiple thresholds
if clf_name=='rf':
y_test_score = clf.predict_proba(X_test)
fpr, tpr, th = metrics.roc_curve(y_test, y_test_score[:,1], pos_label=1)
elif clf_name=='svm':
y_test_score = clf.decision_function(X_test)
fpr, tpr, th = metrics.roc_curve(y_test, y_test_score, pos_label=1)
elif clf_name=='adb':
y_test_score = clf.decision_function(X_test)
fpr, tpr, th = metrics.roc_curve(y_test, y_test_score, pos_label=1)
# Trace ROC curve
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr, label=clf_name)
plt.grid(1)
def misclassif_idx(y_true, y_pred):
"""
Get indices of misclassified observations
Parameters
----------
y_true: ndarray, numeric
Ground truth labels in numeric format
y_pred: ndarray, numeric
Predicted labels as numeric, 0 or 1
Returns
    -------
    dict with keys 'fp' and 'fn' holding the indices of false positives
    and false negatives, respectively.
    """
idx_fp=np.where(0 > y_true - y_pred)
idx_fn=np.where(0 < y_true - y_pred)
return {'fp':idx_fp[0], 'fn': idx_fn[0]}
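# Tiny illustration (synthetic labels, not from any dataset used here):
# with y_true = np.array([1, 0, 1, 0]) and y_pred = np.array([0, 0, 1, 1]),
# misclassif_idx returns {'fp': array([3]), 'fn': array([0])} --
# observation 3 is a false positive and observation 0 a false negative.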
from sklearn.metrics import f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
def test_feature_set(fname_db):
""" Test feature set discrimination with Linear Discrimanant Analysis
TODO: Check if the approach is valid, set plot functionality plot=1
"""
# load file and assign to objects
df = pd.read_csv(fname_db)
df = df.dropna()
df = df.reset_index(drop=True)
shape_idx = [col for col in df if col.startswith('shp')]
X = df[shape_idx]
y_true = df.label.str.slice(0,1)
y_true = y_true.astype('int8')
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y_true).predict(X)
score = round(f1_score(y_pred=y_pred,y_true=y_true),3)
lda = LinearDiscriminantAnalysis(n_components=2)
X_lda = lda.fit(X, y_true).transform(X)
plt.figure()
plt.plot(X_lda[y_true==0],'o',color='navy', markersize=3)
plt.plot(X_lda[y_true==1],'o',color='darkorange', markersize=3)
plt.title('LDA on: '+fname_db)
plt.grid(True)
plt.show()
print('F1 score: ', score)
def get_max_df(df, column):
"""
Get maximum of a dataframe column
Parameters:
----------
df: pandas dataframe
A data frame with multiple columns
column: str
Name of the column to get the maximum
Returns:
    -------
    The row of df (a pandas Series) at which `column` attains its maximum.
    """
idx_max = df[column].idxmax()
return df.loc[idx_max,:]
def pred_roi_to_file(path_pred, column_proba):
"""
Translate predictions on ROIs to file by keeping the ROI with maximum
probability. Note: This works only for binary classifiers
Parameters:
----------
path_pred: str
Full path to the ROIs predicted (xdata)
column_proba: str
Name of the column that has the positive predictions
Returns:
-------
y_pred_file: dataframe
Dataframe with name of files and the associated positive prediction
"""
# load file and assign to objects
y_pred_roi = | pd.read_csv(path_pred) | pandas.read_csv |
# *-* coding: utf-8 *-*
"""Read binary data from the IRIS Instruments Syscal Pro system
TODO: Properly sort out handling of electrode positions and conversion to
electrode numbers.
"""
import struct
from io import StringIO
import logging
import pandas as pd
import numpy as np
from reda.importers.utils.decorators import enable_result_transforms
logger = logging.getLogger(__name__)
def _convert_coords_to_abmn_X(data, **kwargs):
"""The syscal only stores positions for the electrodes. Yet, we need to
infer electrode numbers for (a,b,m,n) by means of some heuristics. This
heuristic uses the x-coordinates to infer an electrode spacing (y/z
coordinates are ignored). We also assume a constant spacing of electrodes
(i.e., a gap in electrode positions would indicate unused electrodes). This
is usually a good estimate as hardly anybody does change the electrode
positions stored in the Syscal system (talk to us if you do).
Note that this function can use user input to simplify the process by using
a user-supplied x0 value for the smallest electrode position (corresponding
to electrode 1) and a user-supplied spacing (removing the need to infer
from the positions).
Parameters
----------
data : Nx4 array|Nx4 :py:class:`pandas.DataFrame`
The x positions of a, b, m, n electrodes. N is the number of
measurements
x0 : float, optional
position of first electrode. If not given, then use the smallest
x-position in the data as the first electrode.
spacing : float
electrode spacing. This is important if not all electrodes are used in
a given measurement setup. If not given, then the smallest distance
between electrodes is assumed to be the electrode spacing. Naturally,
this requires measurements (or injections) with subsequent electrodes.
Returns
-------
data_new : Nx4 :py:class:`pandas.DataFrame`
The electrode number columns a,b,m,n
"""
assert data.shape[1] == 4, 'data variable must only contain four columns'
x0 = kwargs.get(
'x0',
data.min().min()
)
electrode_spacing = kwargs.get('spacing', None)
# try to determine from the data itself
if electrode_spacing is None:
electrode_positions = data.values
electrode_spacing = np.abs(
electrode_positions[:, 1:] - electrode_positions[:, 0:-1]
).min()
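        # e.g. a row of a/b/m/n x-positions [0.0, 2.0, 1.0, 3.0] has consecutive
        # differences [2.0, -1.0, 2.0]; the smallest absolute value, 1.0, is taken as
        # the spacing (assuming at least one pair of adjacent electrodes was used).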
data_new = | pd.DataFrame() | pandas.DataFrame |
#%% md
## Read from MIMIC csv files
#%%
import pandas as pd
# files can be downloaded from https://mimic.physionet.org/gettingstarted/dbsetup/
med_file = 'PRESCRIPTIONS.csv'
diag_file = 'DIAGNOSES_ICD.csv'
procedure_file = 'PROCEDURES_ICD.csv'
# drug code mapping files (already in ./data/)
ndc2atc_file = 'ndc2atc_level4.csv'
cid_atc = 'drug-atc.csv'
ndc2rxnorm_file = 'ndc2rxnorm_mapping.txt'
# drug-drug interactions can be down https://www.dropbox.com/s/8os4pd2zmp2jemd/drug-DDI.csv?dl=0
ddi_file = 'drug-DDI.csv'
def process_procedure():
pro_pd = | pd.read_csv(procedure_file, dtype={'ICD9_CODE': 'category'}) | pandas.read_csv |
import re
import math
import pandas as pd
import numpy as np
import nltk
import heapq
import pickle
import datetime
from nltk.corpus import stopwords
from operator import itemgetter
# Loading the dictionary
with open('dictionary.pkl', 'rb') as f:
data = pickle.load(f)
# Loading the dictionary with term count
with open('newdictionary.pkl', 'rb') as f:
newdata = pickle.load(f)
# Read the csv file
ff = pd.read_csv('Airbnb_Texas_Rentals.csv')  # read_csv already returns a DataFrame
ff = ff.fillna('0')
ff = ff.drop(['Unnamed: 0'], axis=1)
# Insert the date_post_1 column based on the date of listing
ff['month']=[(x.split()[0]).lower() for x in ff['date_of_listing']]
month_map = {'january': "-01-01", 'february': "-02-01", 'march': "-03-01", 'april': "-04-01",
             'may': "-05-01", 'june': "-06-01", 'july': "-07-01", 'august': "-08-01",
             'september': "-09-01", 'october': "-10-01", 'november': "-11-01", 'december': "-12-01"}
ff['month_number'] = ff['month'].map(month_map).fillna("-01-01")  # default to January 1st for unknown months
ff['year']=[x.split()[1] for x in ff['date_of_listing']]
ff['date_post']=ff['year']+ff['month_number']
ff['date_post_1']=[pd.to_datetime(x) for x in ff['date_post']]
# calculate the room rate for each listing and merge it to the data frame
ff['rate_num']=[str(d).replace("$","") for d in ff['average_rate_per_night']]
ff=ff.fillna('0')
ff['rate_num_1']=[pd.to_numeric(x) if x!="nan" else 0 for x in ff['rate_num'] ]
ff_means=pd.DataFrame(ff.groupby(['city'])['rate_num_1'].mean())
ff_means.columns=['Average_in_this_city']
ff=ff.merge(ff_means, left_on='city', right_on='city', how='left')
# FUNCTIONS----FUNCTIONS----FUNCTIONS------------------------------
#input = [word1, word2, ...]
#output = {word1: [pos1, pos2], word2: [pos1, pos2], ...}
def index_one_file(termlist):
fileIndex = {}
words = list(set(termlist))
word_list = [x for x in termlist]
for i in range(len(word_list)):
for item in words:
if item == word_list[i]:
fileIndex.setdefault(item, []).append(i)
return fileIndex
#input = {filename: [word1, word2, ...], ...}
#ouput = {filename: {word: [pos1, pos2, ...]}, ...}
def make_indices(dictionary):
total = {}
for filename in dictionary.keys():
new = dictionary[filename]
total[filename] = index_one_file(new)
return total
# Dict reversal
#input = {filename: {word: [pos1, pos2, ...], ... }}
#output = {word: {filename: [pos1, pos2]}, ...}, ...}
def fullIndex(regdex):
total_index = {}
for filename in regdex.keys():
for word in regdex[filename].keys():
if word in total_index.keys():
if filename in total_index[word].keys():
total_index[word][filename].extend(regdex[filename][word][:])
else:
total_index[word][filename] = regdex[filename][word]
else:
total_index[word] = {filename: regdex[filename][word]}
return total_index
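# Shape of the index structures (toy values):
#   index_one_file(['cozy', 'house', 'cozy'])  -> {'cozy': [0, 2], 'house': [1]}
#   fullIndex({'doc1': {'cozy': [0, 2]}})      -> {'cozy': {'doc1': [0, 2]}}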
# Search Engine
# Preprocess the search
def preprocess(search):
search = search.lower().split()
stop_words = set(stopwords.words('english'))
lemma = nltk.wordnet.WordNetLemmatizer()
search_lst = []
for x in search:
if not x in stop_words:
x = re.sub("[^a-zA-Z]+", "*", x)
if "*" in x:
y = x.split('*')
y[0]=lemma.lemmatize(y[0])
search_lst.append(y[0])
if len(y)>1:
y[1]=lemma.lemmatize(y[1])
search_lst.append(y[1])
else:
x = lemma.lemmatize(x)
search_lst.append(x)
search_lst = (' '.join(search_lst))
return search_lst
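# Example (illustrative query): preprocess("Cozy houses in Austin") lower-cases the
# text, drops the stop word "in", strips non-letter characters and lemmatizes,
# returning "cozy house austin".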
#Input for the search
def search_eng_input(phrase):
phrase = phrase.lower().split()
n = len(phrase)
list1, list2, list3 = [], [], []
for x in phrase:
x = preprocess(x)
list1.append(x)
for x in list1:
if x in data.keys():
list2.append(set(data[x].keys()))
b = list2[0]
for i in range(0,len(list2)):
b = (b & list2[i])
for x in b:
list3.append(int(re.sub("[^0-9]+", "", x))-1)
return list3
# Executing the query and return the result for conjunctive search
def exec_query_s_1(search):
pd.set_option('display.max_colwidth', -1)
l = []
df = pd.DataFrame()
l = (search_eng_input(search))
if len(l)>0:
df = ff[['title','description','city', 'url']].loc[l]
if df.empty == False:
df.set_index('title', inplace=True)
return df
# TF-IDF
def tf(term_count, total_count):
return term_count / total_count
def idf(doc_count, contain_count):
return math.log(doc_count / contain_count)
def tf_idf(term_count, total_count, doc_count, contain_count):
if total_count == 0: total_count = 1
if contain_count == 0: contain_count = 1
return round(tf(term_count, total_count) * idf(doc_count, contain_count),2)
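# Quick worked example (illustrative numbers only): a term appearing 3 times in a
# 120-word listing and found in 40 of the 18259 documents scores
#   tf = 3/120 = 0.025,  idf = log(18259/40) ≈ 6.12,  tf-idf ≈ 0.15
# i.e. tf_idf(3, 120, 18259, 40) returns 0.15 after rounding.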
# return the number of words in a document when input in the name
def total_count(filename):
total = 0
inverse_data = fullIndex(data) #inverse the data
if filename in inverse_data.keys():
value = inverse_data.get(filename, 0) #get the sub_dict
for k, v in value.items():
total += len(v) # count the number of term in a document
return total
else:
return 0
# return the number of documents that contain a certain word when input in a term
def contain_count(term):
if term in data.keys():
return len(data[term].keys())
else:
return 0
# functions for returning the search with ranking similarity scores
#creating doc vectors
def doc_vec(query):
lemma = nltk.wordnet.WordNetLemmatizer()
querylist = query.split()
query = search_eng_input_1(query) # return the list of documents matched first
query = [x+1 for x in query] # +1 for the correct position
doc = {}
docvec = [0] * len(querylist)
for index, word in enumerate(querylist):
word = lemma.lemmatize(word)
word = word.lower()
try:
subvec = []
value = newdata[word]# get {doc1:tf-idf, doc2: tf-idf} of each word in search query
for k, v in value.items():
for i in query: # loop all the documents'ids that search gives
key = ('filtered_doc_%s'%str(i))
if key == k: # if the id is in the dict
subvec.append(v) # append the score to the vector = [[tf-idf1,tf-idf2,..],[tf-idf1,tf-idf2,..],..]
subvec += [0] * (len(query) - len(subvec)) # make the vectors equal in length for not found instances
docvec[index] = subvec
del subvec
except KeyError:
docvec[index] = [0]*len(value.keys()) # if the word not in dict, create a zero vector
# this loop return the dict with format {doc1:vector1,doc2:vector2,...} for the query
for index in range(len(docvec[0])):
sub_vec = [item[index] for item in docvec]
doc.update({query[index]:sub_vec})
return doc
#create query vector
def query_vec(query):
pattern = re.compile('[\W_]+') # for faster search function
query = pattern.sub(' ',query)
querylist = query.split()
b = len(querylist)
c = 18259 #total number of documents
queryvec = [0]*b
for index,word in enumerate(querylist):
a = querylist.count(word)
d = contain_count(word)
wordtfidf = tf_idf(a,b,c,d) # tf-idf score for each word
queryvec[index] = wordtfidf
return queryvec
def dotproduct(vec1, vec2):
if len(vec1) != len(vec2):
return 0
return sum([x*y for x,y in zip(vec1, vec2)])
def magnitude(vec):
return pow(sum(map(lambda x: x**2, vec)),.5)
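# Cosine-similarity sketch with toy vectors (values are illustrative only):
#   dotproduct([1, 2], [2, 3]) = 8
#   magnitude([1, 2]) ≈ 2.236, magnitude([2, 3]) ≈ 3.606
#   8 / (2.236 * 3.606) ≈ 0.99  -- the ratio generatescore() computes per document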
# calculate the score of the results based on query
def generatescore(query):
queryvec = query_vec(query)
doc_vecs_dict = doc_vec(query)
score_dict = {}
for k, v in doc_vecs_dict.items():
score = round(dotproduct(queryvec, v)/(magnitude(queryvec)*magnitude(v)),2)
score_dict.update({k:score})
return score_dict
# heap data structure to keep top k
def heappq(mysearch):
query = search_eng_input(mysearch)
k = 10 # default top k-element
if k >= len(query):
k = len(query)
d = generatescore(mysearch)
k_keys_sorted = heapq.nlargest(k, d.items(), key = itemgetter(1))
key_lst, score_lst = [], []
for i in range(k):
key_lst.append(k_keys_sorted[i][0])
score_lst.append(k_keys_sorted[i][1])
return key_lst, score_lst
# executing tf_idf conjunctive search
def exec_tfidf_search(mysearch):
key_lst, score_lst = heappq(mysearch)
key_lst = [x-1 for x in key_lst] # to get the correct row in df
| pd.set_option('display.max_colwidth', -1) | pandas.set_option |
from datetime import datetime
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from data_process import data_process_utils
from data_process.census_process.census_data_creation_config import census_data_creation
from data_process.census_process.census_degree_process_utils import consistentize_census9495_columns, \
numericalize_census9495_data, standardize_census_data
from data_process.census_process.mapping_resource import cate_to_index_map, continuous_cols, categorical_cols, \
target_col_name
# follow link provides description on columns of Census Income Dataset:
# https://docs.1010data.com/Tutorials/MachineLearningExamples/CensusIncomeDataSet.html
def get_timestamp():
return int(datetime.utcnow().timestamp())
CENSUS_COLUMNS = ["age", "class_worker", "det_ind_code", "det_occ_code", "education",
"wage_per_hour", "hs_college", "marital_stat", "major_ind_code", "major_occ_code",
"race", "hisp_origin", "gender", "union_member", "unemp_reason", "full_or_part_emp",
"capital_gain", "capital_loss", "stock_dividends", "tax_filer_stat",
"region_prev_res", "state_prev_res", "det_hh_fam_stat", "det_hh_summ", "instance_weight",
"mig_chg_msa", "mig_chg_reg", "mig_move_reg", "mig_same", "mig_prev_sunbelt",
"num_emp", "fam_under_18", "country_father", "country_mother", "country_self",
"citizenship", "own_or_self", "vet_question", "vet_benefits", "weeks_worked",
"year", "income_label"]
RERANGED_CENSUS_COLUMNS_NEW = ["age", "gender_index", "age_index", "class_worker", "det_ind_code", "det_occ_code",
"education",
"education_year", "wage_per_hour", "hs_college", "marital_stat", "major_ind_code",
"major_occ_code", "race", "hisp_origin", "gender", "union_member", "unemp_reason",
"full_or_part_emp", "capital_gain", "capital_loss", "stock_dividends", "tax_filer_stat",
"region_prev_res", "state_prev_res", "det_hh_fam_stat", "det_hh_summ", "instance_weight",
"mig_chg_msa", "mig_chg_reg", "mig_move_reg", "mig_same", "mig_prev_sunbelt",
"num_emp", "fam_under_18", "country_father", "country_mother", "country_self",
"citizenship", "own_or_self", "vet_question", "vet_benefits", "weeks_worked",
"year", "income_label"]
def process(data_path, to_dir=None, train=True):
census = pd.read_csv(data_path, names=CENSUS_COLUMNS, skipinitialspace=True)
print("[INFO] load {} data".format("train" if train else "test"))
print("[INFO] load data with shape:", census.shape)
appendix = "_train" if train else "_test"
extension = ".csv"
appendix = appendix + extension
print("[INFO] consistentize original data")
c_census = consistentize_census9495_columns(census)
c_census.to_csv(to_dir + 'consistentized_census9495' + appendix, header=True, index=False)
print("[INFO] numericalize data")
p_census = numericalize_census9495_data(c_census, cate_to_index_map)
return p_census
def compute_instance_prob(data_frame):
weight_sum = data_frame["instance_weight"].sum()
data_frame["instance_weight"] = data_frame["instance_weight"] / weight_sum
def create_file_appendix(train):
appendix = "_train" if train else "_valid"
extension = ".csv"
return appendix + extension
def create_degree_src_tgt_data(p_census,
from_dir,
to_dir,
data_tag,
pos_ratio,
num_all,
train=True,
grad_train_scaler=None,
undergrad_train_scaler=None,
grad_census_test_values=None,
save_intermediate_tables=False):
appendix = create_file_appendix(train)
print("====================== create_degree_source_target_data for {} data ======================"
.format("train" if train else "valid"))
# form source and target domain data
doctorate_census = p_census[p_census['education'] == 11]
master_census = p_census[(p_census['education'] == 9) | (p_census['education'] == 10)]
undergrad_census = p_census[
(p_census['education'] != 9) & (p_census['education'] != 10) & (p_census['education'] != 11)]
columns = continuous_cols + categorical_cols + ['instance_weight', target_col_name]
doctorate_census = doctorate_census[columns]
master_census = master_census[columns]
undergrad_census = undergrad_census[columns]
print("[INFO] doctorate_census shape", doctorate_census.shape)
print("[INFO] master_census shape", master_census.shape)
print("[INFO] undergrad_census shape", undergrad_census.shape)
if save_intermediate_tables:
doctorate_census.to_csv(to_dir + 'doctorate_census9495' + appendix, header=True, index=False)
master_census.to_csv(to_dir + 'master_census9495' + appendix, header=True, index=False)
undergrad_census.to_csv(to_dir + 'undergrad_census9495' + appendix, header=True, index=False)
doctorate_census = pd.read_csv(from_dir + 'doctorate_census9495' + appendix, skipinitialspace=True)
master_census = | pd.read_csv(from_dir + 'master_census9495' + appendix, skipinitialspace=True) | pandas.read_csv |
import pandas as pd
import geopandas
import json
import altair as alt
def make_metrics_df():
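    # Read each of the 83 'wi_map_plan' GeoJSON files and collect their mm_gap, SL_index
    # and efficiency_gap values (one row per plan) into a single DataFrame.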
GEOJSON = 'geojson/wi_map_plan_{}.geojson'
mm_gaps = []
sl_indices = []
efficiency_gaps = []
plan_number = [i for i in range(1,84)]
for i in range(1,84):
plan = geopandas.read_file(GEOJSON.format(i))
mm_gaps.append(plan['mm_gap'].iloc[0])
sl_indices.append(plan['SL_index'].iloc[0])
efficiency_gaps.append(plan['efficiency_gap'].iloc[0])
metrics_dict = {'plan_number':plan_number,'mm_gap':mm_gaps,'sl_index':sl_indices,'efficiency_gap':efficiency_gaps}
metrics_df = | pd.DataFrame(metrics_dict, columns = ['plan_number','mm_gap','sl_index','efficiency_gap']) | pandas.DataFrame |
from datetime import datetime
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas._testing as tm
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
msg = "stop passing 'keep_tz'"
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
assert msg in str(m[0].message)
# convert to utc
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match="Length mismatch"):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(
datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
)
df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
new_index = date_range(
datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
)
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_reindex_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
df = DataFrame(
[[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=["a", "b", "c"],
columns=["d", "e", "f"],
)
res1 = df.reindex(["b", "a"])
res2 = df.reindex(index=["b", "a"])
res3 = df.reindex(labels=["b", "a"])
res4 = df.reindex(labels=["b", "a"], axis=0)
res5 = df.reindex(["b", "a"], axis=0)
for res in [res2, res3, res4, res5]:
| tm.assert_frame_equal(res1, res) | pandas._testing.assert_frame_equal |
## The packages.
from selenium import webdriver
from selenium.webdriver import chrome
from selenium.webdriver.common.by import By
import pandas, os, tqdm, time
## The goal.
'''
Download the article abstracts matching the query from the PubMed search engine and export them as a table.
'''
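# Example of the search URL assembled in the download loop (using the keyword and site
# arguments defined below; note the query string is not URL-encoded by this script):
#   https://pubmed.ncbi.nlm.nih.gov/?term=Athlete's foot&filter=simsearch1.fha&page=1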
## The arguments.
keyword = "Athlete's foot"
platform = "pubmed"
site = "https://pubmed.ncbi.nlm.nih.gov/"
number = 10
folder = "LOG/VI/{}".format(keyword)
os.makedirs(folder, exist_ok=True)
##
option = webdriver.chrome.options.Options()
option.binary_location = "/usr/bin/google-chrome"
# option.add_argument('--no-sandbox')
service = chrome.service.Service(executable_path='driver/chrome')
driver = webdriver.Chrome(options=option, service=service)
page = range(1, number+1, 1)
group = {
"link":[],
"title":[],
"abstract":[],
"tag":[],
"author":[]
}
for p in page:
try:
driver.get("{}?term={}&filter=simsearch1.fha&page={}".format(site, keyword, p))
# group['link'] += [i.get_attribute("href") for i in driver.find_elements_by_css_selector(".docsum-title")]
group['link'] += [i.get_attribute("href") for i in driver.find_elements(By.CSS_SELECTOR, ".docsum-title")]
pass
except:
continue
pass
link = | pandas.DataFrame({"link":group['link']}) | pandas.DataFrame |
# Exercise 4 : Manipulating Geospatial Data
import math
import pandas as pd
import geopandas as gpd
from learntools.geospatial.tools import geocode
import folium
from folium import Marker
from folium.plugins import MarkerCluster
from learntools.core import binder
binder.bind(globals())
from learntools.geospatial.ex4 import *
def embed_map(m, file_name):
from IPython.display import IFrame
m.save(file_name)
return IFrame(file_name, width='100%', height='500px')
## 1) Geocode the missing locations.
## Run the next code cell to create a DataFrame starbucks containing Starbucks locations in the state of California.
starbucks = | pd.read_csv("../input/geospatial-learn-course-data/starbucks_locations.csv") | pandas.read_csv |
import numpy as np
from scipy import stats
import pandas as pd
from sklearn.svm import SVC
from dask.distributed import Client
import dask_ml.model_selection as dms
def test_search_basic(xy_classification):
X, y = xy_classification
param_grid = {"class_weight": [None, "balanced"]}
a = dms.GridSearchCV(SVC(kernel="rbf", gamma=0.1), param_grid)
a.fit(X, y)
param_dist = {"C": stats.uniform}
b = dms.RandomizedSearchCV(SVC(kernel="rbf", gamma=0.1), param_dist)
b.fit(X, y)
def test_to_keys_numpy_array():
rng = np.random.RandomState(0)
arr = rng.randn(20, 30)
df = | pd.DataFrame(data=arr) | pandas.DataFrame |
import pandas as pd
import pytest
from dateutil.relativedelta import relativedelta
import featuretools as ft
from featuretools.entityset import Timedelta
from featuretools.primitives import Count # , SlidingMean
from featuretools.utils.wrangle import _check_timedelta
def test_timedelta_equality():
assert Timedelta(10, "d") == Timedelta(10, "d")
assert Timedelta(10, "d") != 1
def test_singular():
assert Timedelta.make_singular("Month") == "Month"
assert Timedelta.make_singular("Months") == "Month"
def test_delta_with_observations(es):
four_delta = Timedelta(4, 'observations')
assert not four_delta.is_absolute()
assert four_delta.get_value('o') == 4
neg_four_delta = -four_delta
assert not neg_four_delta.is_absolute()
assert neg_four_delta.get_value('o') == -4
time = pd.to_datetime('2019-05-01')
error_txt = 'Invalid unit'
with pytest.raises(Exception, match=error_txt):
time + four_delta
with pytest.raises(Exception, match=error_txt):
time - four_delta
def test_delta_with_time_unit_matches_pandas(es):
customer_id = 0
sessions_df = es['sessions'].df
sessions_df = sessions_df[sessions_df['customer_id'] == customer_id]
log_df = es['log'].df
log_df = log_df[log_df['session_id'].isin(sessions_df['id'])]
all_times = log_df['datetime'].sort_values().tolist()
# 4 observation delta
value = 4
unit = 'h'
delta = Timedelta(value, unit)
neg_delta = -delta
# first plus 4 obs is fifth
assert all_times[0] + delta == all_times[0] + pd.Timedelta(value, unit)
# using negative
assert all_times[0] - neg_delta == all_times[0] + pd.Timedelta(value, unit)
# fifth minus 4 obs is first
assert all_times[4] - delta == all_times[4] - pd.Timedelta(value, unit)
# using negative
assert all_times[4] + neg_delta == all_times[4] - pd.Timedelta(value, unit)
def test_check_timedelta(es):
time_units = list(Timedelta._readable_units.keys())
expanded_units = list(Timedelta._readable_units.values())
exp_to_standard_unit = {e: t for e, t in zip(expanded_units, time_units)}
singular_units = [u[:-1] for u in expanded_units]
sing_to_standard_unit = {s: t for s, t in zip(singular_units, time_units)}
to_standard_unit = {}
to_standard_unit.update(exp_to_standard_unit)
to_standard_unit.update(sing_to_standard_unit)
full_units = singular_units + expanded_units + time_units + time_units
strings = ["2 {}".format(u) for u in singular_units + expanded_units +
time_units]
strings += ["2{}".format(u) for u in time_units]
for i, s in enumerate(strings):
unit = full_units[i]
standard_unit = unit
if unit in to_standard_unit:
standard_unit = to_standard_unit[unit]
td = _check_timedelta(s)
assert td.get_value(standard_unit) == 2
def test_check_pd_timedelta(es):
pdtd = pd.Timedelta(5, 'm')
td = _check_timedelta(pdtd)
assert td.get_value('s') == 300
def test_string_timedelta_args():
assert Timedelta("1 second") == Timedelta(1, "second")
assert Timedelta("1 seconds") == Timedelta(1, "second")
assert Timedelta("10 days") == Timedelta(10, "days")
assert Timedelta("100 days") == Timedelta(100, "days")
assert Timedelta("1001 days") == Timedelta(1001, "days")
assert Timedelta("1001 weeks") == Timedelta(1001, "weeks")
def test_feature_takes_timedelta_string(es):
feature = ft.Feature(es['log']['id'], parent_entity=es['customers'],
use_previous="1 day", primitive=Count)
assert feature.use_previous == Timedelta(1, 'd')
# def test_sliding_feature_takes_timedelta_string(es):
# feature = SlidingMean(es['log']['id'], es['customers'],
# use_previous="1 day",
# window_size="1 second")
# assert feature.use_previous == Timedelta(1, 'd')
# assert feature.window_size == Timedelta(1, 's')
def test_deltas_week(es):
customer_id = 0
sessions_df = es['sessions'].df
sessions_df = sessions_df[sessions_df['customer_id'] == customer_id]
log_df = es['log'].df
log_df = log_df[log_df['session_id'].isin(sessions_df['id'])]
all_times = log_df['datetime'].sort_values().tolist()
delta_week = Timedelta(1, "w")
delta_days = Timedelta(7, "d")
assert all_times[0] + delta_days == all_times[0] + delta_week
def test_relative_year():
td_time = "1 years"
td = _check_timedelta(td_time)
assert td.get_value("Y") == 1
assert isinstance(td.delta_obj, relativedelta)
time = pd.to_datetime('2020-02-29')
assert time + td == pd.to_datetime('2021-02-28')
def test_serialization():
times = [
Timedelta(1, unit='w'),
Timedelta(3, unit='d'),
Timedelta(5, unit='o')
]
dictionaries = [
{'value': 1, 'unit': 'w'},
{'value': 3, 'unit': 'd'},
{'value': 5, 'unit': 'o'}
]
for td, expected in zip(times, dictionaries):
assert expected == td.get_arguments()
for expected, dictionary in zip(times, dictionaries):
assert expected == Timedelta.from_dictionary(dictionary)
# Test multiple temporal parameters separately since it is not deterministic
mult_time = {'years': 4, 'months': 3, 'days': 2}
mult_td = Timedelta(mult_time)
# Serialize
td_units = mult_td.get_arguments()['unit']
td_values = mult_td.get_arguments()['value']
arg_list = list(zip(td_values, td_units))
assert (4, 'Y') in arg_list
assert (3, 'mo') in arg_list
assert (2, 'd') in arg_list
# Deserialize
assert mult_td == Timedelta.from_dictionary({'value': [4, 3, 2],
'unit': ['Y', 'mo', 'd']})
def test_relative_month():
td_time = "1 month"
td = _check_timedelta(td_time)
assert td.get_value('mo') == 1
assert isinstance(td.delta_obj, relativedelta)
time = pd.to_datetime('2020-01-31')
assert time + td == pd.to_datetime('2020-02-29')
td_time = "6 months"
td = _check_timedelta(td_time)
assert td.get_value('mo') == 6
assert isinstance(td.delta_obj, relativedelta)
time = pd.to_datetime('2020-01-31')
assert time + td == pd.to_datetime('2020-07-31')
def test_has_multiple_units():
single_unit = pd.DateOffset(months=3)
multiple_units = pd.DateOffset(months=3, years=3, days=5)
single_td = _check_timedelta(single_unit)
multiple_td = _check_timedelta(multiple_units)
assert single_td.has_multiple_units() is False
assert multiple_td.has_multiple_units() is True
def test_pd_dateoffset_to_timedelta():
single_temporal = pd.DateOffset(months=3)
single_td = _check_timedelta(single_temporal)
assert single_td.get_value('mo') == 3
assert single_td.delta_obj == pd.DateOffset(months=3)
mult_temporal = pd.DateOffset(years=10, months=3, days=5)
mult_td = _check_timedelta(mult_temporal)
expected = {'Y': 10, 'mo': 3, 'd': 5}
assert mult_td.get_value() == expected
assert mult_td.delta_obj == mult_temporal
# get_name() for multiple values is not deterministic
assert len(mult_td.get_name()) == len("10 Years 3 Months 5 Days")
special_dateoffset = pd.offsets.BDay(100)
special_td = _check_timedelta(special_dateoffset)
assert special_td.get_value("businessdays") == 100
assert special_td.delta_obj == special_dateoffset
def test_pd_dateoffset_to_timedelta_math():
base = pd.to_datetime("2020-01-31")
add = _check_timedelta(pd.DateOffset(months=2))
res = base + add
assert res == pd.to_datetime("2020-03-31")
base_2 = pd.to_datetime("2020-01-31")
add_2 = _check_timedelta( | pd.DateOffset(months=2, days=3) | pandas.DateOffset |
# -*- coding: utf-8 -*-
import time
from datetime import datetime
import warnings
from textwrap import dedent, fill
import numpy as np
import pandas as pd
from numpy.linalg import norm, inv
from scipy.linalg import solve as spsolve, LinAlgError
from scipy.integrate import trapz
from scipy import stats
from lifelines.fitters import BaseFitter, Printer
from lifelines.plotting import set_kwargs_drawstyle
from lifelines.statistics import chisq_test, proportional_hazard_test, TimeTransformers, StatisticalResult
from lifelines.utils.lowess import lowess
from lifelines.utils.concordance import _concordance_summary_statistics, _concordance_ratio
from lifelines.utils import (
_get_index,
_to_list,
_to_tuple,
_to_1d_array,
inv_normal_cdf,
normalize,
qth_survival_times,
coalesce,
check_for_numeric_dtypes_or_raise,
check_low_var,
check_complete_separation,
check_nans_or_infs,
StatError,
ConvergenceWarning,
StatisticalWarning,
StepSizer,
ConvergenceError,
string_justify,
interpolate_at_times_and_return_pandas,
CensoringType,
interpolate_at_times,
format_p_value,
)
__all__ = ["CoxPHFitter"]
class BatchVsSingle:
@staticmethod
def decide(batch_mode, n_unique, n_total, n_vars):
frac_dups = n_unique / n_total
if batch_mode or (
# https://github.com/CamDavidsonPilon/lifelines/issues/591 for original issue.
            # new values from perf/batch_vs_single script.
(batch_mode is None)
and (
(
6.876218e-01
+ -1.796993e-06 * n_total
+ -1.204271e-11 * n_total ** 2
+ 1.912500e00 * frac_dups
+ -8.121036e-01 * frac_dups ** 2
+ 4.916605e-06 * n_total * frac_dups
+ -5.888875e-03 * n_vars
+ 5.473434e-09 * n_vars * n_total
)
< 1
)
):
return "batch"
return "single"
class CoxPHFitter(BaseFitter):
r"""
This class implements fitting Cox's proportional hazard model:
.. math:: h(t|x) = h_0(t) \exp((x - \overline{x})' \beta)
Parameters
----------
alpha: float, optional (default=0.05)
the level in the confidence intervals.
tie_method: string, optional
specify how the fitter should deal with ties. Currently only
'Efron' is available.
penalizer: float, optional (default=0.0)
Attach an L2 penalizer to the size of the coefficients during regression. This improves
stability of the estimates and controls for high correlation between covariates.
For example, this shrinks the absolute value of :math:`\beta_i`.
The penalty is :math:`\frac{1}{2} \text{penalizer} ||\beta||^2`.
strata: list, optional
specify a list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
Examples
--------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>> rossi = load_rossi()
>>> cph = CoxPHFitter()
>>> cph.fit(rossi, 'week', 'arrest')
>>> cph.print_summary()
Attributes
----------
params_ : Series
The estimated coefficients. Changed in version 0.22.0: use to be ``.hazards_``
hazard_ratios_ : Series
The exp(coefficients)
confidence_intervals_ : DataFrame
The lower and upper confidence intervals for the hazard coefficients
durations: Series
The durations provided
event_observed: Series
The event_observed variable provided
weights: Series
The event_observed variable provided
variance_matrix_ : numpy array
The variance matrix of the coefficients
strata: list
the strata provided
standard_errors_: Series
the standard errors of the estimates
score_: float
the concordance index of the model.
baseline_hazard_: DataFrame
baseline_cumulative_hazard_: DataFrame
baseline_survival_: DataFrame
"""
_KNOWN_MODEL = True
def __init__(self, alpha=0.05, tie_method="Efron", penalizer=0.0, strata=None):
super(CoxPHFitter, self).__init__(alpha=alpha)
if penalizer < 0:
raise ValueError("penalizer parameter must be >= 0.")
if tie_method != "Efron":
raise NotImplementedError("Only Efron is available at the moment.")
self.alpha = alpha
self.tie_method = tie_method
self.penalizer = penalizer
self.strata = strata
@CensoringType.right_censoring
def fit(
self,
df,
duration_col=None,
event_col=None,
show_progress=False,
initial_point=None,
strata=None,
step_size=None,
weights_col=None,
cluster_col=None,
robust=False,
batch_mode=None,
):
"""
Fit the Cox proportional hazard model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights, strata).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
            the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights. In that case, use `robust=True` to get more accurate standard errors.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence
diagnostics. Useful if convergence is failing.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
strata: list or string, optional
            specify a column or list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
step_size: float, optional
set an initial step size for the fitting algorithm. Setting to 1.0 may improve performance, but could also hurt convergence.
robust: boolean, optional (default=False)
Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
ties, so if there are high number of ties, results may significantly differ. See
"The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074- 1078
cluster_col: string, optional
specifies what column has unique identifiers for clustering covariances. Using this forces the sandwich estimator (robust variance estimator) to
be used.
batch_mode: bool, optional
enabling batch_mode can be faster for datasets with a large number of ties. If left as None, lifelines will choose the best option.
Returns
-------
self: CoxPHFitter
self with additional new properties: ``print_summary``, ``hazards_``, ``confidence_intervals_``, ``baseline_survival_``, etc.
Note
----
Tied survival times are handled using Efron's tie-method.
Examples
--------
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E')
>>> cph.print_summary()
>>> cph.predict_median(df)
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'weights': [1.1, 0.5, 2.0, 1.6, 1.2, 4.3, 1.4, 4.5, 3.0, 3.2, 0.4, 6.2],
>>> 'month': [10, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E', strata=['month', 'age'], robust=True, weights_col='weights')
>>> cph.print_summary()
>>> cph.predict_median(df)
"""
if duration_col is None:
raise TypeError("duration_col cannot be None.")
self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC"
self.duration_col = duration_col
self.event_col = event_col
self.robust = robust
self.cluster_col = cluster_col
self.weights_col = weights_col
self._n_examples = df.shape[0]
self._batch_mode = batch_mode
self.strata = coalesce(strata, self.strata)
X, T, E, weights, original_index, self._clusters = self._preprocess_dataframe(df)
self.durations = T.copy()
self.event_observed = E.copy()
self.weights = weights.copy()
if self.strata is not None:
self.durations.index = original_index
self.event_observed.index = original_index
self.weights.index = original_index
self._norm_mean = X.mean(0)
self._norm_std = X.std(0)
X_norm = normalize(X, self._norm_mean, self._norm_std)
params_ = self._fit_model(
X_norm, T, E, weights=weights, initial_point=initial_point, show_progress=show_progress, step_size=step_size
)
self.params_ = | pd.Series(params_, index=X.columns, name="coef") | pandas.Series |
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from sklearn.utils.estimator_checks import parametrize_with_checks
from skdownscale.pointwise_models import (
AnalogRegression,
BcsdPrecipitation,
BcsdTemperature,
CunnaneTransformer,
EquidistantCdfMatcher,
LinearTrendTransformer,
PaddedDOYGrouper,
PureAnalog,
PureRegression,
QuantileMapper,
QuantileMappingReressor,
ZScoreRegressor,
)
@pytest.fixture(scope='module')
def sample_X_y(n=365):
index = pd.date_range('2019-01-01', periods=n)
X = pd.DataFrame(
{'foo': np.sin(np.linspace(-10 * np.pi, 10 * np.pi, n)) * 10, 'bar': np.random.rand((n))},
index=index,
)
y = X['foo'] + 2
return X, y
@parametrize_with_checks(
[
# Regressors
AnalogRegression(),
BcsdPrecipitation(),
BcsdTemperature(),
PureAnalog(),
PureRegression(),
ZScoreRegressor(),
QuantileMappingReressor(n_endpoints=2),
EquidistantCdfMatcher(kind='difference', n_endpoints=2),
EquidistantCdfMatcher(kind='ratio', n_endpoints=2),
# transformers
LinearTrendTransformer(),
CunnaneTransformer(),
QuantileMapper(),
]
)
def test_sklearn_compatible_estimator(estimator, check):
check(estimator)
def test_linear_trend_roundtrip():
# TODO: there is probably a better analytic test here
n = 100
trend = 1
yint = 15
trendline = trend * np.arange(n) + yint
trendline = trendline.reshape(-1, 1)
noise = np.sin(np.linspace(-10 * np.pi, 10 * np.pi, n)) * 10
noise = noise.reshape(-1, 1)
data = trendline + noise
ltt = LinearTrendTransformer()
# remove trend
d_no_trend = ltt.fit_transform(data)
# assert detrended data is equal to noise
np.testing.assert_almost_equal(d_no_trend, noise, decimal=0)
# assert linear coef is equal to trend
np.testing.assert_almost_equal(ltt.lr_model_.coef_, trend, decimal=0)
# assert roundtrip
np.testing.assert_array_equal(ltt.inverse_transform(d_no_trend), data)
def test_quantile_mapper():
n = 100
expected = np.sin(np.linspace(-10 * np.pi, 10 * np.pi, n)) * 10
expected = expected.reshape(-1, 1)
with_bias = expected + 2
mapper = QuantileMapper()
mapper.fit(expected)
actual = mapper.transform(with_bias)
np.testing.assert_almost_equal(actual, expected)
@pytest.mark.xfail(reason='Need 3 part QM routine to handle bias removal')
def test_quantile_mapper_detrend():
n = 100
trend = 1
yint = 15
trendline = trend * np.arange(n) + yint
base = np.sin(np.linspace(-10 * np.pi, 10 * np.pi, n)) * 10
expected = base + trendline
with_bias = expected + 2
mapper = QuantileMapper(detrend=True)
mapper.fit(base)
actual = mapper.transform(with_bias)
np.testing.assert_almost_equal(actual.squeeze(), expected)
@pytest.mark.parametrize(
'model',
[
BcsdTemperature(),
PureAnalog(),
AnalogRegression(),
PureRegression(),
ZScoreRegressor(),
QuantileMappingReressor(),
QuantileMappingReressor(extrapolate='min'),
QuantileMappingReressor(extrapolate='max'),
QuantileMappingReressor(extrapolate='both'),
QuantileMappingReressor(extrapolate='1to1'),
EquidistantCdfMatcher(),
EquidistantCdfMatcher(extrapolate='min'),
EquidistantCdfMatcher(extrapolate='max'),
EquidistantCdfMatcher(extrapolate='both'),
EquidistantCdfMatcher(extrapolate='1to1'),
],
)
def test_linear_model(model):
n = 365
# TODO: add test for time other time ranges (e.g. < 365 days)
index = | pd.date_range('2019-01-01', periods=n) | pandas.date_range |
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import dates
import hashlib
import json
def load_rawdata(filepath):
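    # Each line of the input file is expected to be a JSON document containing a
    # 'prod_workload' object (with 'metric', 'configuration' and 'start' fields) and a
    # 'training_workload' object (with a 'metric' field); the parsed values are collected
    # into a time-indexed DataFrame.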
data = {'date': [], 'prod. pod': [],
'train. pod': [],
'config': []}
first = 0
with open(filepath) as f:
for doc in f:
doc_parsed = json.loads(doc)
data['prod. pod'].append(float(doc_parsed['prod_workload']['metric']))
data['train. pod'].append(float(doc_parsed['training_workload']['metric']))
data['config'].append(hashlib.md5(str(doc_parsed['prod_workload']['configuration']).encode('utf-8')).hexdigest())
if first == 0:
first = int(doc_parsed['prod_workload']['start'])
data['date'].append(int(doc_parsed['prod_workload']['start']))
# data['train. pod'].append(float(doc_parsed['tuning_metric']))
df = pd.DataFrame(data)
df['date'] = | pd.to_datetime(df['date'], unit='s') | pandas.to_datetime |
import io
import pickle
from unittest.mock import MagicMock, Mock, mock_open, patch
import numpy as np
import pandas as pd
import pytest
from sdv.lite import TabularPreset
from sdv.metadata import Table
from sdv.tabular import GaussianCopula
from tests.utils import DataFrameMatcher
class TestTabularPreset:
def test___init__missing_name(self):
"""Test the ``TabularPreset.__init__`` method with no parameters.
Side Effects:
- ValueError should be thrown
"""
# Run and Assert
with pytest.raises(
ValueError,
match=('You must provide the name of a preset using the `name` parameter. '
r'Use `TabularPreset.list_available_presets\(\)` to browse through '
'the options.')):
TabularPreset()
def test___init__invalid_name(self):
"""Test the ``TabularPreset.__init__`` method with an invalid arg value.
Input:
- name = invalid parameter
Side Effects:
- ValueError should be thrown
"""
# Run and Assert
with pytest.raises(ValueError, match=r'`name` must be one of *'):
TabularPreset(name='invalid')
@patch('sdv.lite.tabular.rdt.transformers')
@patch('sdv.lite.tabular.GaussianCopula', spec_set=GaussianCopula)
def test__init__speed_passes_correct_parameters(self, gaussian_copula_mock, transformers_mock):
"""Tests the ``TabularPreset.__init__`` method with the speed preset.
The method should pass the parameters to the ``GaussianCopula`` class.
Input:
- name of the speed preset
Side Effects:
- GaussianCopula should receive the correct parameters
"""
# Run
TabularPreset(name='FAST_ML')
# Assert
gaussian_copula_mock.assert_called_once_with(
table_metadata=None,
constraints=None,
categorical_transformer='categorical_fuzzy',
default_distribution='gaussian',
rounding=None,
)
metadata = gaussian_copula_mock.return_value._metadata
metadata._dtype_transformers.update.assert_called_once_with({
'i': transformers_mock.NumericalTransformer(
dtype=np.int64,
nan=None,
null_column=False,
min_value='auto',
max_value='auto',
),
'f': transformers_mock.NumericalTransformer(
dtype=np.float64,
nan=None,
null_column=False,
min_value='auto',
max_value='auto',
),
'O': transformers_mock.CategoricalTransformer(fuzzy=True),
'b': transformers_mock.BooleanTransformer(nan=None, null_column=False),
'M': transformers_mock.DatetimeTransformer(nan=None, null_column=False),
})
@patch('sdv.lite.tabular.GaussianCopula', spec_set=GaussianCopula)
def test__init__with_metadata(self, gaussian_copula_mock):
"""Tests the ``TabularPreset.__init__`` method with the speed preset.
The method should pass the parameters to the ``GaussianCopula`` class.
Input:
- name of the speed preset
Side Effects:
- GaussianCopula should receive the correct parameters
"""
# Setup
metadata = MagicMock(spec_set=Table)
# Run
TabularPreset(name='FAST_ML', metadata=metadata)
# Assert
gaussian_copula_mock.assert_called_once_with(
table_metadata=metadata.to_dict(),
constraints=None,
categorical_transformer='categorical_fuzzy',
default_distribution='gaussian',
rounding=None,
)
@patch('sdv.lite.tabular.rdt.transformers')
@patch('sdv.lite.tabular.GaussianCopula', spec_set=GaussianCopula)
def test__init__with_constraints(self, gaussian_copula_mock, transformers_mock):
"""Tests the ``TabularPreset.__init__`` method with constraints.
The constraints should be added to the metadata.
Input:
- constraints
Side Effects:
- GaussianCopula should receive args, including the constraints.
"""
# Setup
constraint = Mock()
# Run
preset = TabularPreset(name='FAST_ML', metadata=None, constraints=[constraint])
# Assert
gaussian_copula_mock.assert_called_once_with(
table_metadata=None,
constraints=[constraint],
categorical_transformer='categorical_fuzzy',
default_distribution='gaussian',
rounding=None,
)
metadata = gaussian_copula_mock.return_value._metadata
metadata._dtype_transformers.update.assert_called_once_with({
'i': transformers_mock.NumericalTransformer(
dtype=np.int64,
nan='mean',
null_column=None,
min_value='auto',
max_value='auto',
),
'f': transformers_mock.NumericalTransformer(
dtype=np.float64,
nan='mean',
null_column=None,
min_value='auto',
max_value='auto',
),
'O': transformers_mock.CategoricalTransformer(fuzzy=True),
'b': transformers_mock.BooleanTransformer(nan=-1, null_column=None),
'M': transformers_mock.DatetimeTransformer(nan='mean', null_column=None),
})
assert preset._null_column is True
@patch('sdv.lite.tabular.GaussianCopula', spec_set=GaussianCopula)
def test__init__with_constraints_and_metadata(self, gaussian_copula_mock):
"""Tests the ``TabularPreset.__init__`` method with constraints and metadata.
The constraints should be added to the metadata.
Input:
- constraints
- metadata
Side Effects:
- GaussianCopula should receive metadata with the constraints added.
"""
# Setup
metadata = {'name': 'test_table', 'fields': []}
constraint = Mock()
# Run
preset = TabularPreset(name='FAST_ML', metadata=metadata, constraints=[constraint])
# Assert
expected_metadata = metadata.copy()
expected_metadata['constraints'] = [constraint.to_dict.return_value]
gaussian_copula_mock.assert_called_once_with(
table_metadata=expected_metadata,
constraints=None,
categorical_transformer='categorical_fuzzy',
default_distribution='gaussian',
rounding=None,
)
metadata = gaussian_copula_mock.return_value._metadata
assert metadata._dtype_transformers.update.call_count == 1
assert preset._null_column is True
def test_fit(self):
"""Test the ``TabularPreset.fit`` method.
Expect that the model's fit method is called with the expected args.
Input:
- fit data
Side Effects:
- The model's ``fit`` method is called with the same data.
"""
# Setup
metadata = Mock()
metadata.to_dict.return_value = {'fields': {}}
model = Mock()
model._metadata = metadata
preset = Mock()
preset._model = model
preset._null_percentages = None
# Run
TabularPreset.fit(preset, pd.DataFrame())
# Assert
model.fit.assert_called_once_with(DataFrameMatcher(pd.DataFrame()))
assert preset._null_percentages is None
def test_fit_null_column_True(self):
"""Test the ``TabularPreset.fit`` method with modeling null columns.
Expect that the model's fit method is called with the expected args when
``_null_column`` is set to ``True``.
Setup:
- _null_column is True
Input:
- fit data
Side Effects:
- The model's ``fit`` method is called with the same data.
- ``_null_percentages`` is ``None``
"""
# Setup
metadata = Mock()
metadata.to_dict.return_value = {'fields': {}}
model = Mock()
model._metadata = metadata
preset = Mock()
preset._model = model
preset._null_column = True
preset._null_percentages = None
# Run
TabularPreset.fit(preset, pd.DataFrame())
# Assert
model.fit.assert_called_once_with(DataFrameMatcher( | pd.DataFrame() | pandas.DataFrame |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import argparse
import os
import warnings
import numpy as np
import pandas as pd
from sklearn.compose import make_column_transformer
from sklearn.exceptions import DataConversionWarning
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, StandardScaler
warnings.filterwarnings(action="ignore", category=DataConversionWarning)
columns = [
"age",
"education",
"major industry code",
"class of worker",
"num persons worked for employer",
"capital gains",
"capital losses",
"dividends from stocks",
"income",
]
class_labels = [" - 50000.", " 50000+."]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--train-test-split-ratio", type=float, default=0.3)
args, _ = parser.parse_known_args()
input_data_path = os.path.join("/opt/ml/processing/input", "census-income.csv")
df = | pd.read_csv(input_data_path) | pandas.read_csv |
import os, re, sys, time
import datetime as dt
import pandas as pd
import numpy as np
from .bot import Bot
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
class FacebookBot(Bot):
LOGIN_URL = 'https://www.facebook.com/'
SHARE_URL = "https://www.facebook.com/shares/view?id="
def __init__(self, browser="phantomjs", verbose=True):
super(FacebookBot, self).__init__(browser, verbose=verbose)
        self.clr_str = lambda s: re.sub(r'\W', '', s)
def login(self, usr, pwd):
br_id = self._get_any_idle_br_id()
self._new_command(br_id, (self.GOTO, self.LOGIN_URL))
logged_in = not self._new_command(br_id, (self.ID, "login_form"))[0]
if(logged_in):
self.br_dict[br_id].save_screenshot('login_status.png')
self._idle_br(br_id)
return
elem = self._new_command(br_id, (self.ID, "email"))[1][0]
elem.send_keys(usr)
elem = self._new_command(br_id, (self.ID, "pass"))[1][0]
elem.send_keys(pwd)
elem.send_keys(Keys.RETURN)
facebookLogo = "/html/body/div/div[1]/div/div/div/div[1]/div/h1/a"
self.br_dict[br_id].save_screenshot('login_status.png')
self._idle_br(br_id)
def fetch_shared_posts_by_post_id(self, post_id):
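        # Strategy: open the public "shares" page for the post, keep scrolling to the bottom
        # until no new 'fbUserContent' posts appear after a few retries, then scrape each
        # post's share time, author, content, link and like count.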
shared_posts_header = ['share_time', 'user_name', 'user_link',
'post_content', 'post_link', 'num_likes']
shared_posts_dict = {attr:{} for attr in shared_posts_header}
br_id = self._get_any_idle_br_id()
shared_posts_class = "fbUserContent"
user_name_class = "fwb"
post_link_xpath = "div[1]/div[3]/div[1]/div/div/div[2]/div/div/div[2]/div/span[3]/span/a"
share_tme_xpath = "div[1]/div[3]/div[1]/div/div/div[2]/div/div/div[2]/div/span[3]/span/a/abbr"
post_content_xpath = "div[1]/div[3]/div[2]"
num_likes_class = "_4arz"
self._new_command(br_id, (self.GOTO, self.SHARE_URL+"%d"%(int(post_id))))
all_shared_posts = []
count_update, last_loaded_posts = 0, 0
while(True):
self.br_dict[br_id].execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(1)
found, all_shared_posts = self._new_command(br_id, (self.CLASS, shared_posts_class))
print('loaded', len(all_shared_posts), 'posts...')
count_update = 0
while(len(all_shared_posts) == last_loaded_posts):
self.br_dict[br_id].execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(1)
found, all_shared_posts = self._new_command(br_id, (self.CLASS, shared_posts_class))
print('retry: loaded', len(all_shared_posts), 'posts...')
count_update += 1
if(count_update > 2):
break
if(count_update > 2):
break
last_loaded_posts = len(all_shared_posts)
self._idle_br(br_id)
if(len(all_shared_posts) == 0):
print("shared post not found!")
return None
ind = 0
for post in all_shared_posts:
try:
share_time = post.find_element_by_xpath(share_tme_xpath)
shared_posts_dict['share_time'][ind] = share_time.get_attribute('title')
user_name = post.find_element_by_class_name(user_name_class)
shared_posts_dict['user_name'][ind] = user_name.text
user_link = user_name.find_element_by_tag_name("a")
shared_posts_dict['user_link'][ind] = user_link.get_attribute('href')
post_content = post.find_element_by_xpath(post_content_xpath)
try:
post_content.find_element_by_class_name('see_more_link').click()
except:
pass
shared_posts_dict['post_content'][ind] = post_content.text
post_link = post.find_element_by_xpath(post_link_xpath)
shared_posts_dict['post_link'][ind] = post_link.get_attribute('href')
try:
num_likes = int(post.find_element_by_class_name(num_likes_class).text)
except:
num_likes = 0
shared_posts_dict['num_likes'][ind] = num_likes
print(share_time.get_attribute('title'), user_name.text)
print(post_content.text)
ind += 1
except:
print('failure after', ind)
continue
return | pd.DataFrame.from_dict(shared_posts_dict) | pandas.DataFrame.from_dict |
from os import sep
import pandas as pd
encryptionkey = | pd.read_csv(r"C:\Users\cjwhi\OneDrive\Computer\Documents\Coding\Programs\Small Coding Projects\Hash.csv", sep = ',', names = ['Character', 'Byte'], header = None, skiprows = [0]) | pandas.read_csv |
# Copyright 2021 The Funnel Rocket Maintainers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import shutil
from contextlib import contextmanager
from dataclasses import dataclass
from enum import auto
from typing import List
import numpy as np
import pytest
from pandas import RangeIndex, Series, DataFrame
from frocket.common.dataset import DatasetPartsInfo, DatasetId, DatasetPartId, PartNamingMethod, DatasetInfo, \
DatasetColumnType, DatasetShortSchema
from frocket.common.serializable import AutoNamedEnum
from frocket.worker.runners.part_loader import shared_part_loader
from tests.utils.base_test_utils import temp_filename, TEMP_DIR, DisablePyTestCollectionMixin
from tests.utils.mock_s3_utils import SKIP_S3_TESTS, new_mock_s3_bucket
class TestColumn(DisablePyTestCollectionMixin, str, AutoNamedEnum):
int_64_userid = auto()
int_64_ts = auto()
int_u32 = auto()
float_64_ts = auto()
float_all_none = auto()
float_32 = auto()
float_category = auto()
str_userid = auto()
str_and_none = auto()
str_all_none = auto()
str_object_all_none = auto()
str_category_userid = auto()
str_category_few = auto()
str_category_many = auto()
bool = auto()
unsupported_datetimes = auto()
unsupported_lists = auto()
DEFAULT_GROUP_COUNT = 200
DEFAULT_ROW_COUNT = 1000
DEFAULT_GROUP_COLUMN = TestColumn.int_64_userid.value
DEFAULT_TIMESTAMP_COLUMN = TestColumn.int_64_ts.value
BASE_TIME = 1609459200000 # Start of 2021, UTC
BASE_USER_ID = 100000
TIME_SHIFT = 10000
UNSUPPORTED_COLUMN_DTYPES = {TestColumn.unsupported_datetimes: 'datetime64[ns]',
TestColumn.unsupported_lists: 'object'}
STR_AND_NONE_VALUES = ["1", "2", "3"]
STR_CAT_FEW_WEIGHTS = [0.9, 0.07, 0.02, 0.01]
STR_CAT_MANY_WEIGHTS = [0.5, 0.2] + [0.01] * 30
def test_colname_to_coltype(name: str) -> DatasetColumnType:
prefix_to_type = {
'int': DatasetColumnType.INT,
'float': DatasetColumnType.FLOAT,
'str': DatasetColumnType.STRING,
'bool': DatasetColumnType.BOOL,
'unsupported': None
}
coltype = prefix_to_type[name.split('_')[0]]
return coltype
def datafile_schema(part: int = 0) -> DatasetShortSchema:
# noinspection PyUnresolvedReferences
result = DatasetShortSchema(
min_timestamp=float(BASE_TIME),
max_timestamp=float(BASE_TIME + TIME_SHIFT),
source_categoricals=[TestColumn.str_category_userid, TestColumn.str_category_many],
potential_categoricals=[TestColumn.str_and_none, TestColumn.str_category_few],
columns={col.value: test_colname_to_coltype(col)
for col in TestColumn
if test_colname_to_coltype(col)})
# print(f"Test dataset short schema is:\n{result.to_json(indent=2)}")
return result
def weighted_list(size: int, weights: list) -> list:
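    # Build a list of 'size' string labels where label str(i) fills a size * weights[i]
    # share of the list (the assert below requires the weights to cover the list exactly).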
res = []
for idx, w in enumerate(weights):
v = str(idx)
vlen = size * w
res += [v] * int(vlen)
assert len(res) == size
return res
def str_and_none_column_values(part: int = 0, with_none: bool = True) -> List[str]:
result = [*STR_AND_NONE_VALUES, f"part-{part}"]
if with_none:
result.append(None)
return result
def create_datafile(part: int = 0, size: int = DEFAULT_ROW_COUNT, filename: str = None) -> str:
# First, prepare data for columns
# Each part has a separate set of user (a.k.a. group) IDs
initial_user_id = BASE_USER_ID * part
min_user_id = initial_user_id
max_user_id = initial_user_id + DEFAULT_GROUP_COUNT - 1
    # To ease testing, ensure that each user ID appears in the file at least once, by including the whole range,
# then add random IDs in the range
int64_user_ids = \
list(range(min_user_id, max_user_id + 1)) + \
random.choices(range(min_user_id, max_user_id + 1), k=size - DEFAULT_GROUP_COUNT)
# And also represent as strings in another column
str_user_ids = [str(uid) for uid in int64_user_ids]
# Timestamp: each part has a range of values of size TIME_SHIFT
min_ts = BASE_TIME + (TIME_SHIFT * part)
max_ts = BASE_TIME + (TIME_SHIFT * (part + 1))
# Ensure that min & max timestamps appear exactly once, and fill the rest randomly in the range
int_timestamps = \
[min_ts, max_ts] + \
random.choices(range(min_ts + 1, max_ts), k=size-2)
# Now as floats and as (incorrect!) datetimes (datetimes currently unsupported)
float_timestamps = [ts + random.random() for ts in int_timestamps]
# More test columns
int_u32_values = random.choices(range(100), k=size)
float_32_values = [np.nan, *[random.random() for _ in range(size - 2)], np.nan]
str_and_none_values = random.choices(str_and_none_column_values(part), k=size)
bool_values = random.choices([True, False], k=size)
# For yet-unsupported columns below
lists_values = [[1, 2, 3]] * size
datetimes = [ts * 1000000 for ts in float_timestamps]
# Now create all series
idx = RangeIndex(size)
columns = {
TestColumn.int_64_userid: Series(data=int64_user_ids),
TestColumn.int_64_ts: Series(data=int_timestamps),
TestColumn.int_u32: Series(data=int_u32_values, dtype='uint32'),
TestColumn.float_64_ts: Series(data=float_timestamps),
TestColumn.float_all_none: Series(data=None, index=idx, dtype='float64'),
TestColumn.float_32: | Series(data=float_32_values, dtype='float32') | pandas.Series |
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
result = aa.groupby("nn").max()
assert "ss" in result
result = aa.groupby("nn").max(numeric_only=False)
assert "ss" in result
result = aa.groupby("nn").min()
assert "ss" in result
result = aa.groupby("nn").min(numeric_only=False)
assert "ss" in result
def test_min_date_with_nans():
# GH26321
dates = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
).dt.date
df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
result = df.groupby("b", as_index=False)["c"].min()["c"]
expected = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
).dt.date
tm.assert_series_equal(result, expected)
result = df.groupby("b")["c"].min()
expected.index.name = "b"
tm.assert_series_equal(result, expected)
def test_intercept_builtin_sum():
s = Series([1.0, 2.0, np.nan, 3.0])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(
result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)),
)
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{
"group": [1, 1, 2],
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
"category_string": pd.Series(list("abc")).astype("category"),
"category_int": [7, 8, 9],
"datetime": pd.date_range("20130101", periods=3),
"datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
},
columns=[
"group",
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
],
)
expected_columns_numeric = Index(["int", "float", "category_int"])
# mean / median
expected = pd.DataFrame(
{
"category_int": [7.5, 9],
"float": [4.5, 6.0],
"timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
"int": [1.5, 3],
"datetime": [
pd.Timestamp("2013-01-01 12:00:00"),
pd.Timestamp("2013-01-03 00:00:00"),
],
"datetimetz": [
pd.Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
pd.Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
],
},
index=Index([1, 2], name="group"),
columns=["int", "float", "category_int", "datetime", "datetimetz", "timedelta"],
)
for attr in ["mean", "median"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(
[
"int",
"float",
"string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["min", "max"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(
[
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["first", "last"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "string", "category_int", "timedelta"])
result = df.groupby("group").sum()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = df.groupby("group").sum(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int"])
for attr in ["prod", "cumprod"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(
["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
)
for attr in ["cummin", "cummax"]:
result = getattr(df.groupby("group"), attr)()
# GH 15561: numeric_only=False set by default like min/max
| tm.assert_index_equal(result.columns, expected_columns) | pandas._testing.assert_index_equal |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Yahoo! Finance market data downloader (+fix for Pandas Datareader)
# https://github.com/ranaroussi/yfinance
#
# Copyright 2017-2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import time as _time
import datetime as _datetime
import requests as _requests
import pandas as _pd
import numpy as _np
try:
from urllib.parse import quote as urlencode
except ImportError:
from urllib import quote as urlencode
from . import utils
from . import shared
class TickerBase():
def __init__(self, ticker):
self.ticker = ticker.upper()
self._history = None
self._base_url = 'https://query1.finance.yahoo.com'
self._scrape_url = 'https://finance.yahoo.com/quote'
self._fundamentals = False
self._info = None
self._sustainability = None
self._recommendations = None
self._major_holders = None
self._institutional_holders = None
self._mutualfund_holders = None
self._isin = None
self._calendar = None
self._expirations = {}
self._earnings = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._financials = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._balancesheet = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._cashflow = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
def history(self, period="1mo", interval="1d",
start=None, end=None, prepost=False, actions=True,
auto_adjust=True, back_adjust=False,
proxy=None, rounding=True, tz=None, **kwargs):
"""
:Parameters:
period : str
Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
Either Use period parameter or use start and end
interval : str
Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
Intraday data cannot extend last 60 days
start: str
Download start date string (YYYY-MM-DD) or _datetime.
Default is 1900-01-01
end: str
Download end date string (YYYY-MM-DD) or _datetime.
Default is now
prepost : bool
Include Pre and Post market data in results?
Default is False
auto_adjust: bool
Adjust all OHLC automatically? Default is True
back_adjust: bool
Back-adjusted data to mimic true historical prices
proxy: str
Optional. Proxy server URL scheme. Default is None
rounding: bool
                Round values to the precision suggested by Yahoo!?
                Optional. Default is True.
tz: str
Optional timezone locale for dates.
(default data is returned as non-localized dates)
**kwargs: dict
debug: bool
Optional. If passed as False, will suppress
error message printing to console.
"""
if start or period is None or period.lower() == "max":
if start is None:
start = -2208988800
elif isinstance(start, _datetime.datetime):
start = int(_time.mktime(start.timetuple()))
else:
start = int(_time.mktime(
_time.strptime(str(start), '%Y-%m-%d')))
if end is None:
end = int(_time.time())
elif isinstance(end, _datetime.datetime):
end = int(_time.mktime(end.timetuple()))
else:
end = int(_time.mktime(_time.strptime(str(end), '%Y-%m-%d')))
params = {"period1": start, "period2": end}
else:
period = period.lower()
params = {"range": period}
params["interval"] = interval.lower()
params["includePrePost"] = prepost
params["events"] = "div,splits"
        # 1) fix weird bug with Yahoo! - returning 60m for 30m bars
if params["interval"] == "30m":
params["interval"] = "15m"
# setup proxy in requests format
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
# Getting data from json
url = "{}/v8/finance/chart/{}".format(self._base_url, self.ticker)
data = _requests.get(url=url, params=params, proxies=proxy)
if "Will be right back" in data.text:
raise RuntimeError("*** YAHOO! FINANCE IS CURRENTLY DOWN! ***\n"
"Our engineers are working quickly to resolve "
"the issue. Thank you for your patience.")
data = data.json()
# Work with errors
debug_mode = True
if "debug" in kwargs and isinstance(kwargs["debug"], bool):
debug_mode = kwargs["debug"]
err_msg = "No data found for this date range, symbol may be delisted"
if "chart" in data and data["chart"]["error"]:
err_msg = data["chart"]["error"]["description"]
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
if "chart" not in data or data["chart"]["result"] is None or \
not data["chart"]["result"]:
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
# parse quotes
try:
quotes = utils.parse_quotes(data["chart"]["result"][0], tz)
except Exception:
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
        # 2) fix weird bug with Yahoo! - returning 60m for 30m bars
if interval.lower() == "30m":
quotes2 = quotes.resample('30T')
quotes = _pd.DataFrame(index=quotes2.last().index, data={
'Open': quotes2['Open'].first(),
'High': quotes2['High'].max(),
'Low': quotes2['Low'].min(),
'Close': quotes2['Close'].last(),
'Adj Close': quotes2['Adj Close'].last(),
'Volume': quotes2['Volume'].sum()
})
try:
quotes['Dividends'] = quotes2['Dividends'].max()
except Exception:
pass
try:
                quotes['Stock Splits'] = quotes2['Stock Splits'].max()
except Exception:
pass
if auto_adjust:
quotes = utils.auto_adjust(quotes)
elif back_adjust:
quotes = utils.back_adjust(quotes)
if rounding:
quotes = _np.round(quotes, data[
"chart"]["result"][0]["meta"]["priceHint"])
quotes['Volume'] = quotes['Volume'].fillna(0).astype(_np.int64)
quotes.dropna(inplace=True)
# actions
dividends, splits = utils.parse_actions(data["chart"]["result"][0], tz)
# combine
df = _pd.concat([quotes, dividends, splits], axis=1, sort=True)
df["Dividends"].fillna(0, inplace=True)
df["Stock Splits"].fillna(0, inplace=True)
# index eod/intraday
df.index = df.index.tz_localize("UTC").tz_convert(
data["chart"]["result"][0]["meta"]["exchangeTimezoneName"])
if params["interval"][-1] in {"m", "h"}:
df.index.name = "Datetime"
else:
df.index = _pd.to_datetime(df.index.date)
if tz is not None:
df.index = df.index.tz_localize(tz)
df.index.name = "Date"
self._history = df.copy()
if not actions:
df.drop(columns=["Dividends", "Stock Splits"], inplace=True)
return df
# ------------------------
def _get_fundamentals(self, kind=None, proxy=None):
def cleanup(data):
df = | _pd.DataFrame(data) | pandas.DataFrame |
#!/usr/bin/env python
# coding=utf8
import os
import sys
import json
import functools
import gzip
from collections import defaultdict
from itertools import groupby
import numpy as np
import pandas as pd
import subprocess
from scipy.io import mmwrite
from scipy.sparse import csr_matrix, coo_matrix
import pysam
from celescope.tools.utils import format_number, log, gene_convert, glob_genomeDir
from celescope.tools.report import reporter
toolsdir = os.path.dirname(__file__)
def report_prepare(count_file, downsample_file, outdir):
json_file = outdir + '/.data.json'
if not os.path.exists(json_file):
data = {}
else:
fh = open(json_file)
data = json.load(fh)
fh.close()
df0 = pd.read_table(downsample_file, header=0)
data['percentile'] = df0['percent'].tolist()
data['MedianGeneNum'] = df0['median_geneNum'].tolist()
data['Saturation'] = df0['saturation'].tolist()
#data['count' + '_summary'] = df0.T.values.tolist()
df = pd.read_table(count_file, header=0)
df = df.sort_values('UMI', ascending=False)
data['CB_num'] = df[df['mark'] == 'CB'].shape[0]
data['Cells'] = list(df.loc[df['mark'] == 'CB', 'UMI'])
data['UB_num'] = df[df['mark'] == 'UB'].shape[0]
data['Background'] = list(df.loc[df['mark'] == 'UB', 'UMI'])
data['umi_summary'] = True
with open(json_file, 'w') as fh:
json.dump(data, fh)
def hd(x, y):
return len([i for i in range(len(x)) if x[i] != y[i]])
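# Illustrative note: hd("AAAT", "AAAA") == 1, i.e. the two UMIs differ at a
# single base, which is the merge condition used by correct_umi() below.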
def correct_umi(fh1, barcode, gene_umi_dict, percent=0.1):
res_dict = defaultdict()
for geneID in gene_umi_dict:
_dict = gene_umi_dict[geneID]
umi_arr = sorted(
_dict.keys(), key=lambda x: (_dict[x], x), reverse=True)
while True:
            # break when only one UMI is left or umi_low/umi_high is greater than percent (0.1)
if len(umi_arr) == 1:
break
umi_low = umi_arr.pop()
for u in umi_arr:
if float(_dict[umi_low]) / _dict[u] > percent:
break
if hd(umi_low, u) == 1:
_dict[u] += _dict[umi_low]
del (_dict[umi_low])
break
res_dict[geneID] = _dict
return res_dict
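# Sketch of the correction with made-up counts: for
#   gene_umi_dict = {"geneA": {"AAAA": 20, "AAAT": 1}}
# the low-count UMI "AAAT" satisfies 1/20 <= 0.1 and hd("AAAT", "AAAA") == 1,
# so its count is folded into "AAAA", leaving {"geneA": {"AAAA": 21}}.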
@log
def bam2table(bam, detail_file):
    # Extract reads sharing the same barcode from the bam file and
    # summarize the reads aligned to each gene
#
samfile = pysam.AlignmentFile(bam, "rb")
with gzip.open(detail_file, 'wt') as fh1:
fh1.write('\t'.join(['Barcode', 'geneID', 'UMI', 'count']) + '\n')
# pysam.libcalignedsegment.AlignedSegment
# AAACAGGCCAGCGTTAACACGACC_CCTAACGT_A00129:340:HHH72DSXX:2:1353:23276:30843
        # get the barcode from the read name
def keyfunc(x): return x.query_name.split('_', 1)[0]
for _, g in groupby(samfile, keyfunc):
gene_umi_dict = defaultdict(lambda: defaultdict(int))
for seg in g:
(barcode, umi) = seg.query_name.split('_')[:2]
if not seg.has_tag('XT'):
continue
geneID = seg.get_tag('XT')
gene_umi_dict[geneID][umi] += 1
res_dict = correct_umi(fh1, barcode, gene_umi_dict)
# output
for geneID in res_dict:
for umi in res_dict[geneID]:
fh1.write('%s\t%s\t%s\t%s\n' % (barcode, geneID, umi,
res_dict[geneID][umi]))
@log
def call_cells(df, expected_num, pdf):
def num_gt2(x):
return | pd.Series.sum(x[x > 1]) | pandas.Series.sum |
#!/usr/bin/env python3
"""
Plotting routines dedicated to time-series or temporal trends
"""
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
# import seaborn as sns
#--------------------------------------
# Time-Series Plots
#--------------------------------------
def OccurancePlot(archive,
searchterms,
fields=None,
alpha=0.2,
startdate=None,
enddate=None):
"""
    Plots a time-series of occurrences of a given
set of search terms in the collection as a scatter
plot of vertical lines
Inputs:
archive (obj) : archive.Archive() object
searchterms (list) : Terms for which to search.
fields (list) : Metadata fields in which to look.
Shorthand expected. Default searches through
all in config.fields_short.
alpha (float) : Alpha of marker face
startdate (str) : Datetime (YYYYmmdd_HHMMSS) after which
to return data.
enddate (str) : Datetime (YYYYmmdd_HHMMSS) before which
to return data.
"""
# Instantiate figure
plt.figure(figsize=(8,len(searchterms)*1.3/3), dpi=200)
# Handle colors (tab10 or brg)
colors = cm.tab10(np.arange(len(searchterms)) \
/ float(len(searchterms)))
np.random.shuffle(colors)
for ii, term in enumerate(searchterms):
# Find filenames with search terms in field
sourcefiles = archive.FindSource([term], fields)
# Grab and filter by datetimes
data = archive.GrabData(sourcefiles, ['CreateDate'],
startdate=startdate,
enddate=enddate)
# Plot scatterplot w/ default spacing settings
plt.scatter(data['CreateDate'],
np.ones(len(data['CreateDate']))*(len(searchterms)-ii),
s=400, color=colors[ii], marker='|', alpha=alpha)
plt.ylim([0.5, len(searchterms) + 0.5])
plt.yticks(list(range(len(searchterms),0,-1)), searchterms)
plt.title('Occurances by Date')
return
def OccuranceMagnitude(archive,
searchterms,
fields=None,
alpha=0.4,
scale=5,
startdate=None,
enddate=None):
"""
    Plots a scatterplot time-series of occurrences of
a given set of search terms in the collection,
with sizes of each marker reflecting the number
of appearances that day
Inputs:
archive (obj) : archive.Archive() object
searchterms (list) : Terms for which to search.
fields (list) : Metadata fields in which to look.
Shorthand expected. Default searches through
all in config.fields_short.
alpha (float) : Alpha of marker face
scale (int) : Scaling factor to apply to all markers
startdate (str) : Datetime (YYYYmmdd_HHMMSS) after which
to return data.
enddate (str) : Datetime (YYYYmmdd_HHMMSS) before which
to return data.
"""
# Instantiate figure
plt.figure(figsize=(8,len(searchterms)*1.3/3), dpi=300)
# Get array of colors from hsv:
colors = cm.hsv(np.arange(len(searchterms)) \
/ float(len(searchterms)))
colors = np.array(colors)
colors[:,3] = alpha
np.random.shuffle(colors)
for ii, term in enumerate(searchterms):
# Find filenames with search terms in field
sourcefiles = archive.FindSource([term], fields)
# Grab and filter by datetimes
data = archive.GrabData(sourcefiles, ['CreateDate'],
startdate=startdate,
enddate=enddate)
# Count totals by day
counts = data['CreateDate'].dt.normalize().value_counts()
dates = counts.index.to_series()
# Plot scatterplot w/ default spacing settings
plt.scatter(dates, np.ones(len(dates))*(len(searchterms)-ii),
s=counts*scale, color=colors[ii], marker='o',
edgecolors=(0,0,0,1), linewidth=0.5)
plt.ylim([0.5, len(searchterms) + 0.5])
plt.yticks(list(range(len(searchterms),0,-1)), searchterms)
plt.title('Occurances by Date')
return
def ViolinPlot(archive, terms, fields,
startdate=None, enddate=None,
refdate='19800101_000000',
palette='Set2', inner='points',
scale='area', cut=0, linewidth=0.8):
"""
Wrapper for the Seaborn violin plot function. For each keyword
in terms, find files for which that keyword appears in fields,
    and plot a violin of occurrences by date. Most other attributes
    are aesthetic adjustments fed into seaborn.violinplot()
Inputs:
archive (obj) : archive.Archive() object
terms (list) : Keywords to search for in Archive, fed into
Archive.FindSource()
fields (list) : Exif fields in which to search for terms,
fed into Archive.FindSource()
startdate (str) : Datetime (YYYYmmdd_HHMMSS) after which
to return data.
enddate (str) : Datetime (YYYYmmdd_HHMMSS) before which
to return data.
refdate (str) : Reference date which is used to convert
pandas datetime to numeric dates, ideally a value
similar to the dates returned from the collection
palette, inner, scale, cut, linewidth : See requirements
for seaborn.violinplot()
"""
# Check if Seaborn is available
try:
import seaborn as sns
except ImportError:
print("Function unavailable, requires installation of Seaborn")
print("Perform full setup for auxilary packages")
return
sns.set_theme(style="whitegrid")
dates = []
refdate = pd.to_datetime(refdate, format="%Y%m%d_%H%M%S")
for n, term in enumerate(terms):
# Create a random dataset across several variables
sourcefiles = archive.FindSource([term], fields)
# Grab and filter by datetimes
data = archive.GrabData(sourcefiles, ['CreateDate'],
startdate, enddate)
# Convert to numeric date
data['epoch'] = (data['CreateDate']-refdate)//pd.Timedelta("1d")
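        # e.g. with the default refdate of 1980-01-01, a photo taken on
        # 2020-07-01 lies roughly 14792 days later, so epoch/365 + 1980 is
        # approximately 2020.5 (leap days are ignored, so this is approximate)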
# Save to list
dates.append(data['epoch']/365.0 + refdate.year)
# Append all the dates into a new dataframe
df = | pd.concat(dates, axis=1, keys=terms) | pandas.concat |
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
plt.style.use("./Styles/Scientific.mplstyle")
import numpy as np
import pandas as pd
from plotting import plot_3D_scatter
def get_features(data):
features = {}
for key, values in data.items():
timestamps, counts = np.unique(values["Timestamp"], return_counts=True)
features[key] = np.array([timestamps, counts]).T
return features
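# Note: each value in the returned dict is an (N, 2) array whose rows are
# [timestamp, number of keypoints observed at that timestamp].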
def get_octaves(data):
octaves = pd.DataFrame()
for index, (key, values) in enumerate(data.items()):
octave, counts = np.unique(values["Octave"], return_counts=True)
octaves.insert(index, key, counts)
return octaves
def feature_plot(features, xlims, ylims):
fig, ax = plt.subplots(figsize=(7, 2.0))
for key, value in features.items():
ax.plot(value[:, 0], value[:, 1], label=key)
ax.set_xlabel("Time [s]")
ax.set_ylabel("Num. of features [-]")
ax.set_ylim(ylims)
ax.set_xlim(xlims)
lg = fig.legend(bbox_to_anchor=(0.69, 1.0), loc="upper center", \
frameon=True, fancybox=False, ncol=3)
fr = lg.get_frame()
fr.set_edgecolor("black")
fr.set_facecolor("white")
fig.subplots_adjust(left=0.10, right=0.975, top=0.75, bottom=0.17, \
wspace=0.2, hspace=0.675)
return fig
def octave_plot(octave_data):
# Octave distribution.
margin = 0.4
lefts, rights, centers = [], [], []
fig, ax = plt.subplots(figsize=(7, 2.5))
for index, method in enumerate(octave_data):
counts = octave_data[method]
fracs = counts / max(counts)
levels = np.arange(1, len(counts)+1)
center = (1.0 + margin) * index
left = center - 0.5
right = center + 0.5
lefts.append(left)
centers.append(center)
rights.append(right)
ax.barh(levels, fracs, left=-fracs/2+center, height=0.8, color="b", \
label=method)
ax.set_ylabel("Image Pyramid Level [-]")
ax.set_yticks([1, 2, 3, 4, 5, 6, 7, 8], minor=False)
ax.set_yticklabels([1, 2, 3, 4, 5, 6, 7, 8])
ax.set_xticks(centers, minor=False)
ax.set_xlim([lefts[0]-margin, rights[-1]+margin])
ax.set_xticklabels([col for col in octave_data.columns])
fig.tight_layout()
return fig
def main():
directory = "/home/martin/dev/Trajectory/Data/SLAM/"
time_lims = [ 1611313305.76, 1611313730 ]
paths = {}
paths["Raw"] = directory + "RAW-Keypoint-Statistics.csv"
paths["BLF"] = directory + "BLF-Keypoint-Statistics.csv"
paths["HE"] = directory + "HE-Keypoint-Statistics.csv"
paths["CLAHE"] = directory + "CLAHE-Keypoint-Statistics.csv"
paths["UIENet"] = directory + "UIENet-Keypoint-Statistics.csv"
data = {}
data["Raw"] = | pd.read_csv(paths["Raw"]) | pandas.read_csv |
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.enums import range_dt
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
mask = pd.DataFrame([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]), columns=['a', 'b', 'c'])
ts = pd.Series([1., 2., 3., 2., 1.], index=mask.index)
price = pd.DataFrame({
'open': [10, 11, 12, 11, 10],
'high': [11, 12, 13, 12, 11],
'low': [9, 10, 11, 10, 9],
'close': [11, 12, 11, 10, 9]
})
group_by = pd.Index(['g1', 'g1', 'g2'])
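# Note: `group_by` assigns columns 'a' and 'b' to group 'g1' and column 'c' to
# group 'g2'; the grouped variants of the tests below aggregate per group.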
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# accessors.py ############# #
class TestAccessors:
def test_indexing(self):
assert mask.vbt.signals['a'].total() == mask['a'].vbt.signals.total()
def test_freq(self):
assert mask.vbt.signals.wrapper.freq == day_dt
assert mask['a'].vbt.signals.wrapper.freq == day_dt
assert mask.vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert mask['a'].vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([False, True]).vbt.signals.wrapper.freq is None
assert pd.Series([False, True]).vbt.signals(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(mask['a'].vbt.signals.fshift(test_n), mask['a'].shift(test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.fshift(test_n).values,
generic_nb.fshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.fshift(test_n), mask.shift(test_n, fill_value=False))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.bshift(test_n),
mask['a'].shift(-test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.bshift(test_n).values,
generic_nb.bshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.bshift(test_n), mask.shift(-test_n, fill_value=False))
def test_empty(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'),
pd.Series(np.full(5, False), index=np.arange(10, 15), name='a')
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']),
pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c'])
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty_like(mask['a']),
pd.Series(np.full(mask['a'].shape, False), index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty_like(mask),
pd.DataFrame(np.full(mask.shape, False), index=mask.index, columns=mask.columns)
)
def test_generate(self):
@njit
def choice_func_nb(from_i, to_i, col, n):
if col == 0:
return np.arange(from_i, to_i)
elif col == 1:
return np.full(1, from_i)
else:
return np.full(1, to_i - n)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate(5, choice_func_nb, 1, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate((5, 2), choice_func_nb, 1)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[True, False, False],
[True, False, False],
[True, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, pick_first=True, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_both(self):
@njit
def entry_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
@njit
def exit_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
en, ex = pd.Series.vbt.signals.generate_both(
5, entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, True, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=1, exit_wait=0)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=0, exit_wait=1)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
@njit
def entry_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
@njit
def exit_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func2_nb, (temp_int,), exit_func2_nb, (temp_int,),
entry_pick_first=False, exit_pick_first=False,
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_exits(self):
@njit
def choice_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
@njit
def choice_func2_nb(from_i, to_i, col, temp_int):
for i in range(from_i, to_i):
temp_int[i - from_i] = i
return temp_int[:to_i - from_i]
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func2_nb, temp_int, until_next=False, pick_first=False),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[True, True, False],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
mask2 = pd.Series([True, True, True, True, True], index=mask.index)
pd.testing.assert_series_equal(
mask2.vbt.signals.generate_exits(choice_func_nb, temp_int, until_next=False, skip_until_exit=True),
pd.Series(
np.array([False, True, False, True, False]),
index=mask.index
)
)
def test_clean(self):
entries = pd.DataFrame([
[True, False, True],
[True, False, False],
[True, True, True],
[False, True, False],
[False, True, True]
], index=mask.index, columns=mask.columns)
exits = pd.Series([True, False, True, False, True], index=mask.index)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[1],
pd.DataFrame(
np.array([
[False, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.clean(entries, entries, entries)
def test_generate_random(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, n=3, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([False, True, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), n=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=3, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[True, True, True],
[True, True, False],
[False, True, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, prob=0.5, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), prob=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=0.5, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, True],
[False, True, False],
[False, False, False],
[False, False, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, True, True],
[False, False, True],
[False, False, True],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], pick_first=True, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_random_both(self):
# n
en, ex = pd.Series.vbt.signals.generate_random_both(
5, n=2, seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=2, seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, True, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, False, True],
[False, True, False],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((2, 3), n=2, seed=seed, entry_wait=1, exit_wait=0)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True]
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((3, 3), n=2, seed=seed, entry_wait=0, exit_wait=1)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((7, 3), n=2, seed=seed, entry_wait=2, exit_wait=2)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True]
])
)
)
n = 10
a = np.full(n * 2, 0.)
for i in range(10000):
en, ex = pd.Series.vbt.signals.generate_random_both(1000, n, entry_wait=2, exit_wait=2)
_a = np.empty((n * 2,), dtype=np.int_)
_a[0::2] = np.flatnonzero(en)
_a[1::2] = np.flatnonzero(ex)
a += _a
greater = a > 10000000 / (2 * n + 1) * np.arange(0, 2 * n)
less = a < 10000000 / (2 * n + 1) * np.arange(2, 2 * n + 2)
assert np.all(greater & less)
# probs
en, ex = pd.Series.vbt.signals.generate_random_both(
5, entry_prob=0.5, exit_prob=1., seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=0.5, exit_prob=1., seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=[0., 0.5, 1.], exit_prob=[0., 0.5, 1.],
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., exit_wait=0,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=False, exit_pick_first=True,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=True, exit_pick_first=False,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
# none
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
def test_generate_random_exits(self):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(seed=seed),
pd.Series(
np.array([False, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, False],
[False, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=[0., 0.5, 1.], seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., wait=0, seed=seed),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., until_next=False, seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_stop_exits(self):
e = pd.Series([True, False, False, False, False, False])
t = pd.Series([2, 3, 4, 3, 2, 1]).astype(np.float64)
# stop loss
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits(t.vbt.tile(3), [np.nan, -0.5, -1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, False],
[False, True, False]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# take profit
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits((4 - t).vbt.tile(3), [np.nan, 0.5, 1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True],
[False, True, True]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# chain
e = pd.Series([True, True, True, True, True, True])
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, True]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, entry_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
# until_next and pick_first
e2 = pd.Series([True, True, True, True, True, True])
t2 = pd.Series([6, 5, 4, 3, 2, 1]).astype(np.float64)
ex = e2.vbt.signals.generate_stop_exits(t2, -0.1, until_next=False, pick_first=False)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, True, True, True, True, True]))
)
def test_generate_ohlc_stop_exits(self):
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=-0.1)
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=-0.1)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1, reverse=True)
)
def _test_ohlc_stop_exits(**kwargs):
out_dict = {'stop_price': np.nan, 'stop_type': -1}
result = mask.vbt.signals.generate_ohlc_stop_exits(
price['open'], price['high'], price['low'], price['close'],
out_dict=out_dict, **kwargs
)
if isinstance(result, tuple):
_, ex = result
else:
ex = result
return result, out_dict['stop_price'], out_dict['stop_type']
ex, stop_price, stop_type = _test_ohlc_stop_exits()
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, 0],
[0, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 11.7, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, 1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=[np.nan, 0.1, 0.2], sl_trail=True, tp_stop=[np.nan, 0.1, 0.2])
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, True, False],
[False, False, False],
[False, False, True]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 9.6]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, 1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1, exit_wait=0)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[9.0, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 11.7],
[10.8, 9.0, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, 1, -1]
]), index=mask.index, columns=mask.columns)
)
(en, ex), stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=0.1, sl_trail=True, tp_stop=0.1, chain=True)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
def test_between_ranges(self):
ranges = mask.vbt.signals.between_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 3, 1), (1, 1, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask.vbt.wrapper
mask2 = pd.DataFrame([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False]
], index=mask.index, columns=mask.columns)
other_mask = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[False, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_ranges(other=other_mask)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 1, 1, 1), (2, 1, 0, 2, 1),
(3, 1, 1, 2, 1), (4, 2, 0, 3, 1), (5, 2, 1, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
ranges = mask2.vbt.signals.between_ranges(other=other_mask, from_other=True)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 1, 1), (1, 0, 1, 2, 1), (2, 1, 1, 2, 1),
(3, 1, 1, 3, 1), (4, 2, 1, 3, 1), (5, 2, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_partition_ranges(self):
mask2 = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 0, 4, 4, 0), (2, 1, 2, 4, 1), (3, 2, 3, 4, 0)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_between_partition_ranges(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 1, 2, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_pos_rank(self):
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.pos_rank(),
pd.Series([-1, 0, 1, -1, 0], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 2, 2],
[2, -1, 3]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask['a'], allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 0, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask, allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
def test_partition_pos_rank(self):
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.partition_pos_rank(),
pd.Series([-1, 0, 0, -1, 1], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 1, 1],
[1, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask['a']),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 0, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_fns(self):
pd.testing.assert_frame_equal(
(~mask).vbt.signals.first(),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(1),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[True, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(2),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.from_nth(0),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, True],
[True, True, False],
[False, True, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_mapped(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 1, 0, 0, 1, 0, 0, 1])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_partition_pos_rank_mapped(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.partition_pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 0, 1, 0, 0, 1, 0, 0])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_nth_index(self):
assert mask['a'].vbt.signals.nth_index(0) == pd.Timestamp('2020-01-01 00:00:00')
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1),
pd.Series([
pd.Timestamp('2020-01-04 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-2),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
np.nan
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
def test_norm_avg_index(self):
assert mask['a'].vbt.signals.norm_avg_index() == -0.25
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(),
pd.Series([-0.25, 0.25, 0.0], index=mask.columns, name='norm_avg_index')
)
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(group_by=group_by),
pd.Series([0.0, 0.0], index=['g1', 'g2'], name='norm_avg_index')
)
def test_index_mapped(self):
mapped = mask.vbt.signals.index_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 3, 1, 4, 2])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 1, 1, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 3, 1, 4, 2])
)
assert mapped.wrapper == mask.vbt.wrapper
def test_total(self):
assert mask['a'].vbt.signals.total() == 2
pd.testing.assert_series_equal(
mask.vbt.signals.total(),
pd.Series([2, 2, 1], index=mask.columns, name='total')
)
pd.testing.assert_series_equal(
mask.vbt.signals.total(group_by=group_by),
pd.Series([4, 1], index=['g1', 'g2'], name='total')
)
def test_rate(self):
assert mask['a'].vbt.signals.rate() == 0.4
pd.testing.assert_series_equal(
mask.vbt.signals.rate(),
pd.Series([0.4, 0.4, 0.2], index=mask.columns, name='rate')
)
pd.testing.assert_series_equal(
mask.vbt.signals.rate(group_by=group_by),
pd.Series([0.4, 0.2], index=['g1', 'g2'], name='rate')
)
def test_total_partitions(self):
assert mask['a'].vbt.signals.total_partitions() == 2
pd.testing.assert_series_equal(
mask.vbt.signals.total_partitions(),
pd.Series([2, 2, 1], index=mask.columns, name='total_partitions')
)
pd.testing.assert_series_equal(
mask.vbt.signals.total_partitions(group_by=group_by),
pd.Series([4, 1], index=['g1', 'g2'], name='total_partitions')
)
def test_partition_rate(self):
assert mask['a'].vbt.signals.partition_rate() == 1.0
pd.testing.assert_series_equal(
mask.vbt.signals.partition_rate(),
pd.Series([1.0, 1.0, 1.0], index=mask.columns, name='partition_rate')
)
pd.testing.assert_series_equal(
mask.vbt.signals.partition_rate(group_by=group_by),
pd.Series([1.0, 1.0], index=['g1', 'g2'], name='partition_rate')
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Total', 'Rate [%]', 'First Index',
'Last Index', 'Norm Avg Index [-1, 1]', 'Distance: Min',
'Distance: Max', 'Distance: Mean', 'Distance: Std', 'Total Partitions',
'Partition Rate [%]', 'Partition Length: Min', 'Partition Length: Max',
'Partition Length: Mean', 'Partition Length: Std',
'Partition Distance: Min', 'Partition Distance: Max',
'Partition Distance: Mean', 'Partition Distance: Std'
], dtype='object')
pd.testing.assert_series_equal(
mask.vbt.signals.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
1.6666666666666667,
33.333333333333336,
pd.Timestamp('2020-01-02 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
0.0,
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan,
1.6666666666666667,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
2,
40.0,
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
-0.25,
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
| pd.Timedelta('3 days 00:00:00') | pandas.Timedelta |
"""
Module of utility methods.
"""
import matplotlib
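# use the non-interactive Agg backend so figures can be saved without a display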
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import re
import os
import sys
import time
import pickle
import random
import scipy.sparse
import numpy as np
import pandas as pd
import xgboost as xgb
import lightgbm as lgb
import termcolor
import sklearn.metrics as sm
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import GridSearchCV
class Util:
def __init__(self):
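        """Initializes the noise limit, the timing stack, and the directory stack."""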
self.noise_limit = 0.000025
self.timer = []
self.dirs = []
# public
def check_file(self, file):
"""Checks to see if the file exists
file: path of the file.
Returns True if it exists, exits the application if not."""
if os.path.exists(file):
return True
else:
self.exit('cannot read ' + file)
def clean_msg(self, msg):
"""Utility function to clean msg text by removing links, special
characters using simple regex statements."""
        return ' '.join(re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])"
                               r"|(\w+:\/\/\S+)", " ", msg).split())
def close_writer(self, sw):
"""Closes a file writer.
sw: file writer object."""
sw.close()
def colorize(self, string, color, display):
"""Gives the string the specified color if there is a display.
string: string to colorize.
color: color to give the string.
        display: boolean indicating if the application is run on a console.
Returns a colorized string if there is a display, string otherwise."""
s = string
if display:
s = termcolor.colored(string, color)
return s
def create_dirs(self, path):
"""Creates all directories in path that doesn't already exist.
path: str, directory to create."""
if not os.path.exists(path):
os.makedirs(path)
def div0(self, num, denom):
"""Divide operation that deals with a 0 value denominator.
num: numerator.
denom: denominator.
Returns 0.0 if the denominator is 0, otherwise returns a float."""
return 0.0 if denom == 0 else float(num) / denom
def end(self, message='', fw=None):
"""Pop a start time and take the time difference from now.
message: message to print."""
unit = 's'
elapsed = time.time() - self.timer.pop()
if elapsed >= 60:
elapsed /= 60
unit = 'm'
s = message + '%.2f' + unit + '\n'
if fw is not None:
fw.write(s % (elapsed))
else:
self.out(s % (elapsed))
def evaluate(self, data, test_probs):
"""Evaluates the predictions against the true labels.
data: tuple including test set labels and ids.
test_probs: predictions to evaluate."""
x, y, ids, feat_names = data
if y is not None:
t1 = self.out('evaluating...')
auroc, aupr, p, r, mp, mr, t = self.compute_scores(test_probs, y)
self.time(t1)
self.print_scores(mp, mr, t, aupr, auroc)
self.print_median_mean(ids, test_probs, y)
def exit(self, message='Unexpected error occurred!'):
"""Convenience method to fail gracefully.
        message: message to display to the user describing the error."""
self.out(message)
self.out('exiting...')
exit(0)
def file_len(self, fname):
"""Counts the number of lines in a file.
fname: path of the file.
Returns the number of lines in the specified file."""
        with open(fname, 'r') as f:
            lines = len(f.readlines())
        return lines
def gen_noise(self, pred):
"""Returns a prediction with some noise added to it.
        pred: prediction (e.g. value between 0.0 and 1.0).
        Returns the prediction with noise added, clipped to [0.0, 1.0]."""
noise = random.uniform(-self.noise_limit, self.noise_limit)
result = max(0.0, min(1.0, pred + noise))
return result
def get_comments_filename(self, modified):
"""Chooses the correct comments file to read
modified: Boolean indicating to read the modified comments file.
Returns the name of the appropriate comments file."""
filename = 'comments.csv'
if modified:
filename = 'modified.csv'
return filename
def load(self, filename):
"""Loads a binary pickled object.
filename: path of the file.
Returns loaded object."""
if self.check_file(filename):
with open(filename, 'rb') as f:
obj = pickle.load(f)
return obj
def load_sparse(self, filename):
"""Loads a sparse matrix object.
filename: path to the sparse matrix object file.
Returns sparse matrix object."""
matrix = scipy.sparse.load_npz(filename)
return matrix
def mean(self, numbers):
"""Computes the mean for a list of numbers.
numbers: list of numbers.
Returns mean as a float."""
return np.mean(numbers)
def out(self, message='', newline=1):
"""Custom print method to print multiple times on one line.
        message: string to print immediately.
        Returns the current time."""
msg = '\n' + message if newline == 1 else message
sys.stdout.write(msg)
sys.stdout.flush()
return time.time()
def open_writer(self, name, mode='w'):
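        """Opens a file writer.
        name: path of the file.
        mode: file mode, 'w' to write or 'a' to append.
        Returns the file writer object."""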
f = open(name, mode)
return f
def percent(self, num, denom):
"""Turns fraction into a percent.
num: numerator.
denom: denominator.
Returns float in percent form."""
return self.div0(num, denom) * 100.0
def plot_features(self, model, classifier, features, fname, save=True):
"""Plots relative feature importance.
model: fitted model.
classifier: specific model.
features: list of feature names.
fname: filename of where to store the plot.
save: boolean of whether the plot should be saved."""
if classifier == 'lr':
feat_importance = model.coef_[0]
elif classifier == 'rf' or classifier == 'lgb':
feat_importance = model.feature_importances_
elif classifier == 'xgb':
try:
ax = xgb.plot_importance(model._Booster)
labels = ax.get_yticklabels()
indices = [int(x.get_text().replace('f', '')) for x in labels]
yticks = [features[ndx] for ndx in indices]
ax.set_yticklabels(yticks)
plt.savefig(fname + '_feats.png', bbox_inches='tight')
plt.close('all')
except ValueError:
self.out('error plotting xgb feature importances...')
return
# normalize and rearrange features
feat_norm = feat_importance / feat_importance.sum()
sorted_idx = np.argsort(feat_norm)
pos = np.arange(sorted_idx.shape[0]) + 0.5 # [0.5, 1.5, ...]
feat_importance_sort = feat_norm[sorted_idx]
feat_sort = np.asanyarray(features)[sorted_idx]
# plot relative feature importance
color = '#7A68A6'
plt.figure(figsize=(12, 12))
plt.barh(pos, feat_importance_sort, align='center', color=color)
plt.yticks(pos, feat_sort)
plt.xlabel('Relative Importance')
plt.title('Feature Importance')
plt.savefig(fname + '_feats.pdf', bbox_inches='tight', format='pdf')
plt.close('all')
def plot_pr_curve(self, model, fname, rec, prec, aupr, title='',
line='-', save=False, show_legend=False, show_grid=False,
more_ticks=False):
"""Plots a precision-recall curve.
model: name of the model.
fname: filename to save the plot.
rec: recalls from the aupr.
prec: precisions from the aupr.
aupr: area under the pr curve.
title: title of the plot.
line: shape used to draw the curve.
save: boolean specifying whether to save the plot."""
self.set_plot_rc()
# if ax is None:
# fig, ax = plt.subplots()
plt.figure(2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.title(title, fontsize=22)
plt.xlabel('Recall', fontsize=22)
plt.ylabel('Precision', fontsize=22)
plt.tick_params(axis='both', labelsize=18)
# else:
# plt.figure(2)
plt.plot(rec, prec, line, label=model + ' = %0.3f' % aupr)
if show_legend:
plt.legend(loc='lower left', prop={'size': 6})
if show_grid:
ax = plt.gca()
ax.grid(b=True, which='major', color='#E5DCDA', linestyle='-')
if more_ticks:
plt.yticks(np.arange(0.0, 1.01, 0.1))
plt.xticks(np.arange(0.0, 1.01, 0.1), rotation=70)
if save:
plt.savefig(fname + '.pdf', bbox_inches='tight', format='pdf')
plt.clf()
plt.close('all')
def print_stats(self, df, r_df, relation, dset, fw=None):
"""Prints information about a relationship in the data.
df: comments dataframe.
r_df: df containing number of times relationship occurred.
relation: name of relation (e.g. posts).
dset: dataset (e.g. 'val' or 'test')."""
spam = r_df['label'].sum()
out_str = '\n\t[' + dset + '] ' + relation + ': >1: ' + str(len(r_df))
out_str += ', spam: ' + str(spam)
self.write(out_str, fw=fw)
def pushd(self, dir):
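        """Saves the current working directory and changes to dir,
        mirroring the shell pushd command.
        dir: directory to change to."""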
curd = os.getcwd()
self.dirs.append(curd)
os.chdir(dir)
def popd(self):
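        """Changes back to the most recently pushed directory,
        mirroring the shell popd command."""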
os.chdir(self.dirs.pop())
def read_csv(self, filename):
"""Safe read for pandas dataframes.
filename: path to data file.
Returns dataframe if the file exists, None otherwise."""
result = None
if os.path.exists(filename):
result = pd.read_csv(filename)
return result
def save(self, obj, filename):
"""Pickles an object to a binary file.
obj: object to pickle.
filename: path of the file."""
with open(filename, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def save_sparse(self, matrix, filename):
"""Saves a sparse matrix object to a file.
matrix: sparse matrix object.
filename: path to the file to save the object to."""
scipy.sparse.save_npz(filename, matrix)
def set_noise_limit(self, noise_limit):
"""Setter for noise_limit."""
self.noise_limit = noise_limit
def start(self, message='', fw=None):
"""Pushes a start time onto a stack and print a specified message.
message: message to print."""
self.write(message=message, fw=fw)
self.timer.append(time.time())
def test(self, data, model, fsets=['all']):
"""Tests data using a trained model.
data: tuple including data to classify.
model: trained model.
Returns predictions and ids associated with those predictions."""
x, y, ids, feat_names = data
if type(model) == xgb.XGBClassifier and \
any(x in fsets for x in ['ngrams', 'all']):
x = x.tocsc() # bug in xgb, turn on when stacking is on.
t1 = self.out('testing...')
if type(model) == lgb.LGBMClassifier:
ys = model.predict_proba(x, num_iteration=model.best_iteration_)
else:
ys = model.predict_proba(x)
self.time(t1)
return ys, ids
def time(self, t):
"""Write time based on suffix."""
elapsed = time.time() - t
if elapsed < 60:
suffix = 's'
elif elapsed < 3600:
suffix = 'm'
else:
suffix = 'h'
if suffix == 'm':
elapsed /= 60.0
elif suffix == 'h':
elapsed /= 3600.0
self.out('%.2f%s' % (elapsed, suffix), 0)
def train(self, data, clf='rf', param_search='single', tune_size=0.15,
scoring='roc_auc', n_jobs=1, verbose=1):
"""Trains a classifier with the specified training data.
data: tuple including training data.
        clf: string of {'rf', 'lr', 'xgb', 'lgb'}.
Returns trained classifier."""
x_train, y_train, _, features = data
if param_search == 'single' or tune_size == 0:
model, params = self.classifier(clf, param_search='single')
model.set_params(**params)
elif tune_size > 0:
t1 = self.out('tuning...')
model, params = self.classifier(clf, param_search=param_search)
train_len = x_train.shape[0]
split_ndx = train_len - int(train_len * tune_size)
sm_x_train, x_val = x_train[:split_ndx], x_train[split_ndx:]
sm_train_fold = np.full(sm_x_train.shape[0], -1)
val_fold = np.full(x_val.shape[0], 0)
predefined_fold = np.append(sm_train_fold, val_fold)
ps = PredefinedSplit(predefined_fold)
cv = ps.split(x_train, y_train)
m = GridSearchCV(model, params, scoring=scoring, cv=cv,
verbose=verbose, n_jobs=n_jobs)
m.fit(x_train, y_train)
model = m.best_estimator_
self.time(t1)
t1 = self.out('training...')
if clf == 'lgb':
cat_feat = ['app', 'device', 'os', 'channel', 'hour']
cat_feat_ndx = [features.index(x) for x in cat_feat]
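            # hold out the tail of the training set as an eval set for early stopping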
train_len = x_train.shape[0]
split_ndx = train_len - int(train_len * tune_size)
sm_x_train, x_val = x_train[:split_ndx], x_train[split_ndx:]
sm_y_train, y_val = y_train[:split_ndx], y_train[split_ndx:]
eval_set = (x_val, y_val)
model = model.fit(sm_x_train, sm_y_train, eval_set=eval_set,
early_stopping_rounds=50, eval_metric='auc',
categorical_feature=cat_feat_ndx)
else:
model = model.fit(x_train, y_train)
self.time(t1)
self.out(str(model))
return model
def write(self, message='', fw=None):
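        """Writes a message to the file writer if given, otherwise prints it.
        message: message to write.
        fw: file writer object, optional."""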
if fw is not None:
fw.write(message)
else:
self.out(message)
def classifier(self, classifier='rf', param_search='single'):
"""
Defines model and parameters to tune.
Parameters
----------
        classifier : str, {'rf', 'xgb', 'lgb', 'lr'}, default: 'rf'
            Type of model to define.
        param_search : str, {'single', 'low', 'med', 'high'}, default: 'single'
            Level of parameters to tune.
Returns
-------
Defined model and dictionary of parameters to tune.
"""
if classifier == 'lr':
clf = LogisticRegression()
high = [{'penalty': ['l1', 'l2'],
'C': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5,
1.0, 2.0, 10.0, 50.0, 100.0, 500.0, 1000.0],
'solver': ['liblinear']},
{'penalty': ['l2'],
'C': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5,
1.0, 2.0, 10.0, 50.0, 100.0, 500.0, 1000.0],
'solver': ['newton-cg']}]
med = [{'penalty': ['l1', 'l2'],
'C': [0.0001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0],
'solver': ['liblinear']},
{'penalty': ['l2'],
'C': [0.0001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0],
'solver': ['newton-cg']}]
low = {'penalty': ['l2'],
'C': [0.0001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0],
'solver': ['liblinear'],
                   'class_weight': ['balanced']}
single = {'penalty': 'l2', 'C': 1000.0, 'solver': 'liblinear',
'class_weight': 'balanced'}
elif classifier == 'rf':
clf = RandomForestClassifier()
high = {'n_estimators': [10, 100, 1000], 'max_depth': [None, 2, 4]}
med = {'n_estimators': [1000], 'max_depth': [None, 2]}
low = {'n_estimators': [1000], 'max_depth': [None]}
single = {'n_estimators': 100, 'max_depth': 4}
elif classifier == 'lgb':
clf = lgb.LGBMClassifier()
high = {'max_depth': [3, 4, 6],
'n_estimators': [100, 1000],
'learning_rate': [0.3, 0.1, 0.05, 0.01, 0.005, 0.001],
'subsample': [0.8, 0.9, 1.0],
'colsample_bytree': [0.8, 0.9, 1.0]}
med = {'max_depth': [4, 6], 'n_estimators': [10, 100, 1000],
'learning_rate': [0.005, 0.05, 0.1],
'subsample': [0.9, 1.0], 'colsample_bytree': [1.0]}
low = {'max_depth': [4], 'boosting_type': ['gbdt'],
'n_estimators': [1500], 'subsample': [0.7],
'num_leaves': [7], 'colsample_bytree': [0.7, 0.9],
'min_child_samples': [100], 'max_bin': [100],
'learning_rate': [0.1, 0.2], 'min_child_weight': [0.0],
'scale_pos_weight': [500], 'verbose': [-1]}
single = {'max_depth': 4, 'n_estimators': 1500,
'learning_rate': 0.1, 'scale_pos_weight': 500,
'num_leaves': 7, 'min_child_samples': 100,
'subsample': 0.7, 'colsample_bytree': 0.7,
'min_child_weight': 0.0, 'verbose': -1}
# single = {'max_depth': 4, 'n_estimators': 1500, # not adclicks
# 'learning_rate': 0.1, 'scale_pos_weight': 500,
# 'num_leaves': 7, 'min_child_samples': 20,
# 'subsample': 0.7, 'colsample_bytree': 0.7,
# 'min_child_weight': 0.0, 'verbose': -1}
elif classifier == 'xgb':
clf = xgb.XGBClassifier()
high = {'max_depth': [3, 4, 6],
'n_estimators': [100, 1000],
'learning_rate': [0.3, 0.1, 0.05, 0.01, 0.005, 0.001],
'subsample': [0.8, 0.9, 1.0],
'colsample_bytree': [0.8, 0.9, 1.0]}
med = {'max_depth': [4, 6], 'n_estimators': [10, 100, 1000],
'learning_rate': [0.005, 0.05, 0.1],
'subsample': [0.9, 1.0], 'colsample_bytree': [1.0]}
low = {'max_depth': [6], 'n_estimators': [1000],
'learning_rate': [0.05], 'subsample': [0.9],
'colsample_bytree': [1.0]}
single = {'max_depth': 4, 'n_estimators': 100,
'learning_rate': 0.1, 'subsample': 1.0,
'colsample_bytree': 1.0, 'scale_pos_weight': 500}
param_dict = {'high': high, 'med': med, 'low': low, 'single': single}
param_grid = param_dict[param_search]
return (clf, param_grid)
def compute_scores(self, probs, y):
"""Generates noisy predictions and computes various metrics.
        probs: predicted probabilities, shape=(<num_instances>, 2).
        y: list of true labels.
Returns auroc, aupr, recalls, precisions, max precision, max recall,
and threshold where those max values take place."""
prob_preds_noise = [self.gen_noise(pred) for pred in probs[:, 1]]
fpr, tpr, tholds = sm.roc_curve(y, prob_preds_noise)
prec, rec, tholds = sm.precision_recall_curve(y, prob_preds_noise)
aupr = sm.average_precision_score(y, prob_preds_noise)
auroc = sm.auc(fpr, tpr)
max_p, max_r, thold = self.find_max_prec_recall(prec, rec, tholds)
return auroc, aupr, prec, rec, max_p, max_r, thold
def find_max_prec_recall(self, prec, rec, tholds):
"""Finds the precision and recall scores with the maximum amount of
area and returns their values, including the threshold.
prec: list of precisions from the pr curve.
rec: list of recalls from the pr curve.
tholds: list of thresholds from the pr curve.
Returns max precision and recall scores, including their threshold."""
max_val, max_prec, max_rec, max_thold = -1, -1, -1, -1
if len(tholds) > 1:
for i in range(len(prec)):
val = prec[i] * rec[i]
if val > max_val:
max_val = val
max_thold = tholds[i]
max_prec = prec[i]
max_rec = rec[i]
return max_prec, max_rec, max_thold
def save_preds(self, probs, ids, fold, pred_f, dset, eval='cc'):
"""Save predictions to a specified file.
        probs: array of predicted probabilities; shape=(<num_instances>, 2).
        ids: list of identifiers for the data instances.
        fold: fold identifier used in the output filename.
pred_f: folder to save predictions to.
dset: dataset (e.g. 'train', 'val', 'test')."""
columns = ['com_id', 'ind_pred']
fname = dset + '_' + fold + '_preds'
t1 = self.out('saving predictions...')
preds = list(zip(ids, probs[:, 1]))
preds_df = | pd.DataFrame(preds, columns=columns) | pandas.DataFrame |
from matplotlib import pyplot as plt
import matplotlib.ticker as mticker
from matplotlib import patches
import matplotlib
SMALL_SIZE = 10
MEDIUM_SIZE = 12
BIGGER_SIZE = 14
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
from os.path import join
import pandas as pd
import seaborn as sns
import re
import numpy as np
# define log csv data and dataset parameters not available in csvs
analysis_dir = './data/analysis' # dir to save plots to
points_to_label_per_al_iter = 20
train_set_size = 949
fp_baseline = "data/_analysis/R6-20190315T030959.log/logdata.csv"
# fp_medal = "data/_analysis/RM6-20190319T005512.log/logdata.csv"
fp_medal_patience10 = "data/_analysis/RM6e-20190324T140934.log/logdata.csv"
fp_medal_patience20 = "data/_analysis/RM6g-20190406T222759.log/logdata.csv"
fps_varying_online_frac = [ # stored in dfo
# "data/_analysis/RMO6-0d-20190323T003224.log/logdata.csv",
"data/_analysis/RMO6-12.5d-20190322T142710.log/logdata.csv",
# "data/_analysis/RMO6-25d-20190323T152428.log/logdata.csv",
# "data/_analysis/RMO6-37.5d-20190322T051602.log/logdata.csv",
# "data/_analysis/RMO6-50d-20190323T030509.log/logdata.csv",
# "data/_analysis/RMO6-62.5d-20190322T092309.log/logdata.csv",
# "data/_analysis/RMO6-75d-20190323T185418.log/logdata.csv",
"data/_analysis/RMO6-87.5d-20190322T173655.log/logdata.csv",
# "data/_analysis/RMO6-100d-20190323T082726.log/logdata.csv",
]
fps_omedal_patience = {
# "p=0.875, 20 epoch": "data/_analysis/RMO6-87.5-20epoch-20191005T215029.log/logdata.csv",
"p=0.875, patience=05": "data/_analysis/RMO6-87.5-5patience-20191005T215029.log/logdata.csv",
"p=0.875, patience=10": "data/_analysis/RMO6-87.5-10patience-20191006T004721.log/logdata.csv",
"p=0.875, patience=20": "data/_analysis/RMO6-87.5-20patience-20191006T133459.log/logdata.csv",
"p=0.125, patience=05": "data/_analysis/RMO6-12.5-5patience-20191007T054951.log/logdata.csv",
# "p=0.125, patience=10": "data/_analysis/RMO6-12.5-10patience-20191007T054951.log/logdata.csv",
# "p=0.125, patience=20": "data/_analysis/RMO6-12.5-20patience-20191007T020901.log/logdata.csv",
# 'p=0.125, patience=05b': 'data/_analysis/RMO6-12.5-5patienceb-20191009T184359.log/logdata.csv',
# 'p=0.125, patience=05c': 'data/_analysis/RMO6-12.5-5patiencec-20191009T184359.log/logdata.csv',
# "p=0.875, patience=05b": "data/_analysis/RMO6-87.5-5patience-b-20191009T221911.log/logdata.csv",
# "p=0.875, patience=05c": "data/_analysis/RMO6-87.5-5patience-c-20191009T221911.log/logdata.csv",
# "p=0.875, patience=30": "data/_analysis/RMO6-87.5-30patience-20191010T141916.log/logdata.csv",
# "p=0.875, patience=40": "data/_analysis/RMO6-87.5-40patience-20191010T005839.log/logdata.csv",
# "p=0.125, vpf9": "data/_analysis/RMO6-12.5-vpf9-20191010T020718.log/logdata.csv",
# "p=0.125, patience=05 vpf9": "data/_analysis/RMO6-12.5-5patience-vpf9-20191010T022128.log/logdata.csv",
}
def get_train_frac(fp):
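    """Parses the percentage from a log filepath like 'RMO6-87.5d-...'
    and returns it as a fraction string (e.g. '0.875')."""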
return str(float(re.sub(r'.*RMO6-([\d\.]+).?-.*', r'\1', fp)) * .01)
# load the data
dfb = pd.read_csv(fp_baseline).query('perf').sort_values('epoch').set_index('epoch')
dfm20 = pd.read_csv(fp_medal_patience20).query('perf').sort_values(['al_iter', 'epoch'])
dfm10 = | pd.read_csv(fp_medal_patience10) | pandas.read_csv |
import pandas as pd
import numpy as np
from math import sqrt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor
def get_predictions():
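    """Trains an Imputer + XGBRegressor pipeline on a train/test split of the
    housing data and prints the mean absolute error of its predictions."""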
data = pd.read_csv('https://drive.google.com/uc?export=download&id=1ibgtR07ULjL2Mf7YCiOl4mL1zxxDUNWW')
y = data.SalePrice
x = data.drop('SalePrice', axis=1)
train_X, test_X, train_y, test_y = train_test_split(x, y)
my_pipeline = make_pipeline(Imputer(), XGBRegressor())
train_X = pd.get_dummies(train_X)
test_X = pd.get_dummies(test_X)
train_X, test_X = train_X.align(test_X, join='left', axis=1)
my_pipeline.fit(train_X, train_y)
predictions = my_pipeline.predict(test_X)
print(np.mean(np.abs(predictions-test_y)))
def get_submission():
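    """Trains the Imputer + XGBRegressor pipeline on the full training set to
    generate predictions for the separate test file."""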
train_data = pd.read_csv('https://drive.google.com/uc?export=download&id=1ibgtR07ULjL2Mf7YCiOl4mL1zxxDUNWW')
train_y = train_data.SalePrice
train_x = train_data.drop('SalePrice', axis=1)
test_x = pd.read_csv('https://drive.google.com/uc?export=download&id=1cmqIDhq9xn_5kv-ERbn-HkMVzg7tdG5v')
my_pipeline = make_pipeline(Imputer(), XGBRegressor())
train_x = | pd.get_dummies(train_x) | pandas.get_dummies |
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = pi + offs
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
unanchored = np.array([pd.offsets.Hour(n=1),
pd.offsets.Minute(n=-2)])
with pytest.raises(period.IncompatibleFrequency):
pi + unanchored
with pytest.raises(TypeError):
unanchored + pi
    @pytest.mark.xfail(reason="GH#18824 radd doesn't implement this case")
def test_pi_radd_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = offs + pi
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
def test_pi_add_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + one
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize('five', [5, np.array(5, dtype=np.int64)])
def test_sub(self, five):
rng = period_range('2007-01', periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = | pd.period_range('2014-04-28', '2014-05-12', freq='D') | pandas.period_range |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import gzip
import os
import re
import shutil
from collections import OrderedDict
from io import BytesIO, StringIO
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf import read_csv
from cudf.tests.utils import assert_eq, assert_exceptions_equal
def make_numeric_dataframe(nrows, dtype):
df = pd.DataFrame()
df["col1"] = np.arange(nrows, dtype=dtype)
df["col2"] = np.arange(1, 1 + nrows, dtype=dtype)
return df
def make_datetime_dataframe(include_non_standard=False):
df = pd.DataFrame()
df["col1"] = np.array(
[
"31/10/2010",
"05/03/2001",
"20/10/1994",
"18/10/1990",
"1/1/1970",
"2016-04-30T01:02:03.000",
"2038-01-19 03:14:07",
]
)
df["col2"] = np.array(
[
"18/04/1995",
"14 / 07 / 1994",
"07/06/2006",
"16/09/2005",
"2/2/1970",
"2007-4-30 1:6:40.000PM",
"2038-01-19 03:14:08",
]
)
if include_non_standard:
# Last column contains non-standard date formats
df["col3"] = np.array(
[
"1 Jan",
"2 January 1994",
"Feb 2002",
"31-01-2000",
"1-1-1996",
"15-May-2009",
"21-Dec-3262",
]
)
return df
def make_numpy_mixed_dataframe():
df = pd.DataFrame()
df["Integer"] = np.array([2345, 11987, 9027, 9027])
df["Date"] = np.array(
["18/04/1995", "14/07/1994", "07/06/2006", "16/09/2005"]
)
df["Float"] = np.array([9.001, 8.343, 6, 2.781])
df["Integer2"] = np.array([2345, 106, 2088, 789277])
# Category is not yet supported from libcudf
# df["Category"] = np.array(["M", "F", "F", "F"])
df["String"] = np.array(["Alpha", "Beta", "Gamma", "Delta"])
df["Boolean"] = np.array([True, False, True, False])
return df
@pytest.fixture
def pd_mixed_dataframe():
return make_numpy_mixed_dataframe()
@pytest.fixture
def cudf_mixed_dataframe():
return cudf.from_pandas(make_numpy_mixed_dataframe())
def make_all_numeric_dataframe():
df = pd.DataFrame()
gdf_dtypes = [
"float",
"float32",
"double",
"float64",
"int8",
"short",
"int16",
"int",
"int32",
"long",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
]
np_dtypes = [
np.float32,
np.float32,
np.float64,
np.float64,
np.int8,
np.int16,
np.int16,
np.int32,
np.int32,
np.int64,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]
for i in range(len(gdf_dtypes)):
df[gdf_dtypes[i]] = np.arange(10, dtype=np_dtypes[i])
return (
df,
OrderedDict(zip(gdf_dtypes, gdf_dtypes)),
OrderedDict(zip(gdf_dtypes, np_dtypes)),
)
def make_all_numeric_extremes_dataframe():
# integers 0,+1,-1,min,max
# float 0.0, -0.0,+1,-1,min,max, nan, esp, espneg, tiny, [-ve values]
df, gdf_dtypes, pdf_dtypes = make_all_numeric_dataframe()
df = pd.DataFrame()
for gdf_dtype in gdf_dtypes:
np_type = pdf_dtypes[gdf_dtype]
if np.issubdtype(np_type, np.integer):
itype = np.iinfo(np_type)
extremes = [0, +1, -1, itype.min, itype.max]
df[gdf_dtype] = np.array(extremes * 4, dtype=np_type)[:20]
else:
ftype = np.finfo(np_type)
extremes = [
0.0,
-0.0,
+1,
-1,
np.nan,
-np.nan,
# ftype.min, # TODO enable after fixing truncation issue #6235
# ftype.max, # TODO enable after fixing truncation issue #6235
np_type(np.inf),
-np_type(np.inf),
ftype.eps,
ftype.epsneg,
ftype.tiny,
-ftype.eps,
-ftype.epsneg,
-ftype.tiny,
]
df[gdf_dtype] = np.array(extremes * 4, dtype=np_type)[:20]
return (
df,
gdf_dtypes,
pdf_dtypes,
)
@pytest.fixture
def path_or_buf(tmpdir):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_path_or_buf.csv")
df = make_numeric_dataframe(10, np.int32)
df.to_csv(fname, index=False, header=False)
buffer = df.to_csv(index=False, header=False)
def _make_path_or_buf(src):
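        """Returns the CSV test data as the requested source type:
        a file path, a Path object, a BytesIO/StringIO buffer, or a file URI."""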
if src == "filepath":
return str(fname)
if src == "pathobj":
return fname
if src == "bytes_io":
return BytesIO(buffer.encode())
if src == "string_io":
return StringIO(buffer)
if src == "url":
return Path(fname).as_uri()
raise ValueError("Invalid source type")
yield _make_path_or_buf
dtypes = [np.float64, np.float32, np.int64, np.int32, np.uint64, np.uint32]
dtypes_dict = {"1": np.float64, "2": np.float32, "3": np.int64, "4": np.int32}
nelem = [5, 25, 100]
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nelem", nelem)
def test_csv_reader_numeric_data(dtype, nelem, tmpdir):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file1.csv")
df = make_numeric_dataframe(nelem, dtype)
df.to_csv(fname, index=False, header=False)
dtypes = [df[k].dtype for k in df.columns]
out = read_csv(str(fname), names=list(df.columns.values), dtype=dtypes)
assert len(out.columns) == len(df.columns)
assert_eq(df, out)
@pytest.mark.parametrize("parse_dates", [["date2"], [0], ["date1", 1, "bad"]])
def test_csv_reader_datetime(parse_dates):
df = make_datetime_dataframe(include_non_standard=True)
buffer = df.to_csv(index=False, header=False)
gdf = read_csv(
StringIO(buffer),
names=["date1", "date2", "bad"],
parse_dates=parse_dates,
dayfirst=True,
)
pdf = pd.read_csv(
StringIO(buffer),
names=["date1", "date2", "bad"],
parse_dates=parse_dates,
dayfirst=True,
)
assert_eq(gdf, pdf)
@pytest.mark.parametrize("pandas_arg", [{"delimiter": "|"}, {"sep": "|"}])
@pytest.mark.parametrize("cudf_arg", [{"sep": "|"}, {"delimiter": "|"}])
def test_csv_reader_mixed_data_delimiter_sep(
tmpdir, pandas_arg, cudf_arg, pd_mixed_dataframe
):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file3.csv")
pd_mixed_dataframe.to_csv(fname, sep="|", index=False, header=False)
gdf1 = read_csv(
str(fname),
# Category is not yet supported from libcudf
# names=["1", "2", "3", "4", "5", "6", "7"],
# dtype=[
# "int64", "date", "float64", "int64", "category", "str", "bool"
# ],
names=["1", "2", "3", "4", "5", "6"],
dtype=["int64", "date", "float64", "uint64", "str", "bool"],
dayfirst=True,
**cudf_arg,
)
gdf2 = read_csv(
str(fname),
# Category is not yet supported from libcudf
# names=["1", "2", "3", "4", "5", "6", "7"],
# dtype=[
# "int64", "date", "float64", "int64", "category", "str", "bool"
# ],
names=["1", "2", "3", "4", "5", "6"],
dtype=["int64", "date", "float64", "uint64", "str", "bool"],
dayfirst=True,
**pandas_arg,
)
pdf = pd.read_csv(
fname,
# Category is not yet supported from libcudf
# names=["1", "2", "3", "4", "5", "6", "7"],
names=["1", "2", "3", "4", "5", "6"],
parse_dates=[1],
dayfirst=True,
**pandas_arg,
)
assert len(gdf1.columns) == len(pdf.columns)
assert len(gdf2.columns) == len(pdf.columns)
assert_eq(gdf1, gdf2)
@pytest.mark.parametrize("use_list", [False, True])
def test_csv_reader_dtype_list(use_list):
df = make_numeric_dataframe(10, dtype=np.float32)
buffer = df.to_csv(index=False, header=False)
    # pandas doesn't accept a list of dtypes, but cudf does (treated as an implied ordered dict)
# Select first column's dtype if non-list; expect the same dtype for all
if use_list:
dtypes = [df[k].dtype for k in df.columns]
else:
dtypes = df[df.columns[0]].dtype
gdf = read_csv(StringIO(buffer), dtype=dtypes, names=df.columns)
assert_eq(gdf, df)
@pytest.mark.parametrize("use_names", [False, True])
def test_csv_reader_dtype_dict(use_names):
# Save with the column header if not explicitly specifying a list of names
df, gdf_dtypes, pdf_dtypes = make_all_numeric_dataframe()
buffer = df.to_csv(index=False, header=(not use_names))
dtypes = df.dtypes.to_dict()
gdf_names = list(gdf_dtypes.keys()) if use_names else None
pdf_names = list(pdf_dtypes.keys()) if use_names else None
gdf = read_csv(StringIO(buffer), dtype=dtypes, names=gdf_names)
pdf = pd.read_csv(StringIO(buffer), dtype=dtypes, names=pdf_names)
assert_eq(gdf, pdf)
@pytest.mark.parametrize("use_names", [True, False])
def test_csv_reader_dtype_extremes(use_names):
# Save with the column header if not explicitly specifying a list of names
df, gdf_dtypes, pdf_dtypes = make_all_numeric_extremes_dataframe()
buffer = df.to_csv(index=False, header=(not use_names))
dtypes = df.dtypes.to_dict()
gdf_names = list(gdf_dtypes.keys()) if use_names else None
pdf_names = list(pdf_dtypes.keys()) if use_names else None
gdf = read_csv(StringIO(buffer), dtype=dtypes, names=gdf_names)
pdf = pd.read_csv(StringIO(buffer), dtype=dtypes, names=pdf_names)
assert_eq(gdf, pdf)
def test_csv_reader_skiprows_skipfooter(tmpdir, pd_mixed_dataframe):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file5.csv")
pd_mixed_dataframe.to_csv(
fname, columns=["Integer", "Date", "Float"], index=False, header=False
)
    # Use engine='python' to avoid the pandas warning about falling back to
    # the python engine (skipfooter requires it).
df_out = pd.read_csv(
fname,
names=["1", "2", "3"],
parse_dates=[1],
dayfirst=True,
skiprows=1,
skipfooter=1,
engine="python",
)
out = read_csv(
str(fname),
names=["1", "2", "3"],
dtype=["int64", "date", "float64"],
skiprows=1,
skipfooter=1,
dayfirst=True,
)
assert len(out.columns) == len(df_out.columns)
assert len(out) == len(df_out)
assert_eq(df_out, out)
def test_csv_reader_negative_vals(tmpdir):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file6.csv")
names = ["0", "1", "2"]
dtypes = ["float32", "float32", "float32"]
lines = [
",".join(names),
"-181.5060,-185.37000,-3",
"-127.6300,-230.54600,-9",
]
with open(str(fname), "w") as fp:
fp.write("\n".join(lines))
zero = [-181.5060, -127.6300]
one = [-185.370, -230.54600]
two = [-3, -9]
df = read_csv(str(fname), names=names, dtype=dtypes, skiprows=1)
np.testing.assert_allclose(zero, df["0"].to_array())
np.testing.assert_allclose(one, df["1"].to_array())
np.testing.assert_allclose(two, df["2"].to_array())
def test_csv_reader_strings(tmpdir):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file7.csv")
names = ["text", "int"]
dtypes = ["str", "int"]
lines = [",".join(names), "a,0", "b,0", "c,0", "d,0"]
with open(str(fname), "w") as fp:
fp.write("\n".join(lines))
df = read_csv(
str(fname),
names=names,
dtype=dtypes,
skiprows=1,
decimal=".",
thousands="'",
)
assert len(df.columns) == 2
assert df["text"].dtype == np.dtype("object")
assert df["int"].dtype == np.dtype("int64")
assert df["text"][0] == "a"
assert df["text"][1] == "b"
assert df["text"][2] == "c"
assert df["text"][3] == "d"
def test_csv_reader_strings_quotechars(tmpdir):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file8.csv")
names = ["text", "int"]
dtypes = ["str", "int"]
lines = [",".join(names), '"a,\n",0', '"b ""c"" d",0', "e,0", '"f,,!.,",0']
with open(str(fname), "w") as fp:
fp.write("\n".join(lines))
df = read_csv(
str(fname),
names=names,
dtype=dtypes,
skiprows=1,
quotechar='"',
quoting=1,
)
assert len(df.columns) == 2
assert df["text"].dtype == np.dtype("object")
assert df["int"].dtype == np.dtype("int64")
assert df["text"][0] == "a,\n"
assert df["text"][1] == 'b "c" d'
assert df["text"][2] == "e"
assert df["text"][3] == "f,,!.,"
def test_csv_reader_usecols_int_char(tmpdir, pd_mixed_dataframe):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file10.csv")
pd_mixed_dataframe.to_csv(
fname,
columns=["Integer", "Date", "Float", "Integer2"],
index=False,
header=False,
)
df_out = pd.read_csv(fname, usecols=[0, 1, 3])
out = read_csv(fname, usecols=[0, 1, 3])
assert len(out.columns) == len(df_out.columns)
assert len(out) == len(df_out)
assert_eq(df_out, out, check_names=False)
def test_csv_reader_mangle_dupe_cols(tmpdir):
buffer = "abc,ABC,abc,abcd,abc\n1,2,3,4,5\n"
# Default: mangle_dupe_cols=True
pd_df = pd.read_csv(StringIO(buffer))
cu_df = read_csv(StringIO(buffer))
assert_eq(cu_df, pd_df)
# Pandas does not support mangle_dupe_cols=False
cu_df = read_csv(StringIO(buffer), mangle_dupe_cols=False)
# check that the dupe columns were removed
assert len(cu_df.columns) == 3
np.testing.assert_array_equal(cu_df["abc"].to_array(), [1])
def test_csv_reader_float_decimal(tmpdir):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file12.csv")
names = ["basic_32", "basic_64", "round", "decimal_only", "precision"]
dtypes = ["float32", "float64", "float64", "float32", "float64"]
lines = [
";".join(names),
"1,2;1234,5678;12345;0,123;-73,98007199999998",
"3,4;3456,7890;67890;,456;1,7976931348623157e+307",
"5,6e0;0,5679e2;1,2e10;0,07e-001;0,0",
]
with open(str(fname), "w") as fp:
fp.write("\n".join(lines))
basic_32_ref = [1.2, 3.4, 5.6]
basic_64_ref = [1234.5678, 3456.7890, 56.79]
round_ref = [12345, 67890, 12000000000]
decimal_only_ref = [0.123, 0.456, 0.007]
precision_ref = [-73.98007199999998, 1.7976931348623157e307, 0.0]
df = read_csv(
str(fname),
names=names,
dtype=dtypes,
skiprows=1,
delimiter=";",
decimal=",",
)
np.testing.assert_allclose(basic_32_ref, df["basic_32"].to_array())
np.testing.assert_allclose(basic_64_ref, df["basic_64"].to_array())
np.testing.assert_allclose(round_ref, df["round"].to_array())
np.testing.assert_allclose(decimal_only_ref, df["decimal_only"].to_array())
np.testing.assert_allclose(precision_ref, df["precision"].to_array())
def test_csv_reader_NaN_values():
names = dtypes = ["float32"]
empty_cells = '\n""\n'
default_na_cells = (
"#N/A\n#N/A N/A\n#NA\n-1.#IND\n"
"-1.#QNAN\n-NaN\n-nan\n1.#IND\n"
"1.#QNAN\nN/A\n<NA>\nNA\nNULL\n"
"NaN\nn/a\nnan\nnull\n"
)
custom_na_cells = "NV_NAN\nNotANumber\n"
all_cells = empty_cells + default_na_cells + custom_na_cells
custom_na_values = ["NV_NAN", "NotANumber"]
# test default NA values. empty cells should also yield NaNs
gdf = read_csv(
StringIO(default_na_cells + empty_cells), names=names, dtype=dtypes
)
pdf = pd.read_csv(
StringIO(default_na_cells + empty_cells), names=names, dtype=np.float32
)
assert_eq(pdf, gdf)
# custom NA values
gdf = read_csv(
StringIO(all_cells),
names=names,
dtype=dtypes,
na_values=custom_na_values,
)
pdf = pd.read_csv(
StringIO(all_cells),
names=names,
dtype=np.float32,
na_values=custom_na_values,
)
assert_eq(pdf, gdf)
# custom NA values
gdf = read_csv(
StringIO(empty_cells + default_na_cells + "_NAA_\n"),
names=names,
dtype=dtypes,
na_values="_NAA_",
)
pdf = pd.read_csv(
StringIO(empty_cells + default_na_cells + "_NAA_\n"),
names=names,
dtype=np.float32,
na_values="_NAA_",
)
assert_eq(pdf, gdf)
# data type detection should evaluate the column to int8 (all nulls)
gdf = read_csv(
StringIO(all_cells), header=None, na_values=custom_na_values,
)
assert gdf.dtypes[0] == "int8"
assert all(gdf["0"][idx] is cudf.NA for idx in range(len(gdf["0"])))
    # data type detection should evaluate the column to object when only some values are null
gdf = read_csv(StringIO(all_cells), header=None)
assert gdf.dtypes[0] == np.dtype("object")
def test_csv_reader_thousands(tmpdir):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file13.csv")
names = dtypes = [
"float32",
"float64",
"int32",
"int64",
"uint32",
"uint64",
]
lines = [
",".join(names),
"1'234.5, 1'234.567, 1'234'567, 1'234'567'890,\
1'234'567, 1'234'567'890",
"12'345.6, 123'456.7, 12'345, 123'456'789, 12'345, 123'456'789",
]
with open(str(fname), "w") as fp:
fp.write("\n".join(lines))
f32_ref = [1234.5, 12345.6]
f64_ref = [1234.567, 123456.7]
int32_ref = [1234567, 12345]
int64_ref = [1234567890, 123456789]
uint32_ref = [1234567, 12345]
uint64_ref = [1234567890, 123456789]
df = read_csv(
str(fname), names=names, dtype=dtypes, skiprows=1, thousands="'"
)
np.testing.assert_allclose(f32_ref, df["float32"].to_array())
np.testing.assert_allclose(f64_ref, df["float64"].to_array())
np.testing.assert_allclose(int32_ref, df["int32"].to_array())
np.testing.assert_allclose(int64_ref, df["int64"].to_array())
np.testing.assert_allclose(uint32_ref, df["uint32"].to_array())
np.testing.assert_allclose(uint64_ref, df["uint64"].to_array())
def test_csv_reader_buffer_strings():
names = ["text", "int"]
dtypes = ["str", "int"]
lines = [",".join(names), "a,0", "b,0", "c,0", "d,0"]
buffer = "\n".join(lines)
df = read_csv(StringIO(buffer), names=names, dtype=dtypes, skiprows=1)
assert len(df.columns) == 2
assert df["text"].dtype == np.dtype("object")
assert df["int"].dtype == np.dtype("int64")
assert df["text"][0] == "a"
assert df["text"][1] == "b"
assert df["text"][2] == "c"
assert df["text"][3] == "d"
df2 = read_csv(
BytesIO(str.encode(buffer)), names=names, dtype=dtypes, skiprows=1
)
assert len(df2.columns) == 2
assert df2["text"].dtype == np.dtype("object")
assert df2["int"].dtype == np.dtype("int64")
assert df2["text"][0] == "a"
assert df2["text"][1] == "b"
assert df2["text"][2] == "c"
assert df2["text"][3] == "d"
@pytest.mark.parametrize(
"ext, out_comp, in_comp",
[
(".geez", "gzip", "gzip"),
(".beez", "bz2", "bz2"),
(".gz", "gzip", "infer"),
(".bz2", "bz2", "infer"),
(".beez", "bz2", np.str_("bz2")),
(".data", None, "infer"),
(".txt", None, None),
("", None, None),
],
)
def test_csv_reader_compression(
tmpdir, ext, out_comp, in_comp, pd_mixed_dataframe
):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_compression" + ext)
df = pd_mixed_dataframe
df.to_csv(fname, index=False, header=False, compression=out_comp)
gdf = read_csv(fname, names=list(df.columns.values), compression=in_comp)
pdf = pd.read_csv(
fname, names=list(df.columns.values), compression=in_comp
)
assert_eq(gdf, pdf)
@pytest.mark.parametrize(
"names, dtypes, data, trues, falses",
[
(
["A", "B"],
["bool", "bool"],
"True,True\nFalse,False\nTrue,False",
None,
None,
),
(
["A", "B"],
["int32", "int32"],
"True,1\nFalse,2\nTrue,3",
None,
None,
),
(
["A", "B"],
["int32", "int32"],
"YES,1\nno,2\nyes,3\nNo,4\nYes,5",
["yes", "Yes", "YES"],
["no", "NO", "No"],
),
(["A", "B"], ["int32", "int32"], "foo,bar\nbar,foo", ["foo"], ["bar"]),
(["x", "y"], None, "True,1\nFalse,0", None, None),
],
)
def test_csv_reader_bools(tmpdir, names, dtypes, data, trues, falses):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file11.csv")
lines = [",".join(names), data]
with open(str(fname), "w") as fp:
fp.write("\n".join(lines))
# Usage of true_values and false_values makes that column into bool type
df_out = pd.read_csv(
fname,
names=names,
skiprows=1,
dtype=(dtypes[0] if dtypes else None),
true_values=trues,
false_values=falses,
)
out = read_csv(
fname,
names=names,
dtype=dtypes,
skiprows=1,
true_values=trues,
false_values=falses,
)
assert_eq(df_out, out)
def test_csv_quotednumbers(tmpdir):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file12.csv")
names = ["integer", "decimal"]
dtypes = ["int32", "float32"]
lines = [
",".join(names),
'1,"3.14"',
'"2","300"',
'"3",10101.0101',
'4,"6.28318"',
]
with open(str(fname), "w") as fp:
fp.write("\n".join(lines))
integer_ref = [1, 2, 3, 4]
decimal_ref = [3.14, 300, 10101.0101, 6.28318]
df1 = read_csv(str(fname), names=names, dtype=dtypes, skiprows=1)
df2 = read_csv(str(fname), names=names, dtype=dtypes, skiprows=1)
assert len(df2.columns) == 2
np.testing.assert_allclose(integer_ref, df1["integer"].to_array())
np.testing.assert_allclose(decimal_ref, df1["decimal"].to_array())
np.testing.assert_allclose(integer_ref, df2["integer"].to_array())
np.testing.assert_allclose(decimal_ref, df2["decimal"].to_array())
def test_csv_reader_nrows(tmpdir):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file14.csv")
names = ["int1", "int2"]
dtypes = ["int32", "int32"]
rows = 4000000
read_rows = (rows * 3) // 4
skip_rows = (rows - read_rows) // 2
sample_skip = 1000
with open(str(fname), "w") as fp:
fp.write(",".join(names) + "\n")
for i in range(rows):
fp.write(str(i) + ", " + str(2 * i) + " \n")
# with specified names
df = read_csv(
str(fname),
names=names,
dtype=dtypes,
skiprows=skip_rows + 1,
nrows=read_rows,
)
assert df.shape == (read_rows, 2)
for row in range(0, read_rows // sample_skip, sample_skip):
assert df["int1"][row] == row + skip_rows
assert df["int2"][row] == 2 * (row + skip_rows)
assert df["int2"][read_rows - 1] == 2 * (read_rows - 1 + skip_rows)
# with column name inference
df = read_csv(
str(fname), dtype=dtypes, skiprows=skip_rows + 1, nrows=read_rows
)
assert df.shape == (read_rows, 2)
assert str(skip_rows) in list(df)[0]
assert str(2 * skip_rows) in list(df)[1]
for row in range(0, read_rows // sample_skip, sample_skip):
assert df[list(df)[0]][row] == row + skip_rows + 1
assert df[list(df)[1]][row] == 2 * (row + skip_rows + 1)
assert df[list(df)[1]][read_rows - 1] == 2 * (read_rows + skip_rows)
# nrows larger than the file
df = read_csv(str(fname), dtype=dtypes, nrows=rows * 2)
assert df.shape == (rows, 2)
for row in range(0, rows // sample_skip, sample_skip):
assert df["int1"][row] == row
assert df["int2"][row] == 2 * row
assert df["int2"][rows - 1] == 2 * (rows - 1)
# nrows + skiprows larger than the file
df = read_csv(
str(fname), dtype=dtypes, nrows=read_rows, skiprows=read_rows
)
assert df.shape == (rows - read_rows, 2)
# nrows equal to zero
df = read_csv(str(fname), dtype=dtypes, nrows=0)
assert df.shape == (0, 2)
# with both skipfooter and nrows - should throw
with pytest.raises(ValueError):
read_csv(str(fname), nrows=read_rows, skipfooter=1)
def test_csv_reader_gzip_compression_strings(tmpdir):
fnamebase = tmpdir.mkdir("gdf_csv")
fname = fnamebase.join("tmp_csvreader_file15.csv")
fnamez = fnamebase.join("tmp_csvreader_file15.csv.gz")
names = ["text", "int"]
dtypes = ["str", "int"]
lines = [",".join(names), "a,0", "b,0", "c,0", "d,0"]
with open(str(fname), "w") as fp:
fp.write("\n".join(lines))
with open(str(fname), "rb") as f_in, gzip.open(str(fnamez), "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
df = read_csv(
str(fnamez),
names=names,
dtype=dtypes,
skiprows=1,
decimal=".",
thousands="'",
compression="gzip",
)
assert len(df.columns) == 2
assert df["text"].dtype == np.dtype("object")
assert df["int"].dtype == np.dtype("int64")
assert df["text"][0] == "a"
assert df["text"][1] == "b"
assert df["text"][2] == "c"
assert df["text"][3] == "d"
@pytest.mark.parametrize("skip_rows", [0, 2, 4])
@pytest.mark.parametrize("header_row", [0, 2])
def test_csv_reader_skiprows_header(skip_rows, header_row):
names = ["float_point", "integer"]
dtypes = ["float64", "int64"]
lines = [
",".join(names),
"1.2, 1",
"2.3, 2",
"3.4, 3",
"4.5, 4",
"5.6, 5",
"6.7, 6",
]
buffer = "\n".join(lines)
cu_df = read_csv(
StringIO(buffer), dtype=dtypes, skiprows=skip_rows, header=header_row
)
pd_df = pd.read_csv(
StringIO(buffer), skiprows=skip_rows, header=header_row
)
assert cu_df.shape == pd_df.shape
assert list(cu_df.columns.values) == list(pd_df.columns.values)
def test_csv_reader_dtype_inference():
names = ["float_point", "integer"]
lines = [
",".join(names),
"1.2,1",
"2.3,2",
"3.4,3",
"4.5,4",
"5.6,5",
"6.7,6",
]
buffer = "\n".join(lines)
cu_df = read_csv(StringIO(buffer))
pd_df = pd.read_csv(StringIO(buffer))
assert cu_df.shape == pd_df.shape
assert list(cu_df.columns.values) == list(pd_df.columns.values)
def test_csv_reader_dtype_inference_whitespace():
names = ["float_point", "integer"]
lines = [
",".join(names),
" 1.2, 1",
"2.3,2 ",
" 3.4, 3",
" 4.5,4",
"5.6, 5",
" 6.7,6 ",
]
buffer = "\n".join(lines)
cu_df = read_csv(StringIO(buffer))
pd_df = pd.read_csv(StringIO(buffer))
assert cu_df.shape == pd_df.shape
assert list(cu_df.columns.values) == list(pd_df.columns.values)
def test_csv_reader_empty_dataframe():
dtypes = ["float64", "int64"]
buffer = "float_point, integer"
# should work fine with dtypes
df = read_csv(StringIO(buffer), dtype=dtypes)
assert df.shape == (0, 2)
assert all(df.dtypes == ["float64", "int64"])
# should default to string columns without dtypes
df = read_csv(StringIO(buffer))
assert df.shape == (0, 2)
assert all(df.dtypes == ["object", "object"])
def test_csv_reader_filenotfound(tmpdir):
fname = "non-existing-filename.csv"
# should raise an error
with pytest.raises(FileNotFoundError):
read_csv(str(fname))
# should raise an error
dname = tmpdir.mkdir("gdf_csv")
with pytest.raises(FileNotFoundError):
read_csv(str(dname))
@pytest.mark.parametrize(
"src", ["filepath", "pathobj", "bytes_io", "string_io", "url"]
)
def test_csv_reader_filepath_or_buffer(tmpdir, path_or_buf, src):
expect = pd.read_csv(path_or_buf("filepath"))
got = cudf.read_csv(path_or_buf(src))
assert_eq(expect, got)
def test_csv_reader_carriage_return(tmpdir):
rows = 1000
names = ["int_row", "int_double_row"]
buffer = ",".join(names) + "\r\n"
for row in range(rows):
buffer += str(row) + ", " + str(2 * row) + "\r\n"
df = read_csv(StringIO(buffer))
assert len(df) == rows
for row in range(0, rows):
assert df[names[0]][row] == row
assert df[names[1]][row] == 2 * row
def test_csv_reader_tabs():
names = ["float_point", "integer", "date"]
lines = [
",".join(names),
"1.2,\t12, \t11/22/1995",
"3.4\t,\t34\t,\t 01/01/2001",
"\t 5.6,56 \t, 12/12/1970",
"\t7.8 , 78\t,06/15/2018 \t",
]
buffer = "\n".join(lines)
df = read_csv(StringIO(buffer), parse_dates=["date"])
assert df.shape == (4, 3)
floats = [1.2, 3.4, 5.6, 7.8]
ints = [12, 34, 56, 78]
dates = [
"1995-11-22T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"1970-12-12T00:00:00.000000000",
"2018-06-15T00:00:00.000000000",
]
np.testing.assert_allclose(floats, df["float_point"].to_array())
np.testing.assert_allclose(ints, df["integer"].to_array())
for row in range(4):
assert str(df["date"][row]) == dates[row]
@pytest.mark.parametrize("segment_bytes", [10000, 19999, 30001, 36000])
def test_csv_reader_byte_range(tmpdir, segment_bytes):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file16.csv")
names = ["int1", "int2"]
rows = 10000
with open(str(fname), "w") as fp:
for i in range(rows):
fp.write(str(i) + ", " + str(2 * i) + " \n")
file_size = os.stat(str(fname)).st_size
ref_df = read_csv(str(fname), names=names).to_pandas()
dfs = []
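    # read the file in fixed-size byte_range segments and concatenate the pieces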
for segment in range((file_size + segment_bytes - 1) // segment_bytes):
dfs.append(
read_csv(
str(fname),
names=names,
byte_range=(segment * segment_bytes, segment_bytes),
)
)
df = cudf.concat(dfs).to_pandas()
assert list(df["int1"]) == list(ref_df["int1"])
assert list(df["int2"]) == list(ref_df["int2"])
def test_csv_reader_byte_range_type_corner_case(tmpdir):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file17.csv")
cudf.datasets.timeseries(
start="2000-01-01",
end="2000-01-02",
dtypes={"name": str, "id": int, "x": float, "y": float},
).to_csv(fname, chunksize=100000)
byte_range = (2_147_483_648, 0)
with pytest.raises(RuntimeError, match="Offset is past end of file"):
cudf.read_csv(fname, byte_range=byte_range, header=None)
@pytest.mark.parametrize("segment_bytes", [10, 19, 31, 36])
def test_csv_reader_byte_range_strings(segment_bytes):
names = ["strings"]
buffer = "\n".join('"' + str(x) + '"' for x in range(1, 100))
file_size = len(buffer)
ref_df = read_csv(StringIO(buffer), names=names).to_pandas()
dfs = []
for segment in range((file_size + segment_bytes - 1) // segment_bytes):
dfs.append(
read_csv(
StringIO(buffer),
names=names,
byte_range=(segment * segment_bytes, segment_bytes),
)
)
df = cudf.concat(dfs).to_pandas()
assert list(df["strings"]) == list(ref_df["strings"])
@pytest.mark.parametrize(
"header_row, skip_rows, skip_blanks",
[
(1, 0, True),
("infer", 2, True),
(1, 4, True),
(3, 0, False),
("infer", 5, False),
],
)
@pytest.mark.parametrize("line_terminator", ["\n", "\r\n"])
def test_csv_reader_blanks_and_comments(
skip_rows, header_row, skip_blanks, line_terminator
):
lines = [
"# first comment line",
line_terminator,
"# third comment line",
"1,2,3",
"4,5,6",
"7,8,9",
line_terminator,
"# last comment line",
line_terminator,
"1,1,1",
]
buffer = line_terminator.join(lines)
cu_df = read_csv(
StringIO(buffer),
comment="#",
header=header_row,
skiprows=skip_rows,
skip_blank_lines=skip_blanks,
)
pd_df = pd.read_csv(
StringIO(buffer),
comment="#",
header=header_row,
skiprows=skip_rows,
skip_blank_lines=skip_blanks,
)
assert cu_df.shape == pd_df.shape
assert list(cu_df.columns.values) == list(pd_df.columns.values)
def test_csv_reader_prefix():
lines = ["1, 1, 1, 1"]
buffer = "\n".join(lines)
prefix_str = "a_prefix"
df = read_csv(StringIO(buffer), header=None, prefix=prefix_str)
column_names = list(df.columns.values)
for col in range(len(column_names)):
assert column_names[col] == prefix_str + str(col)
def test_csv_reader_delim_whitespace():
buffer = "1 2 3\n4 5 6"
# with header row
cu_df = read_csv(StringIO(buffer), delim_whitespace=True)
pd_df = pd.read_csv(StringIO(buffer), delim_whitespace=True)
assert_eq(pd_df, cu_df)
# without header row
cu_df = read_csv(StringIO(buffer), delim_whitespace=True, header=None)
pd_df = pd.read_csv(StringIO(buffer), delim_whitespace=True, header=None)
assert pd_df.shape == cu_df.shape
# should raise an error if used with delimiter or sep
with pytest.raises(ValueError):
read_csv(StringIO(buffer), delim_whitespace=True, delimiter=" ")
with pytest.raises(ValueError):
read_csv(StringIO(buffer), delim_whitespace=True, sep=" ")
def test_csv_reader_unnamed_cols():
# first and last columns are unnamed
buffer = ",1,2,3,\n4,5,6,7,8"
cu_df = read_csv(StringIO(buffer))
pd_df = pd.read_csv(StringIO(buffer))
assert all(pd_df.columns == cu_df.columns)
assert pd_df.shape == cu_df.shape
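# Quoted header fields may themselves contain delimiters and newlines; the
# second buffer exercises a quoting pattern that pandas fails to parse, so
# only the cuDF shape is checked for it.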
def test_csv_reader_header_quotation():
buffer = '"1,,1","2,\n,2",3\n+4,+5,+6'
cu_df = read_csv(StringIO(buffer))
pd_df = pd.read_csv(StringIO(buffer))
assert cu_df.shape == (1, 3)
assert_eq(pd_df, cu_df)
# test cases that fail with pandas
buffer_pd_fail = '"1,one," , ",2,two" ,3\n4,5,6'
cu_df = read_csv(StringIO(buffer_pd_fail))
assert cu_df.shape == (1, 3)
def test_csv_reader_oversized_byte_range():
buffer = "a,b,c,d,e\n4,5,6,7,8"
cu_df = read_csv(StringIO(buffer), byte_range=(0, 1024))
pd_df = pd.read_csv(StringIO(buffer))
assert all(pd_df.columns == cu_df.columns)
assert pd_df.shape == cu_df.shape
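# index_col can be given as a column name, a column position (with or
# without explicit names), or False to not use any column as the index.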
def test_csv_reader_index_col():
buffer = "0,1,2\n3,4,5\n6,7,8"
names = ["int1", "int2", "int3"]
# using a column name
cu_df = read_csv(StringIO(buffer), names=names, index_col="int1")
pd_df = pd.read_csv(StringIO(buffer), names=names, index_col="int1")
assert_eq(pd_df, cu_df)
# using a column index
cu_df = read_csv(StringIO(buffer), header=None, index_col=0)
pd_df = pd.read_csv(StringIO(buffer), header=None, index_col=0)
assert_eq(cu_df.index, pd_df.index)
# using a column index with names
cu_df = read_csv(StringIO(buffer), header=None, index_col=0, names=names)
pd_df = pd.read_csv(
StringIO(buffer), header=None, index_col=0, names=names
)
assert_eq(cu_df.index, pd_df.index)
# passing False to avoid using a column as index (no-op in cuDF)
cu_df = read_csv(StringIO(buffer), header=None, index_col=False)
pd_df = pd.read_csv(StringIO(buffer), header=None, index_col=False)
assert_eq(cu_df.index, pd_df.index)
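# `names` accepts any list-like, including non-string labels and ranges;
# names=None falls back to inferring the header from the first line.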
@pytest.mark.parametrize(
"names", [["a", "b", "c"], [416, 905, 647], range(3), None]
)
def test_csv_reader_column_names(names):
buffer = "0,1,2\n3,4,5\n6,7,8"
df = read_csv(StringIO(buffer), names=names)
if names is None:
assert list(df) == ["0", "1", "2"]
else:
assert list(df) == list(names)
def test_csv_reader_bools_false_positives(tmpdir):
# values that are equal to ["True", "TRUE", "False", "FALSE"]
# when using ints to detect bool values
items = [3977, 4329, 24015, 27567]
buffer = "\n".join(str(i) for i in items)
df = read_csv(StringIO(buffer), header=None, dtype=["int32"])
np.testing.assert_array_equal(items, df["0"].to_array())
def test_csv_reader_aligned_byte_range(tmpdir):
fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file19.csv")
nelem = 1000
input_df = pd.DataFrame(
{"key": np.arange(0, nelem), "zeros": np.zeros(nelem)}
)
input_df.to_csv(fname)
df = cudf.read_csv(str(fname), byte_range=(0, 4096))
# read_csv call above used to crash; the assert below is not crucial
assert np.count_nonzero(df["zeros"].to_pandas().values) == 0
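# Hexadecimal literals are only parsed as integers when an explicit
# "hex"/"hex32"/"hex64" dtype is requested; the pandas frame is built from
# the pre-converted integer values for comparison.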
@pytest.mark.parametrize(
"pdf_dtype, gdf_dtype",
[(None, None), ("int", "hex"), ("int32", "hex32"), ("int64", "hex64")],
)
def test_csv_reader_hexadecimals(pdf_dtype, gdf_dtype):
lines = ["0x0", "-0x1000", "0xfedcba", "0xABCDEF", "0xaBcDeF", "9512c20b"]
values = [int(hex_int, 16) for hex_int in lines]
buffer = "\n".join(lines)
if gdf_dtype is not None:
# require explicit `hex` dtype to parse hexadecimals
        pdf = pd.DataFrame(data=values, dtype=pdf_dtype, columns=["hex_int"])
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
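# Mixin asserting that a container rejects in-place mutation: item and slice
# assignment, deletion, and any methods listed in `mutable_methods` must all
# raise a TypeError matching `mutable_regex`, while slicing still returns the
# container's own type.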
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
    def check_mutable_error(self, func, *args, **kwargs):
        # The mutating callable (plus any arguments) must raise a TypeError
        # whose message matches mutable_regex.
        with pytest.raises(TypeError, match=self.mutable_regex):
            func(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
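# PandasDelegate forwards properties/methods from a delegate class onto an
# accessor; as the comment in test_invalid_delegation notes, the default
# _delegate_* implementations raise TypeError until overridden.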
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
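# Shared fixture base for the ops tests: setup_method constructs named sample
# indexes of several dtypes for subclasses to iterate over, and _allow_na_ops
# skips NaN cases for dtypes that cannot hold NA.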
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
        self.int_index = tm.makeIntIndex(10, name="a")