"""
Sample code
Reference: https://programming-info.dream-target.jp/streamlit-start
"""
import streamlit as st
import pandas as pd
import numpy as np
st.title("Streamlit sample")
DATE_COLUMN = "date/time"
DATA_URL = (
"https://s3-us-west-2.amazonaws.com/"
"streamlit-demo-data/uber-raw-data-sep14.csv.gz"
)
@st.cache
def load_data(nrows):
    data = pd.read_csv(DATA_URL, nrows=nrows)
    return data
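# Illustrative sketch (not part of the original snippet): how load_data() is
# typically consumed further down a Streamlit script. The row count and the
# widget labels below are assumptions.
data_load_state = st.text("Loading data...")
data = load_data(10000)
data_load_state.text("Loading data... done!")
st.subheader("Raw data")
st.write(data)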
"""Core utilities"""
import sys
import logging
import inspect
from functools import singledispatch
from copy import deepcopy
from typing import (
Any,
Callable,
Iterable,
List,
Mapping,
Sequence,
Union,
Tuple,
)
import numpy
from numpy import array as Array
import pandas
from pandas import Categorical, DataFrame, Series
from pipda import register_func
from pipda.symbolic import Reference
from pipda.utils import CallingEnvs
from .exceptions import (
ColumnNotExistingError,
DataUnrecyclable,
NameNonUniqueError,
)
from .contexts import Context
from .types import (
StringOrIter,
Dtype,
is_iterable,
is_scalar,
is_categorical,
is_null,
)
from .defaults import DEFAULT_COLUMN_PREFIX, NA_REPR
# logger
logger = logging.getLogger("datar")
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler(sys.stderr)
stream_handler.setFormatter(
logging.Formatter(
"[%(asctime)s][%(name)s][%(levelname)7s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
logger.addHandler(stream_handler)
def vars_select(
all_columns: Iterable[str],
*columns: Any,
raise_nonexists: bool = True,
base0: bool = None,
) -> List[int]:
# TODO: support selecting data-frame columns
"""Select columns
Args:
all_columns: The column pool to select
*columns: arguments to select from the pool
        raise_nonexists: Whether to raise an exception when a column does not
            exist in the pool
base0: Whether indexes are 0-based if columns are selected by indexes.
If not given, will use `datar.base.get_option('index.base.0')`
Returns:
The selected indexes for columns
Raises:
ColumnNotExistingError: When the column does not exist in the pool
and raise_nonexists is True.
"""
from .collections import Collection
from ..base import unique
columns = [
column.name if isinstance(column, Series) else column
for column in columns
]
selected = Collection(*columns, pool=list(all_columns), base0=base0)
if raise_nonexists and selected.unmatched and selected.unmatched != {None}:
raise ColumnNotExistingError(
f"Columns `{selected.unmatched}` do not exist."
)
return unique(selected).astype(int)
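# Illustrative usage (an assumption, not part of the module): vars_select
# resolves column names against a column pool and returns integer positions
# into `all_columns`.
#
#   vars_select(["x", "y", "z"], "y", "z")   # -> positions [1, 2]
#   vars_select(["x", "y", "z"], "a")        # -> raises ColumnNotExistingError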
def recycle_value(
value: Any, size: int, name: str = None
) -> Union[DataFrame, numpy.ndarray]:
"""Recycle a value based on a dataframe
Args:
value: The value to be recycled
size: The size to recycle to
name: The name to show in the error if failed to recycle
Returns:
The recycled value
"""
# TODO: follow base R's recycling rule? i.e. size 2 -> 4
from ..base import NA
if is_scalar(value):
value = [value]
length = len(value)
if length not in (0, 1, size):
name = "value" if not name else f"`{name}`"
expect = "1" if size == 1 else f"(1, {size})"
raise DataUnrecyclable(
f"Cannot recycle {name} to size {size}, "
f"expect {expect}, got {length}."
)
if isinstance(value, DataFrame):
if length == size == 0:
return DataFrame(columns=value.columns)
if length == 0:
value = DataFrame([[NA] * value.shape[1]], columns=value.columns)
if length == 1 and size > length:
return value.iloc[[0] * size, :].reset_index(drop=True)
return value
cats = categorized(value).categories if is_categorical(value) else None
if length == size == 0:
return [] if cats is None else Categorical([], categories=cats)
if length == 0:
value = [NA]
if isinstance(value, Series):
        # try to keep the Series class, since some operations only work
        # (or only work correctly) with a Series. For example:
        #   Series([True, True]) & Series([False, NA]) -> [False, False]
        # but with a numpy array this raises an error, since NA is a float
if length == 1 and size > length:
value = value.iloc[[0] * size].reset_index(drop=True)
return value
if isinstance(value, tuple):
value = list(value)
# dtype = getattr(value, 'dtype', None)
if length == 1 and size > length:
value = list(value) * size
if cats is not None:
return Categorical(value, categories=cats)
is_elem_iter = any(is_iterable(val) for val in value)
if is_elem_iter:
# without dtype: VisibleDeprecationWarning
# return Array(value, dtype=object)
# The above does not keep [DataFrame()] structure
return value
    # Avoid numpy.nan being converted to the string 'nan' when other elements are strings
out = Array(value)
if numpy.issubdtype(out.dtype, numpy.str_) and is_null(value).any():
return Array(value, dtype=object)
return out
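# Illustrative behaviour (an assumption, not part of the module): only lengths
# 0, 1 and `size` are recyclable, mirroring the checks above.
#
#   recycle_value(1, 3)        # scalar -> array([1, 1, 1])
#   recycle_value([1, 2], 2)   # length already matches -> array([1, 2])
#   recycle_value([1, 2], 3)   # -> raises DataUnrecyclable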
def recycle_df(
df: DataFrame,
value: Any,
df_name: str = None,
value_name: str = None,
) -> Tuple[DataFrame, Any]:
"""Recycle the dataframe based on value"""
if length_of(df) == 1:
df = recycle_value(df, length_of(value), df_name)
value = recycle_value(value, length_of(df), value_name)
return df, value
def categorized(data: Any) -> Any:
"""Get the Categorical object"""
if not is_categorical(data):
return data
if isinstance(data, Series):
return data.values
return data
@singledispatch
def to_df(data: Any, name: str = None) -> DataFrame:
"""Convert an object to a data frame"""
if is_scalar(data):
data = [data]
if name is None:
return DataFrame(data)
return DataFrame({name: data})
@to_df.register(numpy.ndarray)
def _(data: numpy.ndarray, name: StringOrIter = None) -> DataFrame:
if name is not None and is_scalar(name):
name = [name]
if len(data.shape) == 1:
        return DataFrame(data, columns=name)
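# Illustrative usage (an assumption, not part of the module): to_df() wraps
# scalars, lists and arrays into a DataFrame, optionally naming the column.
#
#   to_df([1, 2, 3], name="x")          # single column named "x"
#   to_df(numpy.array([1, 2, 3]), "x")  # dispatches to the ndarray register above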
from datetime import datetime
from decimal import Decimal
from io import StringIO
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
def test_repr():
# GH18203
result = repr(pd.Grouper(key="A", level="B"))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
tm.assert_series_equal(agged, grouped.mean())
tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
tm.assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
tm.assert_series_equal(
value_grouped.aggregate(np.mean), agged, check_index_type=False
)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate({"one": np.mean, "two": np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
msg = "Must produce aggregated value"
# exception raised is type Exception
with pytest.raises(Exception, match=msg):
grouped.aggregate(lambda x: x * 2)
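# Worked check of the assertions above: np.arange(9) // 3 is
# [0, 0, 0, 1, 1, 1, 2, 2, 2] on index 0..8, and grouping by index // 3 puts
# indexes 3, 4 and 5 (all holding the value 1) into group 1, so agged[1] == 1.
# Likewise transform(x * x.sum()) at index 7 gives 2 * (2 + 2 + 2) == 12.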
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.codes[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype("O")).sum()
tm.assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df["value"] = range(len(df))
def max_value(group):
return group.loc[group["value"].idxmax()]
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
expected = Series(
[np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
index=["A", "B", "C", "D", "value"],
)
tm.assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
with tm.assert_produces_warning(FutureWarning):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
dict(
A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
B=Series(np.arange(7), dtype="int64"),
C=date_range("20130101", periods=7),
)
)
def f(grp):
return grp.iloc[0]
expected = df.groupby("A").first()[["B"]]
result = df.groupby("A").apply(f)[["B"]]
tm.assert_frame_equal(result, expected)
def f(grp):
if grp.name == "Tiger":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Tiger"] = np.nan
tm.assert_frame_equal(result, e)
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Pony"] = np.nan
tm.assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["C"]]
e = df.groupby("A").first()[["C"]]
e.loc["Pony"] = pd.NaT
tm.assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0].loc["C"]
result = df.groupby("A").apply(f)
e = df.groupby("A").first()["C"].copy()
e.loc["Pony"] = np.nan
e.name = None
tm.assert_series_equal(result, e)
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
g = lambda x: np.percentile(x, 80, axis=0)
# Series
ts_grouped = ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(0.8)
trans_expected = ts_grouped.transform(g)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(np.percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
expected = df_grouped.quantile(0.8)
tm.assert_frame_equal(apply_result, expected, check_names=False)
tm.assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
tm.assert_frame_equal(agg_result, expected)
tm.assert_frame_equal(apply_result, expected, check_names=False)
def test_len():
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
assert len(grouped) == len(df)
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
assert len(df.groupby(("a"))) == 0
assert len(df.groupby(("b"))) == 3
assert len(df.groupby(["a", "b"])) == 3
def test_basic_regression():
# regression
result = Series([1.0 * x for x in list(range(1, 10)) * 10])
data = np.random.random(1100) * 10.0
groupings = Series(data)
grouped = result.groupby(groupings)
grouped.mean()
@pytest.mark.parametrize(
"dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
)
def test_with_na_groups(dtype):
index = Index(np.arange(10))
values = Series(np.ones(10), index, dtype=dtype)
labels = Series(
[np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
index=index,
)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
# assert issubclass(agged.dtype.type, np.integer)
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
assert issubclass(agged.dtype.type, np.dtype(dtype).type)
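# Worked check: of the ten labels above, four are "bar", two are "foo" and four
# are NaN; groupby drops the NaN labels, so agg(len) yields {"bar": 4, "foo": 2}.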
def test_indices_concatenation_order():
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
res = DataFrame(columns=["a"], index=multiindex)
return res
else:
y = y.set_index(["b", "c"])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(["b", "c"])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(
levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
)
res = DataFrame(columns=["a", "b"], index=multiindex)
return res
else:
return y
df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})
df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})
# correct result
result1 = df.groupby("a").apply(f1)
result2 = df2.groupby("a").apply(f1)
tm.assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
msg = "Cannot concat indices that do not have the same number of levels"
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f2)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f2)
# should fail (incorrect shape)
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f3)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f3)
def test_attr_wrapper(ts):
grouped = ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
tm.assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {name: gp.describe() for name, gp in grouped}
expected = DataFrame(expected).T
tm.assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
msg = "'SeriesGroupBy' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
getattr(grouped, "foo")
def test_frame_groupby(tsframe):
grouped = tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == 5
assert len(aggregated.columns) == 4
# by string
tscopy = tsframe.copy()
tscopy["weekday"] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby("weekday").aggregate(np.mean)
tm.assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
assert len(transformed) == 30
assert len(transformed.columns) == 4
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)
# iterate
for weekday, group in grouped:
assert group.index[0].weekday() == weekday
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in groups.items():
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
grouped = tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == len(tsframe)
assert len(aggregated.columns) == 2
# transform
tf = lambda x: x - x.mean()
groupedT = tsframe.T.groupby(mapping, axis=0)
tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
assert len(v.columns) == 2
def test_frame_set_name_single(df):
grouped = df.groupby("A")
result = grouped.mean()
assert result.index.name == "A"
result = df.groupby("A", as_index=False).mean()
assert result.index.name != "A"
result = grouped.agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"foo": np.mean, "bar": np.std})
def test_multi_func(df):
col1 = df["A"]
col2 = df["B"]
grouped = df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = df.groupby(["A", "B"]).mean()
# TODO groupby get drops names
tm.assert_frame_equal(
agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
)
# some "groups" with no data
df = DataFrame(
{
"v1": np.random.randn(6),
"v2": np.random.randn(6),
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
},
index=["one", "two", "three", "four", "five", "six"],
)
# only verify that it works for now
grouped = df.groupby(["k1", "k2"])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
grouped = df.groupby(["A", "B"])["C"]
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({"mean": grouped.agg(np.mean), "std": grouped.agg(np.std)})
tm.assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
grouped = data.groupby(["A", "B"])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = pd.concat(
[grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
keys=["D", "E", "F"],
axis=1,
)
assert isinstance(agged.index, MultiIndex)
assert isinstance(expected.index, MultiIndex)
tm.assert_frame_equal(agged, expected)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
def test_groupby_multiple_columns(df, op):
data = df
grouped = data.groupby(["A", "B"])
result1 = op(grouped)
keys = []
values = []
for n1, gp1 in data.groupby("A"):
for n2, gp2 in gp1.groupby("B"):
keys.append((n1, n2))
values.append(op(gp2.loc[:, ["C", "D"]]))
mi = MultiIndex.from_tuples(keys, names=["A", "B"])
expected = pd.concat(values, axis=1).T
expected.index = mi
# a little bit crude
for col in ["C", "D"]:
result_col = op(grouped[col])
pivoted = result1[col]
exp = expected[col]
tm.assert_series_equal(result_col, exp)
tm.assert_series_equal(pivoted, exp)
# test single series works the same
result = data["C"].groupby([data["A"], data["B"]]).mean()
expected = data.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
def test_as_index_select_column():
# GH 5764
df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
result = df.groupby("A", as_index=False)["B"].get_group(1)
expected = pd.Series([2, 4], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum())
expected = pd.Series(
[2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
)
tm.assert_series_equal(result, expected)
def test_groupby_as_index_select_column_sum_empty_df():
# GH 35246
df = DataFrame(columns=["A", "B", "C"])
left = df.groupby(by="A", as_index=False)["B"].sum()
assert type(left) is DataFrame
assert left.to_dict() == {"A": {}, "B": {}}
def test_groupby_as_index_agg(df):
grouped = df.groupby("A", as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
grouped = df.groupby("A", as_index=True)
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"Q": np.sum})
# multi-key
grouped = df.groupby(["A", "B"], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
expected3 = grouped["C"].sum()
expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
result3 = grouped["C"].agg({"Q": np.sum})
tm.assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=["jim", "joe", "jolie"])
ts = Series(np.random.randint(5, 10, 50), name="jim")
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
tm.assert_frame_equal(left, right)
def test_ops_not_as_index(reduction_func):
# GH 10355, 21090
# Using as_index=False should not modify grouped column
if reduction_func in ("corrwith",):
pytest.skip("Test not applicable")
if reduction_func in ("nth", "ngroup",):
pytest.skip("Skip until behavior is determined (GH #5755)")
df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
expected = getattr(df.groupby("a"), reduction_func)()
if reduction_func == "size":
expected = expected.rename("size")
expected = expected.reset_index()
g = df.groupby("a", as_index=False)
result = getattr(g, reduction_func)()
tm.assert_frame_equal(result, expected)
result = g.agg(reduction_func)
tm.assert_frame_equal(result, expected)
result = getattr(g["b"], reduction_func)()
tm.assert_frame_equal(result, expected)
result = g["b"].agg(reduction_func)
tm.assert_frame_equal(result, expected)
def test_as_index_series_return_frame(df):
grouped = df.groupby("A", as_index=False)
grouped2 = df.groupby(["A", "B"], as_index=False)
result = grouped["C"].agg(np.sum)
expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].agg(np.sum)
expected2 = grouped2.agg(np.sum).loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
result = grouped["C"].sum()
expected = grouped.sum().loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].sum()
expected2 = grouped2.sum().loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
def test_as_index_series_column_slice_raises(df):
# GH15072
grouped = df.groupby("A", as_index=False)
msg = r"Column\(s\) C already selected"
with pytest.raises(IndexError, match=msg):
grouped["C"].__getitem__("D")
def test_groupby_as_index_cython(df):
data = df
# single-key
grouped = data.groupby("A", as_index=False)
result = grouped.mean()
expected = data.groupby(["A"]).mean()
expected.insert(0, "A", expected.index)
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(["A", "B"], as_index=False)
result = grouped.mean()
expected = data.groupby(["A", "B"]).mean()
arrays = list(zip(*expected.index.values))
expected.insert(0, "A", arrays[0])
expected.insert(1, "B", arrays[1])
expected.index = np.arange(len(expected))
    tm.assert_frame_equal(result, expected)
# Web Scraping Demo
import time
import os
import string
from datetime import datetime
import requests
from diskcache import Cache
from bs4 import BeautifulSoup
import pandas as pd
from docx import Document
from docx.shared import Pt, RGBColor
class Fox():
"""
A wrapper for requests that automates interaction with diskcache
"""
    def __init__(self):
        self.base = None
        self.params = None
        self.headers = []
        self.private_keys = []
        self.cache_path = None
        self.cache_ref = None
        self.accept_codes = [200]
def update(self,
base = None,
params = None,
headers = None,
private_keys = None,
cache_path = None,
cache_ref = None,
accept_codes = None,
):
"""
Used to set up conditions for a request, including base, parameters, cache reference, and which codes to accept
"""
if base != None:
self.base = base
self.params = None
if params != None:
self.params = params
if headers != None:
self.headers = headers
if private_keys != None:
self.private_keys = private_keys
if cache_path != None:
self.cache_path = cache_path
if not os.path.exists(self.cache_path):
os.makedirs(self.cache_path)
if cache_ref != None:
self.cache_ref = cache_ref
if accept_codes != None:
self.accept_codes = accept_codes
self.cache_key = self.make_cache_key(self.base,self.params,self.private_keys)
def make_cache_key(self, base=None, params=None, private_keys=None):
"""
Makes a unique string for cache access
"""
kvs = []
if (base == None) or (params in [None,{}]):
return base
else:
alpha_keys = sorted(params.keys())
for k in alpha_keys:
if k not in private_keys:
kvs.append('{}-{}'.format(k, params[k]))
return base + '_'.join(kvs)
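    # Illustrative example (an assumption, not part of the original demo):
    # with base="https://example.com/api?", params={"q": "cats", "page": 2}
    # and private_keys=["api_key"], the key becomes
    # "https://example.com/api?page-2_q-cats" -- keys listed as private are
    # omitted so secrets never end up in the cache key.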
def make_request(self, sleep = None):
"""
Handles whether to actually send the request or just skip it because it's already cached
"""
if self.base == None:
return None
returnable = None
if self.cache_ref == None:
if self.cache_path == None:
                response = self.send_request()
else:
with Cache(self.cache_path) as cache:
if self.cache_key not in cache:
returnable = self.send_request(cache)
else:
if self.cache_key not in self.cache_ref:
if sleep != None:
time.sleep(sleep)
returnable = self.send_request(self.cache_ref)
if returnable != None:
return returnable
def send_request(self, cache=None):
"""
Actually sends the request and checks the response code
"""
response = requests.get(
self.base,
params=self.params,
headers=self.headers,
)
if response.status_code not in self.accept_codes:
print("Response:",response.status_code,':\n',self.base,self.params,self.headers)
print(response.text)
else:
if cache != None:
cache[self.cache_key] = response
return response
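# Illustrative sketch (an assumption, not part of the original demo): minimal
# standalone use of Fox outside manage_apply(). The URL, user agent and cache
# path below are placeholders.
#
#   with Cache("demo_cache") as cache:
#       fox = Fox()
#       fox.update(
#           base="https://www.ucpress.edu/book/9780520000000",
#           headers={"User-Agent": "example-agent"},
#           cache_ref=cache,
#           cache_path="demo_cache",
#       )
#       fox.make_request(sleep=1)
#       response = cache[fox.cache_key]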
def manage_scraping(row,cache,fox):
"""
Takes a row (pandas Series) and iterates over the data needed to form URLs. If conditions are met, makes the scraping request and adds the resulting data to the row
"""
description_found = False
for isbn_format in ["ebook","hardcover","paper"]:
if "University of California Press" == row["publisher"]:
if description_found == False:
isbn = row[isbn_format]
url = "https://www.ucpress.edu/book/" + str(isbn)
fox.update(base=url)
fox.make_request(sleep=3)
resp = cache[fox.cache_key]
if resp.status_code != 200:
print("Nothing found at", url)
print(resp.status_code)
else:
print("Webpage found at", url)
soup = BeautifulSoup(resp.text,"html.parser")
description_section = soup.find("section",id="link-about-book")
if description_section != None:
description_found = True
just_the_description = description_section.find("article")
simplified_text = text_with_newlines(just_the_description)
row["description"] = simplified_text
return row
def manage_apply():
"""
Handles opening the input file, opening the cache file, iterating over the input rows, and forming the output file(s)
"""
spreadsheet = pd.read_excel("metadata.xlsx",dtype=str)
new_headers = [
"handle",
"title",
"subtitle",
"publisher",
"ebook",
"hardcover",
"paper",
"description"
]
spreadsheet = spreadsheet.reindex(columns = new_headers)
with Cache("demo_cache") as cache:
user_agent = ""
fox = Fox()
fox.update(
headers = {'User-Agent':user_agent},
cache_ref = cache,
accept_codes=[200]
)
spreadsheet = spreadsheet.apply(
lambda row : manage_scraping(row,cache,fox),
axis=1
)
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
outdir = "outputs"
if not os.path.exists(outdir):
os.makedirs(outdir)
spreadsheet.to_excel(f"{outdir}/{timestamp}_output.xlsx",index=False)
new_document = Document()
new_document.add_heading("Scraped Book Descriptions",0)
new_document.styles["Normal"].font.name = "Times New Roman"
new_document.styles["Heading 1"].font.size = Pt(10)
for each_style in ["Title","Heading 1","Heading 2"]:
new_document.styles[each_style].font.name = None
new_document.styles[each_style].font.color.rgb = RGBColor(0,0,0)
for i in spreadsheet.index.values:
        if not pd.isnull(spreadsheet.loc[i, "description"]):
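            # The original sample is truncated here; the lines below are a
            # plausible continuation (an assumption, not the author's code)
            # that writes each title and description into the Word document.
            new_document.add_heading(str(spreadsheet.loc[i, "title"]), level=1)
            new_document.add_paragraph(str(spreadsheet.loc[i, "description"]))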
import math
import queue
from datetime import datetime, timedelta, timezone
import pandas as pd
from storey import build_flow, SyncEmitSource, Reduce, Table, AggregateByKey, FieldAggregator, NoopDriver, \
DataframeSource
from storey.dtypes import SlidingWindows, FixedWindows, EmitAfterMaxEvent, EmitEveryEvent
test_base_time = datetime.fromisoformat("2020-07-21T21:40:00+00:00")
def append_return(lst, x):
lst.append(x)
return lst
def test_sliding_window_simple_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg", "min", "max"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
'number_of_stuff_max_24h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
'number_of_stuff_max_24h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15, 'number_of_stuff_min_1h': 3,
'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5,
'number_of_stuff_max_24h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21, 'number_of_stuff_min_1h': 4,
'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6,
'number_of_stuff_max_24h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28, 'number_of_stuff_min_1h': 5,
'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7,
'number_of_stuff_max_24h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36, 'number_of_stuff_min_1h': 6,
'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8,
'number_of_stuff_max_24h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45, 'number_of_stuff_min_1h': 7,
'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9,
'number_of_stuff_max_24h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'number_of_stuff_avg_24h': 4.5}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
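# Worked check of one entry above: events arrive every 25 minutes, so when
# col1 == 5 is emitted (t0 + 125 min) the 1h window still covers the events at
# 75, 100 and 125 minutes, i.e. values 3, 4 and 5 -- hence
# number_of_stuff_sum_1h == 12, avg == 4.0, min == 3 and max == 5.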
def test_sliding_window_sparse_data():
controller = build_flow([
SyncEmitSource(),
AggregateByKey(
[FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
controller.emit({'col1': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col1': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col1': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col2': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col1': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col2': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col1': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col2': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col1': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col2': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col1': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col2': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col1': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col2': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col1': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col2': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col1': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col2': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_sparse_data_uneven_feature_occurrence():
controller = build_flow([
SyncEmitSource(),
AggregateByKey(
[FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
controller.emit({'col1': 0}, 'tal', test_base_time)
for i in range(10):
controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 1, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col2': 2, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col2': 3, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col2': 4, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col2': 5, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col2': 6, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col2': 7, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col2': 8, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col2': 9, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_multiple_keys_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, f'{i % 2}', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0,
'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 2, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2, 'number_of_stuff_sum_24h': 2,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 4, 'number_of_stuff_sum_2h': 4, 'number_of_stuff_sum_24h': 4,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 4, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 9, 'number_of_stuff_sum_24h': 9,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 6, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12, 'number_of_stuff_sum_24h': 12,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 16, 'number_of_stuff_sum_2h': 16, 'number_of_stuff_sum_24h': 16,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 8, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 20,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 25, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 25,
'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 5.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_aggregations_with_filters_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'),
aggr_filter=lambda element: element['is_valid'] == 0)],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i, 'is_valid': i % 2}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'is_valid': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0,
'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'is_valid': 1, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0,
'number_of_stuff_avg_24h': 0.0},
{'col1': 2, 'is_valid': 0, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2,
'number_of_stuff_sum_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0,
'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'is_valid': 1, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2,
'number_of_stuff_sum_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0,
'number_of_stuff_avg_24h': 1.0},
{'col1': 4, 'is_valid': 0, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0,
'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'is_valid': 1, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0,
'number_of_stuff_avg_24h': 2.0},
{'col1': 6, 'is_valid': 0, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12,
'number_of_stuff_sum_24h': 12, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'is_valid': 1, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12,
'number_of_stuff_sum_24h': 12, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 8, 'is_valid': 0, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_sum_24h': 20, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'is_valid': 1, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_sum_24h': 20, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 4.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_aggregations_with_max_values_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("num_hours_with_stuff_in_the_last_24h", "col1", ["count"],
SlidingWindows(['24h'], '1h'),
max_value=5)],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=10 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'num_hours_with_stuff_in_the_last_24h_count_24h': 1},
{'col1': 1, 'num_hours_with_stuff_in_the_last_24h_count_24h': 2},
{'col1': 2, 'num_hours_with_stuff_in_the_last_24h_count_24h': 3},
{'col1': 3, 'num_hours_with_stuff_in_the_last_24h_count_24h': 4},
{'col1': 4, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 5, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 6, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 7, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 8, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 9, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_simple_aggregation_flow_multiple_fields():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_things", "col2", ["count"],
SlidingWindows(['1h', '2h'], '15m')),
FieldAggregator("abc", "col3", ["sum"],
SlidingWindows(['24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i, 'col2': i * 1.2, 'col3': i * 2 + 4}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'col2': 0.0, 'col3': 4, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_things_count_1h': 1, 'number_of_things_count_2h': 1,
'abc_sum_24h': 4, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'col2': 1.2, 'col3': 6, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1,
'number_of_stuff_sum_24h': 1, 'number_of_things_count_1h': 2, 'number_of_things_count_2h': 2,
'abc_sum_24h': 10, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'col2': 2.4, 'col3': 8, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3,
'number_of_stuff_sum_24h': 3, 'number_of_things_count_1h': 3, 'number_of_things_count_2h': 3,
'abc_sum_24h': 18, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'col2': 3.5999999999999996, 'col3': 10, 'number_of_stuff_sum_1h': 6,
'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_things_count_1h': 4,
'number_of_things_count_2h': 4, 'abc_sum_24h': 28, 'number_of_stuff_avg_1h': 1.5, 'number_of_stuff_avg_2h': 1.5,
'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'col2': 4.8, 'col3': 12, 'number_of_stuff_sum_1h': 10, 'number_of_stuff_sum_2h': 10,
'number_of_stuff_sum_24h': 10, 'number_of_things_count_1h': 5, 'number_of_things_count_2h': 5,
'abc_sum_24h': 40, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'col2': 6.0, 'col3': 14, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 15,
'number_of_stuff_sum_24h': 15, 'number_of_things_count_1h': 6, 'number_of_things_count_2h': 6,
'abc_sum_24h': 54, 'number_of_stuff_avg_1h': 2.5, 'number_of_stuff_avg_2h': 2.5, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'col2': 7.199999999999999, 'col3': 16, 'number_of_stuff_sum_1h': 21,
'number_of_stuff_sum_2h': 21, 'number_of_stuff_sum_24h': 21, 'number_of_things_count_1h': 7,
'number_of_things_count_2h': 7, 'abc_sum_24h': 70, 'number_of_stuff_avg_1h': 3.0,
'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'col2': 8.4, 'col3': 18, 'number_of_stuff_sum_1h': 28, 'number_of_stuff_sum_2h': 28,
'number_of_stuff_sum_24h': 28, 'number_of_things_count_1h': 8, 'number_of_things_count_2h': 8,
'abc_sum_24h': 88, 'number_of_stuff_avg_1h': 3.5, 'number_of_stuff_avg_2h': 3.5, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'col2': 9.6, 'col3': 20, 'number_of_stuff_sum_1h': 36, 'number_of_stuff_sum_2h': 36,
'number_of_stuff_sum_24h': 36, 'number_of_things_count_1h': 9, 'number_of_things_count_2h': 9,
'abc_sum_24h': 108, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'col2': 10.799999999999999, 'col3': 22, 'number_of_stuff_sum_1h': 45,
'number_of_stuff_sum_2h': 45, 'number_of_stuff_sum_24h': 45,
'number_of_things_count_1h': 10, 'number_of_things_count_2h': 10, 'abc_sum_24h': 130,
'number_of_stuff_avg_1h': 4.5, 'number_of_stuff_avg_2h': 4.5, 'number_of_stuff_avg_24h': 4.5}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_simple_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
FixedWindows(['1h', '2h', '3h', '24h']))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 3, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4},
{'col1': 4, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 5,
'number_of_stuff_count_24h': 5},
{'col1': 5, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 5, 'number_of_stuff_count_3h': 6,
'number_of_stuff_count_24h': 6},
{'col1': 6, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 7, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 8, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 9, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_aggregation_with_uncommon_windows_flow():
time_format = '%Y-%m-%d %H:%M:%S.%f'
columns = ['sample_time', 'signal', 'isotope']
data = [[datetime.strptime('2021-05-30 16:42:15.797000', time_format).replace(tzinfo=timezone.utc), 790.235, 'U235'],
[datetime.strptime('2021-05-30 16:45:15.798000', time_format).replace(tzinfo=timezone.utc), 498.491, 'U235'],
[datetime.strptime('2021-05-30 16:48:15.799000', time_format).replace(tzinfo=timezone.utc), 34650.00343, 'U235'],
[datetime.strptime('2021-05-30 16:51:15.800000', time_format).replace(tzinfo=timezone.utc), 189.823, 'U235'],
[datetime.strptime('2021-05-30 16:54:15.801000', time_format).replace(tzinfo=timezone.utc), 379.524, 'U235'],
[datetime.strptime('2021-05-30 16:57:15.802000', time_format).replace(tzinfo=timezone.utc), 2225.4952, 'U235'],
[datetime.strptime('2021-05-30 17:00:15.803000', time_format).replace(tzinfo=timezone.utc), 1049.0903, 'U235'],
[datetime.strptime('2021-05-30 17:03:15.804000', time_format).replace(tzinfo=timezone.utc), 41905.63447, 'U235'],
[datetime.strptime('2021-05-30 17:06:15.805000', time_format).replace(tzinfo=timezone.utc), 4987.6764, 'U235'],
[datetime.strptime('2021-05-30 17:09:15.806000', time_format).replace(tzinfo=timezone.utc), 67657.11975, 'U235'],
[datetime.strptime('2021-05-30 17:12:15.807000', time_format).replace(tzinfo=timezone.utc), 56173.06327, 'U235'],
[datetime.strptime('2021-05-30 17:15:15.808000', time_format).replace(tzinfo=timezone.utc), 14249.67394, 'U235'],
[datetime.strptime('2021-05-30 17:18:15.809000', time_format).replace(tzinfo=timezone.utc), 656.831, 'U235'],
[datetime.strptime('2021-05-30 17:21:15.810000', time_format).replace(tzinfo=timezone.utc), 5768.4822, 'U235'],
[datetime.strptime('2021-05-30 17:24:15.811000', time_format).replace(tzinfo=timezone.utc), 929.028, 'U235'],
[datetime.strptime('2021-05-30 17:27:15.812000', time_format).replace(tzinfo=timezone.utc), 2585.9646, 'U235'],
[datetime.strptime('2021-05-30 17:30:15.813000', time_format).replace(tzinfo=timezone.utc), 358.918, 'U235']]
df = pd.DataFrame(data, columns=columns)
controller = build_flow([
DataframeSource(df, time_field="sample_time", key_field="isotope"),
AggregateByKey([FieldAggregator("samples", "signal", ["count"],
FixedWindows(['15m', '25m', '45m', '1h']))], Table("U235_test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
termination_result = controller.await_termination()
expected = [{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 1.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:42:15.797000+0000', tz='UTC'), 'signal': 790.235,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 2.0, 'samples_count_45m': 2.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 16:45:15.798000+0000', tz='UTC'), 'signal': 498.491,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 3.0, 'samples_count_45m': 3.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 16:48:15.799000+0000', tz='UTC'), 'signal': 34650.00343,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 4.0, 'samples_count_45m': 4.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 16:51:15.800000+0000', tz='UTC'), 'signal': 189.823,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 5.0, 'samples_count_45m': 5.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 16:54:15.801000+0000', tz='UTC'), 'signal': 379.524,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 6.0, 'samples_count_45m': 6.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 16:57:15.802000+0000', tz='UTC'), 'signal': 2225.4952,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 7.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:00:15.803000+0000', tz='UTC'), 'signal': 1049.0903,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 2.0, 'samples_count_45m': 8.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 17:03:15.804000+0000', tz='UTC'), 'signal': 41905.63447,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 3.0, 'samples_count_45m': 9.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 17:06:15.805000+0000', tz='UTC'), 'signal': 4987.6764,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 4.0, 'samples_count_45m': 10.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 17:09:15.806000+0000', tz='UTC'), 'signal': 67657.11975,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 5.0, 'samples_count_45m': 11.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 17:12:15.807000+0000', tz='UTC'), 'signal': 56173.06327,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 6.0, 'samples_count_45m': 1.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 17:15:15.808000+0000', tz='UTC'), 'signal': 14249.67394,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 7.0, 'samples_count_45m': 2.0, 'samples_count_1h': 7.0,
'sample_time': pd.Timestamp('2021-05-30 17:18:15.809000+0000', tz='UTC'), 'signal': 656.831,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 8.0, 'samples_count_45m': 3.0, 'samples_count_1h': 8.0,
'sample_time': pd.Timestamp('2021-05-30 17:21:15.810000+0000', tz='UTC'), 'signal': 5768.4822,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 9.0, 'samples_count_45m': 4.0, 'samples_count_1h': 9.0,
'sample_time': | pd.Timestamp('2021-05-30 17:24:15.811000+0000', tz='UTC') | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
These test the private routines in types/cast.py
"""
import pytest
from datetime import datetime, timedelta, date
import numpy as np
import pandas as pd
from pandas import (Timedelta, Timestamp, DatetimeIndex,
DataFrame, NaT, Period, Series)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
maybe_convert_objects,
cast_scalar_to_array,
infer_dtype_from_scalar,
infer_dtype_from_array,
maybe_convert_string_to_object,
maybe_convert_scalar,
find_common_type)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
PeriodDtype)
from pandas.core.dtypes.common import (
is_dtype_equal)
from pandas.util import testing as tm
class TestMaybeDowncast(object):
def test_downcast_conv(self):
# test downcasting
arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
assert (np.array_equal(result, arr))
arr = np.array([8., 8., 8., 8., 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
arr = np.array([8., 8., 8., 8., 9.0000000000005])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
# GH16875 coercing of bools
ser = Series([True, True, False])
result = maybe_downcast_to_dtype(ser, np.dtype(np.float64))
expected = ser
tm.assert_series_equal(result, expected)
# conversions
expected = np.array([1, 2])
for dtype in [np.float64, object, np.int64]:
arr = np.array([1.0, 2.0], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected, check_dtype=False)
for dtype in [np.float64, object]:
expected = np.array([1.0, 2.0, np.nan], dtype=dtype)
arr = np.array([1.0, 2.0, np.nan], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected)
# empties
for dtype in [np.int32, np.float64, np.float32, np.bool_,
np.int64, object]:
arr = np.array([], dtype=dtype)
result = | maybe_downcast_to_dtype(arr, 'int64') | pandas.core.dtypes.cast.maybe_downcast_to_dtype |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from scipy import stats as scipy_stats
def estimated_sharpe_ratio(returns):
"""
Calculate the estimated sharpe ratio (risk_free=0).
Parameters
----------
returns: np.array, pd.Series, pd.DataFrame
Returns
-------
float, pd.Series
"""
return returns.mean() / returns.std(ddof=1)
def ann_estimated_sharpe_ratio(returns=None, periods=261, *, sr=None):
"""
Calculate the annualized estimated sharpe ratio (risk_free=0).
Parameters
----------
returns: np.array, pd.Series, pd.DataFrame
periods: int
How many items in `returns` complete a year.
If returns are daily: 261, weekly: 52, monthly: 12, ...
sr: float, np.array, pd.Series, pd.DataFrame
Sharpe ratio to be annualized; its frequency must be consistent with `periods`
Returns
-------
float, pd.Series
"""
if sr is None:
sr = estimated_sharpe_ratio(returns)
sr = sr * np.sqrt(periods)
return sr
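# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how the annualized Sharpe ratio relates to the per-period estimate above.
# The synthetic daily returns are an assumption, not data from the original author.
def _example_annualized_sr():
    rng = np.random.RandomState(0)
    daily_returns = pd.Series(rng.normal(0.0005, 0.01, size=522))  # ~2 years of fabricated daily returns
    sr_daily = estimated_sharpe_ratio(daily_returns)
    sr_annual = ann_estimated_sharpe_ratio(daily_returns, periods=261)
    # annualization is a simple sqrt-of-time scaling of the per-period estimate
    assert np.isclose(sr_annual, sr_daily * np.sqrt(261))
    return sr_daily, sr_annual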
def estimated_sharpe_ratio_stdev(returns=None, *, n=None, skew=None, kurtosis=None, sr=None):
"""
Calculate the standard deviation of the sharpe ratio estimation.
Parameters
----------
returns: np.array, pd.Series, pd.DataFrame
If no `returns` are passed it is mandatory to pass the other 4 parameters.
n: int
Number of returns samples used for calculating `skew`, `kurtosis` and `sr`.
skew: float, np.array, pd.Series, pd.DataFrame
The third moment expressed in the same frequency as the other parameters.
`skew`=0 for normal returns.
kurtosis: float, np.array, pd.Series, pd.DataFrame
The fourth moment expressed in the same frequency as the other parameters.
`kurtosis`=3 for normal returns.
sr: float, np.array, pd.Series, pd.DataFrame
Sharpe ratio expressed in the same frequency as the other parameters.
Returns
-------
float, pd.Series
Notes
-----
This formula generalizes for both normal and non-normal returns.
https://papers.ssrn.com/sol3/papers.cfm?abstract_id=1821643
"""
if type(returns) != pd.DataFrame:
_returns = pd.DataFrame(returns)
else:
_returns = returns.copy()
if n is None:
n = len(_returns)
if skew is None:
skew = pd.Series(scipy_stats.skew(_returns, nan_policy='omit'), index=_returns.columns)
if kurtosis is None:
kurtosis = pd.Series(scipy_stats.kurtosis(_returns, fisher=False, nan_policy='omit'), index=_returns.columns)
if sr is None:
sr = estimated_sharpe_ratio(_returns)
sr_std = np.sqrt((1 + (0.5 * sr ** 2) - (skew * sr) + (((kurtosis - 3) / 4) * sr ** 2)) / (n - 1))
if type(returns) == pd.DataFrame:
sr_std = pd.Series(sr_std, index=returns.columns)
elif type(sr_std) not in (float, np.float64, pd.DataFrame):
sr_std = sr_std.values[0]
return sr_std
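# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Estimates the standard error of the Sharpe ratio for fabricated daily returns and
# compares it with the simpler normal-returns approximation sqrt((1 + 0.5*SR^2)/(n-1)).
def _example_sr_standard_error():
    rng = np.random.RandomState(1)
    returns = pd.Series(rng.normal(0.0005, 0.01, size=261))
    sr = estimated_sharpe_ratio(returns)
    sr_std = estimated_sharpe_ratio_stdev(returns, sr=sr)
    normal_approx = np.sqrt((1 + 0.5 * sr ** 2) / (len(returns) - 1))
    return sr, sr_std, normal_approx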
def probabilistic_sharpe_ratio(returns=None, sr_benchmark=0.0, *, sr=None, sr_std=None):
"""
Calculate the Probabilistic Sharpe Ratio (PSR).
Parameters
----------
returns: np.array, pd.Series, pd.DataFrame
If no `returns` are passed it is mandatory to pass a `sr` and `sr_std`.
sr_benchmark: float
Benchmark sharpe ratio expressed in the same frequency as the other parameters.
By default set to zero (comparing against no investment skill).
sr: float, np.array, pd.Series, pd.DataFrame
Sharpe ratio expressed in the same frequency as the other parameters.
sr_std: float, np.array, pd.Series, pd.DataFrame
Standard deviation of the estimated sharpe ratio,
expressed in the same frequency as the other parameters.
Returns
-------
float, pd.Series
Notes
-----
PSR(SR*) = probability that SR^ > SR*
SR^ = sharpe ratio estimated with `returns`, or `sr`
SR* = `sr_benchmark`
https://papers.ssrn.com/sol3/papers.cfm?abstract_id=1821643
"""
if sr is None:
sr = estimated_sharpe_ratio(returns)
if sr_std is None:
sr_std = estimated_sharpe_ratio_stdev(returns, sr=sr)
psr = scipy_stats.norm.cdf((sr - sr_benchmark) / sr_std)
if type(returns) == pd.DataFrame:
psr = pd.Series(psr, index=returns.columns)
elif type(psr) not in (float, np.float64):
psr = psr[0]
return psr
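# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# PSR answers "what is the probability that the true Sharpe ratio exceeds the benchmark,
# given estimation error?". All numbers below are fabricated for the example.
def _example_probabilistic_sr():
    rng = np.random.RandomState(2)
    returns = pd.Series(rng.normal(0.001, 0.01, size=261))
    psr = probabilistic_sharpe_ratio(returns, sr_benchmark=0.0)
    # a PSR close to 1 means the observed Sharpe ratio is unlikely to be pure estimation noise
    return psr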
def min_track_record_length(returns=None, sr_benchmark=0.0, prob=0.95, *, n=None, sr=None, sr_std=None):
"""
Calculate the Minimum Track Record Length (minTRL).
Parameters
----------
returns: np.array, pd.Series, pd.DataFrame
If no `returns` are passed it is mandatory to pass a `sr` and `sr_std`.
sr_benchmark: float
Benchmark sharpe ratio expressed in the same frequency as the other parameters.
By default set to zero (comparing against no investment skill).
prob: float
Confidence level used for calculating the minTRL.
Between 0 and 1, by default=0.95
n: int
Number of returns samples used for calculating `sr` and `sr_std`.
sr: float, np.array, pd.Series, pd.DataFrame
Sharpe ratio expressed in the same frequency as the other parameters.
sr_std: float, np.array, pd.Series, pd.DataFrame
Standard deviation of the estimated sharpe ratio,
expressed in the same frequency as the other parameters.
Returns
-------
float, pd.Series
Notes
-----
minTRL = minimum number of returns/samples needed (with the same SR and SR_STD) to achieve PSR(SR*) > `prob`
PSR(SR*) = probability that SR^ > SR*
SR^ = sharpe ratio estimated with `returns`, or `sr`
SR* = `sr_benchmark`
https://papers.ssrn.com/sol3/papers.cfm?abstract_id=1821643
"""
if n is None:
n = len(returns)
if sr is None:
sr = estimated_sharpe_ratio(returns)
if sr_std is None:
sr_std = estimated_sharpe_ratio_stdev(returns, sr=sr)
min_trl = 1 + (sr_std ** 2 * (n - 1)) * (scipy_stats.norm.ppf(prob) / (sr - sr_benchmark)) ** 2
if type(returns) == pd.DataFrame:
min_trl = | pd.Series(min_trl, index=returns.columns) | pandas.Series |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..kabam_exe import Kabam
test = {}
class TestKabam(unittest.TestCase):
"""
Unit tests for Kabam model.
: unittest will
: 1) call the setup method,
: 2) then call every method starting with "test",
: 3) then the teardown method
"""
print("kabam unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for Kabam unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open Kabam qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for Kabam unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_kabam_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty kabam object
kabam_empty = Kabam(df_empty, df_empty)
return kabam_empty
def test_ventilation_rate(self):
"""
:description Ventilation rate of aquatic animal
:unit L/d
:expression Kabam Eq. A5.2b (Gv)
:param zoo_wb: wet weight of animal (kg)
:param conc_do: concentration of dissolved oxygen (mg O2/L)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float')
try:
#use the zooplankton variables/values for the test
kabam_empty.zoo_wb = pd.Series(['nan', 1.e-07, 1.e-4], dtype = 'float')
kabam_empty.conc_do = pd.Series([5.0, 10.0, 7.5], dtype='float')
result = kabam_empty.ventilation_rate(kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_pest_uptake_eff_gills(self):
"""
:description Pesticide uptake efficiency by gills
:unit fraction
"expresssion Kabam Eq. A5.2a (Ew)
:param log kow: octanol-water partition coefficient ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 0.540088, 0.540495], dtype = 'float')
try:
kabam_empty.log_kow = pd.Series(['nan', 5., 6.], dtype = 'float')
kabam_empty.kow = 10.**(kabam_empty.log_kow)
result = kabam_empty.pest_uptake_eff_bygills()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_phytoplankton_k1_calc(self):
"""
:description Uptake rate constant through respiratory area for phytoplankton
:unit: L/kg*d
:expression Kabam Eq. A5.1 (K1:unique to phytoplankton)
:param log kow: octanol-water partition coefficient ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([1639.34426, 8695.6521, 15267.1755], dtype = 'float')
try:
kabam_empty.log_kow = pd.Series([4., 5., 6.], dtype = 'float')
kabam_empty.kow = 10.**(kabam_empty.log_kow)
result = kabam_empty.phytoplankton_k1_calc(kabam_empty.kow)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_aq_animal_k1_calc(self):
"""
:description Uptake rate constant through respiratory area for aquatic animals
:unit: L/kg*d
:expression Kabam Eq. A5.2 (K1)
:param pest_uptake_eff_bygills: Pesticide uptake efficiency by gills of aquatic animals (fraction)
:param vent_rate: Ventilation rate of aquatic animal (L/d)
:param wet_wgt: wet weight of animal (kg)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 1201.13849, 169.37439], dtype = 'float')
try:
pest_uptake_eff_bygills = pd.Series(['nan', 0.0304414, 0.0361228], dtype = 'float')
vent_rate = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float')
wet_wgt = pd.Series(['nan', 1.e-07, 1.e-4], dtype = 'float')
result = kabam_empty.aq_animal_k1_calc(pest_uptake_eff_bygills, vent_rate, wet_wgt)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_animal_water_part_coef(self):
"""
:description Organism-Water partition coefficient (based on organism wet weight)
:unit ()
:expression Kabam Eq. A6a (Kbw)
:param zoo_lipid: lipid fraction of organism (kg lipid/kg organism wet weight)
:param zoo_nlom: non-lipid organic matter (NLOM) fraction of organism (kg NLOM/kg organism wet weight)
:param zoo_water: water content of organism (kg water/kg organism wet weight)
:param kow: octanol-water partition coefficient ()
:param beta: proportionality constant expressing the sorption capacity of NLOM or NLOC to
that of octanol
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([650.87, 11000.76, 165000.64], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_lipid_frac = pd.Series([0.03, 0.04, 0.06], dtype = 'float')
kabam_empty.zoo_nlom_frac = pd.Series([0.10, 0.20, 0.30,], dtype = 'float')
kabam_empty.zoo_water_frac = pd.Series([0.87, 0.76, 0.64], dtype = 'float')
kabam_empty.kow = | pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float') | pandas.Series |
import collections
import os
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from tqdm import tqdm
# competitors = ['Eigen', 'PrimateAI', 'FATHMM-XF', 'ClinPred', 'REVEL', 'M-CAP', 'MISTIC']
competitors = ['InMeRF', 'ClinPred', 'REVEL', 'MISTIC']
def violin_plot_scores(dir, logger):
logger.info('Plotting violin plots distribution')
dict_names = {
'ID': 'ID',
'True_Label': 'True_Label',
'M-CAP_flag': 'M-CAP',
'ClinPred_flag': 'ClinPred',
'REVEL_flag': 'REVEL',
'PrimateAI_flag': 'PrimateAI',
'Eigen-raw_coding_flag': 'Eigen',
'fathmm-XF_coding_flag': 'FATHMM-XF',
'VotingClassifier_proba': 'MISTIC',
}
y_dict = {
'M-CAP': 0.025,
'ClinPred': 0.5,
'REVEL': 0.5,
'PrimateAI': 0.803,
'Eigen': 0,
'FATHMM-XF': 0.5,
'MISTIC': 0.5,
'MISTIC_LR': 0.5,
'GradientBoostingClassifier': 0.5,
'LogisticRegression': 0.5,
'RandomForestClassifier': 0.5,
'MLPClassifier': 0.5,
'GaussianNB': 0.5,
}
thresholds_sup = {
"ClinPred": 0.298126307851977,
"Eigen": -0.353569576359789,
"M-CAP": 0.026337,
"REVEL": 0.235,
"FATHMM-XF": 0.22374,
"PrimateAI": 0.358395427465,
"MISTIC": 0.277,
# "MISTIC" : 0.198003954007379,
}
classifiers = ['Eigen', 'PrimateAI', 'FATHMM-XF', 'ClinPred', 'REVEL', 'M-CAP', 'MISTIC']
pool_df_0 = | pd.DataFrame() | pandas.DataFrame |
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo = algos.SelectAll(include_no_data=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data.loc[dts[0], 'c1'] = np.nan
data.loc[dts[1], 'c1'] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data.loc[dts[0], 'c1'] = np.nan
data.loc[dts[1], 'c1'] = np.nan
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
@mock.patch('bt.ffn.calc_erc_weights')
def test_weigh_erc(mock_erc):
algo = algos.WeighERC(lookback=pd.DateOffset(days=5))
mock_erc.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_erc.called
rets = mock_erc.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_inv_vol():
algo = algos.WeighInvVol(lookback=pd.DateOffset(days=5))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = | pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.) | pandas.DataFrame |
import pandas as pd
from modules.locale_generator.data import LocaleOutData
from helper.utils.utils import read_sheet_map_file
class LocaleProcessor:
def __init__(self, language_name, english_column_name):
self.language_name = language_name
self.english_column_name = english_column_name
self.additional_replacer_list = read_sheet_map_file()
def add_translation_if_present(self, df_row):
if self.language_name in list(df_row.index):
if | pd.notnull(df_row[self.language_name]) | pandas.notnull |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 09:35:14 2019
@author: ACN980
"""
import os, glob, sys
import calendar
import pandas as pd
import numpy as np
import math
import warnings
import scipy
import scipy.stats as sp
import scipy.signal as ss
from sklearn.linear_model import LinearRegression
from datetime import date
import matplotlib.pyplot as plt
import itertools
from scipy.interpolate import Rbf
import matplotlib as mpl
warnings.filterwarnings("ignore")
def make_pseudo_obs(var1, var2):
pseudo1 = var1.rank(method='first', ascending = True)/(len(var1)+1)
pseudo2 = var2.rank(method='first', ascending = True)/(len(var2)+1)
return pseudo1, pseudo2
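# Hedged example (added for illustration): pseudo-observations are the rank transforms
# typically used to fit copulas to, e.g., rainfall-surge pairs. Values below are fabricated.
def _example_pseudo_obs():
    rain = pd.Series([12.0, 3.5, 48.2, 0.0, 7.1])
    surge = pd.Series([0.62, 0.15, 0.95, 0.21, 0.40])
    u, v = make_pseudo_obs(rain, surge)
    # each pseudo-observation equals rank/(n+1), so it lies strictly inside (0, 1)
    return u, v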
def day_to_month_rad_year(data):
""" Converts the Julian day of a date to radian (to perform directional statistics).
input: data is a univariate series with Timestamp index
output: return a DataFrame with the angle in rad and corresponding x and y coordinate"""
day_of_year = data.apply(lambda x: x.timetuple().tm_yday)
day_of_year.name = 'day_of_yr'
month_of_year = data.apply(lambda x: x.timetuple().tm_mon)
month_of_year.name = 'month_of_yr'
leap_year = data.apply(lambda x: x.is_leap_year)
length_year = data.apply(lambda x: 365)
length_year[leap_year] = 366
length_year.name = 'length_of_yr'
output = pd.concat([data,day_of_year,length_year, month_of_year], axis = 1)
output['angle_rad'] = output['day_of_yr']*2*math.pi/output['length_of_yr']
output = output.assign(**{'x': output.angle_rad.apply(lambda x: math.cos(x))})
output = output.assign(**{'y': output.angle_rad.apply(lambda x: math.sin(x))})
return output
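# Hedged example (added for illustration): the x/y columns returned above support simple
# directional statistics, e.g. a circular-mean day of occurrence. Dates are fabricated.
def _example_mean_seasonal_day():
    dates = pd.Series(pd.to_datetime(['2000-06-15', '2001-07-01', '2002-05-30']))
    out = day_to_month_rad_year(dates)
    mean_angle = math.atan2(out['y'].mean(), out['x'].mean()) % (2 * math.pi)
    mean_day_of_year = mean_angle * 365.25 / (2 * math.pi)
    return mean_day_of_year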
def select_epoch_pairs(cont_data, epoch = 'AS', nbofdays = 5, nbofrepet = 500, test_ind = False):
#epoch = 'AS' #weekly = 'W', daily = 'D', monthly = 'MS'
result_max = pd.DataFrame(data = None)
for window_time in cont_data.groupby(pd.Grouper(freq=epoch)):
if window_time[1].empty: continue
ts_data = pd.DataFrame(window_time[1]) #Just selecting the data
max_pairs = ts_data.max(axis=0).to_frame().transpose()
max_time = ts_data.idxmax(axis = 0).to_frame().transpose()
max_time.rename(columns={max_time.columns[0]: max_time.columns[0]+'_date', max_time.columns[1]: max_time.columns[1]+'_date'}, inplace = True)
result = pd.concat([max_pairs, max_time], axis = 1)
result_max = pd.concat([result_max, result], axis = 0, sort = False)
if test_ind == True:
result_ind_final = pd.DataFrame(data = None, index = np.arange(result_max.shape[0]))
#Random interactions
for j in np.arange(nbofrepet):
date1_2 = np.random.randint(1, nbofdays+1, size = (result_max.shape[0],2))
result_ind = pd.DataFrame(data = abs(date1_2[:,0]-date1_2[:,1]))
result_ind_final = pd.concat([result_ind_final, result_ind], axis = 1)
else:
result_ind_final = []
return (result_max, result_ind_final)
def import_skew(fn2):
dateparse = lambda x: pd.datetime.strptime(x, '%d-%m-%Y %H:%M:%S')
skew = pd.read_csv(fn2, parse_dates = True, date_parser=dateparse, index_col = 'Date', usecols = ['Date','skew '])
skew.rename(columns = {skew.columns[0]:'skew'}, inplace = True)
skew2 = skew.reset_index()
ind_null = skew2[skew2['skew'].isnull()].index.tolist()
for i in ind_null:
skew2.loc[i-1,'skew'] = np.nan
skew2.loc[i+1,'skew'] = np.nan
skew2.set_index('Date', inplace = True)
return skew2
def get_skew_surge(pandas_twl,pandas_tide,distance=6):
'''
Function from <NAME>
The goal of this function is to compute annual maximum skew surge levels
Input variables:
pandas_twl: total water levels time series provided as a pandas dataframe
pandas_tide: tidal levels time series provided as a pandas dataframe
distance: minimum number of timesteps between two tidal minima's.
If not defined, set to 36.
Return:
skew_surge_yearmax: pandas dataframe with annual maximum skew surge levels, sorted by height
'''
#1. reverse tidal levels and find the indices of the tidal minima
tide_array_inverse = pandas_tide.waterlevel.values*-1
tide_minima_index, tide_minima_values = ss.find_peaks(tide_array_inverse, distance=distance, height = -10)
tide_time_array = pandas_tide.index.values
peaks_tide_time = tide_time_array[tide_minima_index.tolist()]
#2. find the maximum total water level and the maximum tidal level between each pair of consecutive tidal minima
skew_surges=[]
skew_surge_dates=[]
max_tides=[]
high_tide_dates=[]
print('number of timesteps to be processed: ',len(peaks_tide_time)-1)
print('number of timesteps processed: ')
for ii in range(len(peaks_tide_time)-1):
if ii%1000==0:
print(ii)
t1 = peaks_tide_time[ii]
t2 = peaks_tide_time[ii+1]
max_twl = pandas_twl[t1:t2].waterlevel.max()
max_tide = pandas_tide[t1:t2].waterlevel.max()
skew_surges.append(max_twl-max_tide)
max_tides.append(max_tide)
skew_surge_dates.append(pandas_twl[t1:t2].waterlevel.idxmax())
high_tide_dates.append(pandas_tide[t1:t2].waterlevel.idxmax())
#3. create a dataframe of the annual maximum skew surge levels together with the timestamp of the maximum total water level
df = pd.DataFrame(data={'skew_surge':skew_surges},index=skew_surge_dates)
df2 = pd.DataFrame(data={'high_tide':max_tides},index=high_tide_dates)
return df, df2
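# Hedged usage sketch (added for illustration): fabricated hourly series with a semi-diurnal
# tide and a constant 0.1 m surge; the 'waterlevel' column name is what get_skew_surge expects.
def _example_skew_surge():
    idx = pd.date_range('2000-01-01', periods=240, freq='H')
    tide = pd.DataFrame({'waterlevel': np.sin(np.arange(len(idx)) * 2 * np.pi / 12.42)}, index=idx)
    twl = pd.DataFrame({'waterlevel': tide['waterlevel'].values + 0.1}, index=idx)
    skew, high_tide = get_skew_surge(twl, tide, distance=6)
    # with a constant offset, every skew surge should be close to 0.1 m
    return skew, high_tide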
def collect_rainfall(fn, figure_plotting = False): #####
all_files = glob.glob(os.path.join(fn,'daily_*_rainfall_cleaned.csv'))
result = pd.DataFrame(data = None, index = pd.date_range(start = pd.datetime(1978,1,1), end = pd.datetime(2018,12,31), freq = 'D'))
for file in all_files:
print(file)
rain = pd.read_csv(file, index_col = 'date', dtype={'value':np.float32}, parse_dates = True)
name = file.split('_')[1]
rain.rename(columns={'value':name}, inplace = True)
if figure_plotting == True:
plt.figure()
plt.plot(rain.index, rain[name])
plt.show()
plt.title(name)
plt.ylim(0, 250)
result = pd.merge(result, rain, how = 'outer', left_index = True, right_index = True, sort = True)
result = result.loc[result.index.isin(pd.date_range(start = pd.datetime(1978,1,1), end = pd.datetime(2018,12,31), freq = 'H')),:].copy()
if figure_plotting == True:
result.plot()
plt.show()
cmap = plt.cm.seismic
bounds = np.linspace(-1,1,21)
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
names = result.columns
correlations = result.corr()
# plot correlation matrix
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(correlations, vmin=-1, vmax=1, cmap=cmap, norm=norm)
fig.colorbar(cax)
ticks = np.arange(0,len(names),1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(names)
ax.set_yticklabels(names)
plt.show()
return result
def thiessen_rain(fn_thiessen, rainfall):
weights = pd.read_csv(fn_thiessen, usecols = ['Station','Weight'])
for i in weights.index:
weights.loc[i,'Station'] = weights.loc[i,'Station'].replace(" ", "")
weights = weights.set_index('Station').transpose()
sel_rainfall = rainfall.loc[:,weights.columns]
for col in sel_rainfall.columns:
# print(col)
sel_rainfall[col] = sel_rainfall[col].apply(lambda x: x*weights.loc[weights.index[0],col])
thiessen_rainfall = pd.DataFrame(sel_rainfall.sum(axis = 1))
thiessen_rainfall.rename(columns={thiessen_rainfall.columns[0]:'Thiessen_sum'}, inplace = True)
return thiessen_rainfall
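# Hedged example (added for illustration): the weighting step inside thiessen_rain, shown
# without the CSV I/O. Station names and weights are fabricated.
def _example_thiessen_weighting():
    rainfall = pd.DataFrame({'StationA': [10.0, 0.0, 5.0], 'StationB': [20.0, 2.0, 0.0]})
    weights = {'StationA': 0.4, 'StationB': 0.6}
    weighted = rainfall.mul(pd.Series(weights), axis=1)
    thiessen_sum = weighted.sum(axis=1).to_frame('Thiessen_sum')
    return thiessen_sum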
def calc_avg_max_min_rainfall(result, threshold=40): ####
rainfall_years = pd.DataFrame(data = None, index = pd.date_range(start = pd.datetime(1978,1,1), end = pd.datetime(2018,12,31), freq = 'D'))
for col in result.columns:
print(col)
ts_rain, years_removed = keep_full_years(sel = result[col].copy(), threshold = threshold)
ts_rain = ts_rain.fillna(0)
rainfall_years = pd.merge(rainfall_years,ts_rain,how = 'outer', left_index = True, right_index = True, sort = True)
res_isna = rainfall_years.isna().sum(axis=1)
average = rainfall_years.where(res_isna<=3).mean(axis=1)
max_values = rainfall_years.where(res_isna<=3).max(axis=1)
min_values = rainfall_years.where(res_isna<=3).min(axis=1)
rainfall_years['average'] = average
rainfall_years['maximum'] = max_values
rainfall_years['minimum'] = min_values
return rainfall_years
def import_monthly_rain(fn2):
allfiles = glob.glob(os.path.join(fn2, 'NewRain\TRENDS\MONTH_CORRECTED', 'Thiessen_*.csv'))
all_rain = pd.DataFrame(data=None)
for file in allfiles:
month = pd.read_csv(file, index_col = 'Year', parse_dates=True)
month.rename(columns={month.columns[0]:'Thiessen'}, inplace = True)
all_rain = pd.concat([all_rain, month], axis = 0)
return all_rain
def collect_swl(fn, figure_plotting = False):
all_files = glob.glob(os.path.join(fn,'hourly_*_swl_cleaned.csv'))
    result = pd.DataFrame(data = None, index = pd.date_range(start = '1980-01-01', end = '2018-12-31', freq = 'H'))
for file in all_files:
print(file)
rain = pd.read_csv(file, index_col = 'date', dtype={'value':np.float32}, parse_dates = True)
name = file.split('_')[1]
rain.rename(columns={rain.columns[0]:name}, inplace = True)
        if figure_plotting == True:
            plt.figure()
            plt.plot(rain.index, rain[name])
            plt.title(name)
            # plt.ylim(0, 250)
            plt.show()
result = pd.merge(result, rain, how = 'outer', left_index = True, right_index = True, sort = True)
    result = result.loc[result.index.isin(pd.date_range(start = '1978-01-01', end = '2018-12-31', freq = 'H')),:].copy()
if figure_plotting == True:
result.plot()
return result
def keep_full_years(sel, threshold): ####
"""
> sel: is a time-series of the rainfall with a datetime as index
> threshold: is the minimum number of days to consider a year valid. Here this is somewhat
ambiguous what a good threshold is as there might be a lot of 0 if it doesn't rain in a year
"""
check = sel.groupby(sel.index.map(lambda x: x.year)).count()
years_to_remove = check.where(check<threshold).dropna().index.values
ts = pd.DataFrame(data = sel.copy())
ts.index.rename('date', inplace = True)
ts.reset_index(drop = False, inplace = True)
ts['year'] = ts.date.dt.year
ts = ts.set_index('year').drop(labels = years_to_remove).set_index('date')
return ts, years_to_remove
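# Minimal usage sketch for keep_full_years on a synthetic daily series; the 300-day
# threshold is an illustrative assumption, not a value used elsewhere in this script.
def _demo_keep_full_years():
    idx = pd.date_range('2000-01-01', '2002-12-31', freq='D')
    sel = pd.Series(np.random.gamma(0.3, 10, size=len(idx)), index=idx, name='rain')
    sel.loc['2001-02-01':'2001-12-31'] = np.nan  # 2001 keeps only 31 valid days
    ts, removed = keep_full_years(sel, threshold=300)
    print('years removed:', removed)  # expected to contain 2001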
#%%
def median_detrend_wNa(data, tot_periods, min_periods, figure_plotting = False):
"""Removes trends and SL variation by substracting the moving median
tot_periods is the number of steps considered to calculate the median
min_periods is the minimum number of periods considered to calculate the median"""
inland_day_median = data.rolling(tot_periods, min_periods=min_periods, center=False).median()
inland_day_median = inland_day_median.fillna(method='ffill').copy()
inland_day_median = inland_day_median.fillna(method='bfill').copy()
inland_day_detrend = data - inland_day_median
inland = inland_day_detrend.copy()
if figure_plotting == True:
plt.figure()
inland_day_median.plot()
plt.show()
f, ax = plt.subplots(nrows=len(data.columns), ncols=2, sharex=True)
ax = ax.reshape(-1)
for i in np.arange(len(data.columns)):
print(i)
ax[int(i*2)].plot(data.index, data.iloc[:,i], '-k', inland_day_median.index, inland_day_median.iloc[:,i], '-r')
ax[int((i*2)+1)].plot(inland.index, inland.iloc[:,i], '-b')
plt.show()
plt.figure()
inland.plot()
plt.show()
return inland
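# Minimal sketch of median_detrend_wNa on a synthetic hourly water-level record with a slow
# trend; the 30-day window (and 15-day minimum) below are illustrative assumptions.
def _demo_median_detrend():
    idx = pd.date_range('2010-01-01', periods=24 * 365, freq='H')
    trend = np.linspace(0.0, 0.3, len(idx))  # slow rise to be removed
    tide = 0.5 * np.sin(2 * np.pi * np.arange(len(idx)) / 12.42)  # semi-diurnal signal
    data = pd.DataFrame({'swl': trend + tide}, index=idx)
    detrended = median_detrend_wNa(data, tot_periods=24 * 30, min_periods=24 * 15)
    print(detrended['swl'].mean())  # roughly zero once the rolling median is removed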
def lin_detrend_wNa(data, ref_date, remove_means = True, figure_plotting = False):
"""arguments:
data is a pd.Series with date as index
ref_date: if a date is mentioned, remove trend taking the swl on this date as ref
remove_means: if True, centers the detrended ts around 0
figure_plotting: if True returns a figure of both ts
returns:
the linearly detrended data with time as index"""
y = np.array(data)
x = np.arange(0,len(y),1)
not_nan_ind = ~np.isnan(y)
m, b, r_val, p_val, std_err = sp.linregress(x[not_nan_ind],y[not_nan_ind])
if remove_means == True:
detrend_y = y - (m*x + b)
elif ref_date is not None:
x_0 = np.flatnonzero(data.index == ref_date)
detrend_y = y - (m*x + b) + (m * x_0 + b)
else:
detrend_y = y - (m*x)
print('Linear trend is: ', m)
print('p-value is: ', p_val)
if figure_plotting == True:
plt.figure()
plt.plot(x, y, label = 'original')
plt.plot(x, detrend_y, label = 'detrended')
plt.legend()
result = pd.DataFrame(data = detrend_y, index = data.index, columns = [data.name])
return result
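# Minimal sketch of lin_detrend_wNa: remove a linear trend from a named pd.Series; the
# synthetic slope and noise level are assumptions for illustration only.
def _demo_lin_detrend():
    idx = pd.date_range('1980-01-01', '2018-12-31', freq='AS')
    y = 0.003 * np.arange(len(idx)) + np.random.normal(0, 0.05, len(idx))
    series = pd.Series(y, index=idx, name='skew')
    out = lin_detrend_wNa(series, ref_date=None, remove_means=True)
    print(out['skew'].head())  # detrended values centred around 0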
#%% TOP EVENTS
def top_n_events_per_year_tide(x, n_top, label_value = 'tide', time_frequency = 'AS'):
x=pd.DataFrame(x, columns=[label_value])
x.rename(columns={x.columns.values[0]:label_value}, inplace = True)
x.index.rename('index', inplace = True)
y= x.groupby(pd.Grouper(freq=time_frequency)).apply(lambda g: g.nlargest(n = n_top, columns = label_value))
res = pd.DataFrame(y)
res['year'] = [i[0].year for i in res.index]
res['date'] = [i[1] for i in res.index]
# res.reset_index(inplace=True, drop = True)
return res
def top_n_events_per_year_rainfall(x, n_top, label_value = 'tide', time_frequency = 'AS'):
x.rename(columns={x.columns.values[0]:label_value}, inplace = True)
x.index.rename('index', inplace = True)
y= x.groupby(pd.Grouper(freq=time_frequency)).apply(lambda g: g.nlargest(n = n_top, columns = label_value))
res = pd.DataFrame(y)
res['year'] = [i[0].year for i in res.index]
res['date'] = [i[1] for i in res.index]
res.reset_index(inplace=True, drop = True)
return res
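# Minimal sketch: keep the 3 largest daily values per year with top_n_events_per_year_rainfall
# (synthetic gamma-distributed rainfall, purely illustrative).
def _demo_top_n_events():
    idx = pd.date_range('2015-01-01', '2017-12-31', freq='D')
    rain = pd.DataFrame({'Thiessen_sum': np.random.gamma(0.3, 10, len(idx))}, index=idx)
    top3 = top_n_events_per_year_rainfall(rain, n_top=3, label_value='rain')
    print(top3.groupby('year').size())  # 3 events per year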
#%% FFT SKEW
def detrend_fft(daily_skew, fillnavalue=0, frequency = 1. / 365, figure_plotting = 0):
"""Takes a ts with no Nan and continuous time series
frequency is the corresponding frequency of the index in year (daily --> 1/365)"""
import scipy.fftpack
skew_day = daily_skew.fillna(fillnavalue)
skew_values = skew_day.iloc[:,0].copy()
skew_fft = scipy.fftpack.fft(np.array(skew_values))
skew_psd = np.abs(skew_fft) ** 2 #Taking the power spectral density
fftfreq = scipy.fftpack.fftfreq(len(skew_psd), frequency)
i = fftfreq > 0 #only taking positive frequencies
temp_fft_bis = skew_fft.copy()
temp_fft_bis[np.abs(fftfreq) > 1.0] = 0 # temp_fft_bis[np.abs(fftfreq) > 1.1] = 0
skew_slow = np.real(scipy.fftpack.ifft(temp_fft_bis))
daily_skew = pd.DataFrame(daily_skew.iloc[:,0] - skew_slow)
#skew_slow = pd.DataFrame(index=daily_skew.index, data=skew_slow)
#daily_skew_runmean = skew_day - skew_day.rolling(365, min_periods=150, center=True).mean()
if figure_plotting == 1:
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(fftfreq[i], 10 * np.log10(skew_psd[i]))
ax.set_xlim(0, 5)
ax.set_xlabel('Frequency (1/year)')
ax.set_ylabel('PSD (dB)')
fig, ax = plt.subplots(1, 1, figsize=(6, 3))
ax.plot(skew_day.index, skew_day, '-b', lw = 0.5)#skew_day.plot(ax=ax, lw=.5)
ax.plot(skew_day.index, skew_slow, '-r', lw = 2)
ax.plot(skew_day.index, skew_day.rolling(365, min_periods=150, center=True).mean(), '-g', lw = 1.5)
ax.set_xlabel('Date')
ax.set_ylabel('Skew surge')
plt.show()
fig, ax = plt.subplots(1, 1, figsize=(6, 3))
ax.plot(daily_skew.index, daily_skew.iloc[:,0],'-b', lw = 0.5)
ax.set_xlabel('Date')
ax.set_ylabel('Skew surge')
plt.show()
return daily_skew
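# Minimal sketch of detrend_fft: strip the slow (seasonal and longer) FFT components from a
# daily series. The one-year sine plus noise below is a synthetic assumption.
def _demo_detrend_fft():
    idx = pd.date_range('1990-01-01', '2009-12-31', freq='D')
    seasonal = 0.2 * np.sin(2 * np.pi * np.arange(len(idx)) / 365.25)
    noise = np.random.normal(0, 0.05, len(idx))
    daily = pd.DataFrame({'skew': seasonal + noise}, index=idx)
    out = detrend_fft(daily, fillnavalue=0, frequency=1. / 365)
    print(out.iloc[:, 0].std(), noise.std())  # residual std should be close to the noise level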
def remove_NaN_skew(skew):
isna_skew = skew[skew[skew.columns[0]].isnull()].index
for na_date in isna_skew:
# print(na_date)
i_ind = np.flatnonzero(skew.index == na_date)
bef = i_ind - 1
aft = i_ind + 1
        if bef >= 0:
skew.iloc[bef,0] = np.nan
if aft < len(skew):
skew.iloc[aft,0] = np.nan
return skew.copy()
def import_monthly_skew(fn):
    date_parser = lambda x: pd.to_datetime(x, format="%d-%m-%Y %H:%M:%S")
fn_skew = os.path.join(fn,'skew_WACC_VungTau_Cleaned_Detrended_Strict_sel_const.csv')
skew = pd.read_csv(fn_skew, parse_dates = True, date_parser= date_parser, index_col = 'Date')
skew.rename(columns = {skew.columns[0]:'skew'}, inplace = True)
skew = remove_NaN_skew(skew)
skew_day = skew.resample('D').max()
skew_detrend = detrend_fft(skew_day, fillnavalue=0, frequency = 1./(2*365), figure_plotting =0)
skew_detrend_day = skew_detrend.resample('D').max()
skew_month = skew_detrend_day.resample('M').max()
return skew_month
#%%
def extract_MM(tide, freq='MS', label='sealevel'):
sel_sel = pd.concat([tide,tide], axis = 1).dropna()
dates_tide = select_epoch_pairs(sel_sel, epoch = freq, nbofdays = 5, nbofrepet = 500, test_ind = False)
dates_MM_tide = dates_tide[0].reset_index(drop=True).iloc[:,[0,-1]]
dates_MM_tide['index'] = [pd.to_datetime(date(d.year, d.month, calendar.monthrange(d.year, d.month)[-1])) for d in dates_MM_tide[f'{label}_date']]
dates_MM_tide[f'{label}_date'] = [pd.to_datetime(date(d.year, d.month, d.day)) for d in dates_MM_tide[f'{label}_date']]
dates_MM_tide.set_index('index',inplace = True)
return dates_MM_tide
def make_cmap_month():
# COLORSCALE
# get discrete colormap
n_clusters = 15
cmap = plt.get_cmap('hsv', n_clusters)
colors = cmap(np.linspace(0.05, 0.90, 13))
cmap2 = mpl.colors.ListedColormap(colors)
bounds = np.arange(1,14,1)
norm = mpl.colors.BoundaryNorm(bounds, cmap2.N)
bounds_day = np.arange(1,366,1)
norm_day = mpl.colors.BoundaryNorm(bounds_day, cmap2.N)
return cmap2, norm
def ax_joint_mm(var1, var2, ax, label='_date', lag_joint=0, ls=7, formatting = True, plotting=True):
var1_name = var1.columns[~var1.columns.str.endswith(label)][0]
var2_name = var2.columns[~var2.columns.str.endswith(label)][0]
var1_date = var1_name+label
var2_date = var2_name+label
both = pd.concat([var1, var2], axis = 1).dropna()
both.reset_index(inplace = True, drop = True)
Joint_MM = both[[var1_date,var2_date]].copy()
Joint_MM['diff_days'] = Joint_MM.loc[:,var1_date]-Joint_MM.loc[:,var2_date]
Joint_MM['abs_days'] = np.abs(Joint_MM['diff_days'].dt.days)
# Joint_MM.reset_index(drop=True, inplace = True)
Joint_MM = pd.concat([both, Joint_MM[['diff_days','abs_days']]], axis = 1)
joint_points_MM = Joint_MM.where(Joint_MM.abs_days < lag_joint+1).dropna()
if len(joint_points_MM)>0:
time_of_year = day_to_month_rad_year(data = joint_points_MM.loc[:,var1_date])
time_of_year.rename(columns={time_of_year.columns[0]:'date'}, inplace = True)
time_of_year = time_of_year.set_index('date').reset_index()
cmap2, norm = make_cmap_month()
if plotting == True:
ax.scatter(both.loc[:,var1_name], both.loc[:,var2_name], marker = 'o', c = 'white', edgecolors='k', linewidths=0.3, alpha = 0.5, s=6)
if len(joint_points_MM)>0:
ax.scatter(joint_points_MM.loc[:,var1_name], joint_points_MM.loc[:,var2_name], marker = 'o', edgecolors ='k', linewidths=0.3, c = time_of_year['month_of_yr'], cmap=cmap2, alpha = 1, s=15, norm=norm)
if formatting == True:
ax.set_xlabel(var1_name,fontsize=ls)
ax.set_ylabel(var2_name,fontsize=ls)
ax.tick_params(axis='both', labelsize=ls)
return Joint_MM
def joint_mm_all_cooc(Joint_MM, max_lag = 7, label = '_date'):
var1_name = Joint_MM.columns[~Joint_MM.columns.str.endswith(label)][0]
var2_name = Joint_MM.columns[~Joint_MM.columns.str.endswith(label)][1]
var1_date = var1_name+label
var2_date = var2_name+label
var1_result ={}
month = np.arange(1,13,1)
dates_month = day_to_month_rad_year(data = Joint_MM.loc[:,var1_date])
for m in month:
print(m)
var1_result[m] ={}
sel = Joint_MM.where(dates_month.month_of_yr == m).dropna().copy()
var1_result[m]['data'] = sel
corr_sel_MM = sp.kendalltau(sel.loc[:,var2_name].values, sel.loc[:,var1_name].values, nan_policy='omit')
var1_result[m]['data_corr'] = corr_sel_MM
co_occur_n_samples = pd.DataFrame(data = None, index = ['N'], columns = np.arange(0,max_lag+1))
for lag_joint in np.arange(0,max_lag+1):
joint_points_sel = sel.where(sel.abs_days < lag_joint+1).dropna()
co_occur_n_samples.loc['N',lag_joint] = len(joint_points_sel)
var1_result[m]['co_occur_n_samples'] = co_occur_n_samples
return var1_result
def joint_mm_permonth(Joint_MM, lag_joint=0, label = '_date'):
var1_name = Joint_MM.columns[~Joint_MM.columns.str.endswith(label)][0]
var2_name = Joint_MM.columns[~Joint_MM.columns.str.endswith(label)][1]
var1_date = var1_name+label
var2_date = var2_name+label
var1_result ={}
month = np.arange(1,13,1)
dates_month = day_to_month_rad_year(data = Joint_MM.loc[:,var1_date])
for m in month:
print(m)
var1_result[m] ={}
sel = Joint_MM.where(dates_month.month_of_yr == m).dropna().copy()
var1_result[m]['data'] = sel
corr_sel_MM = sp.kendalltau(sel.loc[:,var2_name].values, sel.loc[:,var1_name].values, nan_policy='omit')
var1_result[m]['data_corr'] = corr_sel_MM
joint_points_sel = sel.where(sel.abs_days < lag_joint+1).dropna()
if len(joint_points_sel)>0:
time_of_year = day_to_month_rad_year(data = joint_points_sel.loc[:,var1_date])
joint_points_sel = pd.concat([joint_points_sel, time_of_year['month_of_yr']], axis = 1)
try:
corr_joint_points_sel = sp.kendalltau(joint_points_sel.loc[:,var2_name].values, joint_points_sel.loc[:,var1_name].values, nan_policy='omit')
            except Exception:
corr_joint_points_sel = np.nan
var1_result[m]['co_occur_data'] = joint_points_sel
var1_result[m]['co_occur_corr'] = corr_joint_points_sel
var1_result[m]['co_occur_n_samples'] = len(joint_points_sel.dropna())
return var1_result
def plot_cooc_CI(result_pair, ax, lag_joint =0, c = 'r', size = 5, label = None, background = True):
fm = os.path.join(r'E:\surfdrive\Documents\Master2019\Thomas\data\Binomial')
#month_label = ['Jan', 'Feb', 'Mar', 'Apr', 'May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
month_label = ['J', 'F', 'M', 'A', 'M','J','J','A','S','O','N','D']
len_month = [31,28,31,30,31,30,31,31,30,31,30,31]
all_exp = pd.DataFrame()
q50=pd.DataFrame()
q2=pd.DataFrame()
q97=pd.DataFrame()
nb_years = pd.DataFrame(index = np.arange(1,13,1), columns=['nb'])
obs_cooc = pd.DataFrame(index = np.arange(1,13,1), columns = np.arange(0,8,1))
for i in np.arange(1,13,1):
obs_cooc.loc[i,:] = result_pair[i]['co_occur_n_samples'].loc['N',:]
nb_years.loc[i,'nb'] = 37#len(result_pair[i]['data'])
#We read the table
for i in nb_years.index:
print(i)
case = os.path.join(str(len_month[i-1])+'days',str(nb_years.loc[i, 'nb'])+'years')
data = pd.read_csv(os.path.join(fm,case,'Independent_Binomial_Expectation.csv'), index_col = 'index')
data.rename(index={'expectation':i}, inplace = True)
all_exp = pd.concat([all_exp, data], axis = 0)
ci_data = pd.read_csv(os.path.join(fm,case,'Independent_Binomial_Expectation_CI.csv'), index_col = 'quantile')
ci_data.rename(index={'quantile':i}, inplace = True)
q2 = pd.concat([q2,pd.DataFrame(ci_data.loc['q2.5',:]).transpose().rename(index={'q2.5':i})], axis = 0)
q50 = pd.concat([q50,pd.DataFrame(ci_data.loc['q50',:]).transpose().rename(index={'q50':i})], axis = 0)
q97 = pd.concat([q97,pd.DataFrame(ci_data.loc['q97.5',:]).transpose().rename(index={'q97.5':i})], axis = 0)
# f,ax = plt.subplots(nrows=1, ncols = 1, figsize=(8,3))
#ax = ax.reshape(-1)
if background:
lw=1.3
# ax.fill_between(np.arange(1,13,1),q2.loc[:,str(lag_joint)].values, q97.loc[:,str(lag_joint)].values, color = 'k', alpha = 0.3)
ax.plot(all_exp.index, all_exp.loc[:,str(lag_joint)], '--', color = 'k', linewidth = lw) #'*', mec = 'k', mfc = 'k', markersize = size/1.5)
length = size
space = size
# if c == 'y':
# c='orange'
# lw = 1
# length = 5
# space = 10
ax.plot(q2.index, q2.loc[:,str(lag_joint)], ':', color = 'k', linewidth = lw)#, dashes=(size/2, size/2)) #length of 5, space of 1
ax.plot(q97.index, q97.loc[:,str(lag_joint)], ':', color = 'k', linewidth = lw)#, dashes=(length/2, space/2)) #length of 5, space of 1)
ax.grid(lw=0.5)
ax.plot(obs_cooc.index, obs_cooc.loc[:,lag_joint], 'o', markersize = size, mfc = c, mec='k', mew=0.5)
ax.annotate(label, (0.05,0.90), xycoords='axes fraction', fontsize=8, weight="bold")
ax.set_xlim(0.7,12.3)
ax.set_ylim(-0.2,6)
ax.set_yticks(np.arange(0,7,1))
ax.set_xticks(np.arange(1,13,1))
ax.set_xticklabels(month_label, fontsize = 7)
def kendall_CI(bs_data, var1_name = 'Thiessen', var2_name='skew', label='_date', iterations = 500):
#Calculate kendall CI
kend_bs = pd.Series(index = np.arange(iterations))
for x in np.arange(iterations):
rand1 = bs_data[var1_name].sample(n=bs_data.shape[0], replace=True, axis=0)
kend_bs[x] = sp.kendalltau(rand1.values, bs_data.loc[:,var2_name].values, nan_policy='omit')[0]
kend_025 = kend_bs.quantile(q=0.025, interpolation='linear')
kend_975 = kend_bs.quantile(q=0.975, interpolation='linear')
return kend_025, kend_975
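# Minimal sketch of the bootstrap CI on Kendall's tau (kendall_CI), assuming scipy.stats is
# imported as sp at module level, as used elsewhere in this script. The data are synthetic and
# the column names follow the function defaults ('Thiessen', 'skew').
def _demo_kendall_ci():
    n = 200
    thiessen = np.random.gamma(2., 10., n)
    skew = 0.01 * thiessen + np.random.normal(0, 0.1, n)
    bs_data = pd.DataFrame({'Thiessen': thiessen, 'skew': skew})
    lo, hi = kendall_CI(bs_data, iterations=200)
    print('95% CI for tau:', lo, hi)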
def kendall_CI_allmonth(result_pair, var1_name = 'Thiessen', var2_name='skew', label='_date', iterations = 500):
kend_025 = pd.DataFrame(index=np.arange(1,13,1), columns=['q2.5'])
kend_975 = pd.DataFrame(index=np.arange(1,13,1), columns=['q97.5'])
for i in np.arange(1,13,1):
bs_data = result_pair[i]['data']
kend_025.loc[i,'q2.5'], kend_975.loc[i,'q97.5']= kendall_CI(bs_data, var1_name = var1_name, var2_name=var2_name, label=label, iterations = iterations)
return kend_025, kend_975
def ax_kendall_mm(result_pair, ax, var1_name = 'Thiessen', var2_name='skew', label='_date', iterations = 500, c = 'k', size = 7, background=True):
month_label = ['Jan', 'Feb', 'Mar', 'Apr', 'May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
#SKEW-RAIN
kendall = pd.DataFrame(index = np.arange(1,13,1), columns=['kendall', 'p_value'])
for i in np.arange(1,13,1):
kendall.loc[i,'kendall'] = sp.kendalltau(result_pair[i]['data'].loc[:,var1_name].values, result_pair[i]['data'].loc[:,var2_name].values, nan_policy='omit')[0]
kendall.loc[i,'p_value'] = sp.kendalltau(result_pair[i]['data'].loc[:,var1_name].values, result_pair[i]['data'].loc[:,var2_name].values, nan_policy='omit')[1]
if background == True:
lw=1.3
length = size
space = size
if c == 'y':
c='orange'
lw = 1
length = 5
space = 10
k25, k975 = kendall_CI_allmonth(result_pair, var1_name = var1_name, var2_name = var2_name, label = label, iterations = iterations)
ax.plot(k25.index, k25.loc[:,'q2.5'], '--', color = c, linewidth = lw, dashes=(size, size))
ax.plot(k975.index, k975.loc[:,'q97.5'], '--', color = c, linewidth = lw, dashes=(length, space))
ax.axhline(0, color = 'black', lw=1)# , xmin=1, xmax=12, color = 'k', lw=0.5, ls='-')
ax.plot(kendall.index, kendall.loc[:,'kendall'], 'o', markersize = size, color = c, mfc = c, mec='k', mew=0.5)
ax.set_xlim(0.9,12.1)
ax.set_xticks(np.arange(1,13,1))
ax.set_xticklabels(month_label, fontsize = 7)
#%%
def get_samples_from_dist(n, params, dist_type):
dist = getattr(sp, dist_type)
if len(params) == 3: #shape, loc and scale
data = dist.rvs(params[0], params[1], params[2], n)
elif len(params) == 2:#loc and scale
data = dist.rvs(params[0], params[1], n)
elif len(params) == 1:
data = dist.rvs(params[0], n)
else:
print('Something is wrong!')
return data
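# Minimal sketch of get_samples_from_dist: draw from a scipy.stats distribution given its
# parameter list (the gumbel_r location/scale below are illustrative assumptions).
def _demo_get_samples():
    samples = get_samples_from_dist(1000, params=[20., 15.], dist_type='gumbel_r')
    print(samples.mean(), samples.std())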
def get_ICDF_from_dist(q, params, dist_type):
dist = getattr(sp, dist_type)
if len(params) == 3: #shape, loc and scale
data = dist.ppf(q, params[0], params[1], params[2])
elif len(params) == 2:#loc and scale
data = dist.ppf(q, params[0], params[1])
elif len(params) == 1:
data = dist.ppf(q, params[0])
else:
print('Something is wrong!')
return data
def get_line_pt_RP_fit(exc_prob_x, params, dist_type):
dist = getattr(sp, dist_type)
if len(params) == 3: #shape, loc and scale
rp_y = dist.isf(exc_prob_x, params[0], params[1], params[2])
elif len(params) == 2:#loc and scale
rp_y = dist.isf(exc_prob_x, params[0], params[1])
elif len(params) == 1:
rp_y = dist.isf(exc_prob_x, params[0])
else:
print('Something is wrong!')
return rp_y
def get_RPs(return_periods, params, dist_type):
return_periods_col = [str(i) for i in return_periods]
dist = getattr(sp, dist_type)
if len(params) == 3:
a = dist.isf(1./return_periods, params[0], params[1], params[2])
elif len(params) == 2:
a = dist.isf(1./return_periods, params[0], params[1])
elif len(params) == 1:
a = dist.isf(1./return_periods, params[0])
else:
print('Something is wrong!')
RP_EVAs = pd.Series(a, index = return_periods_col)
return RP_EVAs
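# Minimal sketch tying the fitting helpers together, assuming scipy.stats is imported as sp:
# fit a distribution to a synthetic sample, then query return levels with get_RPs and a
# quantile with get_ICDF_from_dist. The 'genextreme' choice and its parameters are assumptions.
def _demo_return_levels():
    dist_type = 'genextreme'
    sample = getattr(sp, dist_type).rvs(0.1, loc=1.0, scale=0.3, size=500)
    params = getattr(sp, dist_type).fit(sample)  # (shape, loc, scale)
    print(get_RPs(np.array([2, 10, 100]), params, dist_type))
    print(get_ICDF_from_dist(0.99, params, dist_type))  # 99th percentile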
def empirical_RP(data):
#Calculating empirical
emp_p = pd.DataFrame(data=data)
emp_p['rank'] = emp_p.iloc[:,0].rank(axis=0, ascending=False)
    emp_p['exc_prob'] = emp_p['rank']/(emp_p['rank'].size+1) # Weibull plotting position
emp_p['cum_prob'] = 1 - emp_p['exc_prob']
emp_p['emp_rp'] = 1/emp_p['exc_prob']
return emp_p
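# Minimal sketch of empirical_RP: Weibull plotting positions for a synthetic annual-maximum
# sample (values are illustrative only).
def _demo_empirical_rp():
    annual_max = pd.Series(np.random.gumbel(1.0, 0.3, 40), name='skew')
    emp = empirical_RP(annual_max)
    print(emp.sort_values('emp_rp', ascending=False).head())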
# def get_line_pt_RP_fit(exc_prob_x, data, params, dist_type):
# dist = getattr(sp, dist_type)
# if len(params) == 3: #shape, loc and scale
# #print('Skew param ', f.fitted_param[dist_type][0])
# print('Check param ', params)
# inv_cdf_dist = dist.sf(data, params[0], params[1], params[2])
# rp_y = dist.isf(exc_prob_x, params[0], params[1], params[2])
# elif len(params) == 2:#loc and scale
# inv_cdf_dist = dist.sf(data, params[0], params[1])
# rp_y = dist.isf(exc_prob_x, params[0], params[1])
# elif len(params) == 1:
# inv_cdf_dist = dist.sf(data, params[0])
# rp_y = dist.isf(exc_prob_x, params[0])
# else:
# print('Something is wrong!')
# return inv_cdf_dist, rp_y
#%%
def plot_damage_grid(damage_grid, alphas, ax, rstride, ctride, cmap, norm):
coords, dam = damage_surface_coord_z(damage_grid)
RBFi = Rbf(coords[:,0], coords[:,1], dam, function='linear', smooth=0)
rain_int = list(np.arange(0,int(damage_grid.index.max())+10, 10))
sl_int = list(np.arange(0,int(damage_grid.columns.max())+100,100))
all_S = np.array([ x for x in itertools.product(rain_int,sl_int)])
all_dam = RBFi(all_S[:,0], all_S[:,1])
X, Y = np.meshgrid(rain_int, sl_int, indexing='ij')
damage_grid_plot = damage_surface_df_z(all_S, all_dam)
Z = damage_grid_plot.to_numpy()
damage_grid_scenario = damage_grid.drop(0,axis=1)
coords_sce, dam_sce = damage_surface_coord_z(damage_grid_scenario)
# fig = plt.figure(figsize=[8.5, 4])
# gs = GridSpec(2, 2, left=0.05, bottom=0.1, right=0.95, top=0.90, width_ratios=[1,1], height_ratios=[1,1], wspace=0.40, hspace=0.50)#, width_ratios=None, height_ratios=[0.9,0.9,0.9,0.9,0.9,0.9])
# ax = fig.add_subplot(gs[:, 0], projection='3d', azim=-60, elev=25)
ax.plot_wireframe(X, Y/1000, Z/1e6, color='grey',linewidth=1, antialiased=True, rstride=rstride, cstride=ctride, zorder=1, alpha=0.5) #plot_surface
# alphas = np.linspace(0.2,1,len(damage_grid_scenario.columns))
for i in np.arange(0,len(damage_grid_scenario.columns)):
print(i)
ax.scatter(damage_grid_scenario.iloc[:,i].index, np.repeat(damage_grid_scenario.columns[i]/1000, len(damage_grid_scenario.iloc[:,i])),damage_grid_scenario.iloc[:,i].values/1e6, c=damage_grid_scenario.iloc[:,i].index, s = 35, edgecolors='k', linewidths=0, alpha=alphas[i], cmap=cmap, norm=norm, zorder=10, depthshade=False) #alpha=alphas[i],
ax.scatter(damage_grid_scenario.iloc[:,i].index.values, np.repeat(damage_grid_scenario.columns[i]/1000, len(damage_grid_scenario.iloc[:,i])),damage_grid_scenario.iloc[:,i].values/1e6, facecolor=(0,0,0,0), s = 35, edgecolor='k', linewidths=1, depthshade=False, zorder=11)
#ax.plot_wireframe(xv, yv/1000, Z/1e6, color='black',linewidth=0.2)
ax.set_xlabel('Rainfall (mm/day)', size = 8)
ax.set_ylabel('Sea Level (m)', size = 8)
ax.set_zlabel('Damage (M$)', size = 8)
def damage_surface_coord_z(damage_grid):
coords = np.zeros((damage_grid.shape[0]*damage_grid.shape[1],2))
dam = np.zeros((damage_grid.shape[0]*damage_grid.shape[1],1))
z = 0
for i in damage_grid.index: #rain
# print(i)
for j in damage_grid.columns: #sea
# print(j)
coords[z,:] = [i, j]
dam[z] = damage_grid.loc[i,j]
z += 1
return coords, dam
def damage_surface_df_z(coords, dam):
rain = np.unique(coords[:,0])
sl = np.unique(coords[:,1])
Z = pd.DataFrame(index = rain, columns=sl, data=dam.reshape(len(rain), len(sl)))
return Z
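# Minimal sketch of the damage-surface helpers: flatten a small rain x sea-level grid to
# (coords, dam) and rebuild it. The toy damage numbers are purely illustrative.
def _demo_damage_surface_roundtrip():
    grid = pd.DataFrame(index=[0, 100, 200], columns=[0, 500, 1000],
                        data=np.arange(9, dtype=float).reshape(3, 3))
    coords, dam = damage_surface_coord_z(grid)
    rebuilt = damage_surface_df_z(coords, dam)
    print(np.allclose(rebuilt.to_numpy(), grid.to_numpy()))  # True: the two helpers are inverses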
def add_extra_sealevel(i_extra_sealevel, damage_grid, drop_i=[]):
new_damage_sl = pd.DataFrame(data=None, index=[i_extra_sealevel], columns = damage_grid.index)
for i_rain in damage_grid.index:
print(i_rain)
if len(drop_i)>0:
sel = damage_grid.drop(drop_i, axis = 1).loc[i_rain,:]
else:
sel = damage_grid.loc[i_rain,:]
X=sel.index.values.reshape(-1, 1)
Y =sel.values.reshape(-1,1)
linear_regressor = LinearRegression() # create object for the class
linear_regressor.fit(X, Y) # perform linear regression
Y_pred = linear_regressor.predict(np.array(i_extra_sealevel).reshape(1,-1)) # make predictions
new_damage_sl.loc[i_extra_sealevel, i_rain] = Y_pred
new_damage_sl = new_damage_sl.astype(float)
return new_damage_sl
def add_extra_rain(i_extra_rain, damage_grid, drop_i=[]):
new_damage_rain = pd.DataFrame(data=None, index=[i_extra_rain], columns = damage_grid.columns)
for i_sl in damage_grid.columns:
print(i_sl)
if len(drop_i)>0:
sel = damage_grid.drop(drop_i, axis = 0).loc[:,i_sl]
else:
sel = damage_grid.loc[:,i_sl]
X=sel.index.values.reshape(-1, 1)
Y =sel.values.reshape(-1,1)
linear_regressor = LinearRegression() # create object for the class
linear_regressor.fit(X, Y) # perform linear regression
Y_pred = linear_regressor.predict(np.array(i_extra_rain).reshape(1,-1)) # make predictions
new_damage_rain.loc[i_extra_rain, i_sl] = float(Y_pred) #f(i_extra_rain)
#sel = new_damage_rain.drop([0,60,120,max_rain], axis=1).loc[i_sl, :]
new_damage_rain = new_damage_rain.astype(float)
return new_damage_rain
def load_damage(fn_trunk, fn_files, max_rain, max_sl, thr_rain, thr_sl):
damage = pd.read_csv(os.path.join(fn_trunk, fn_files,'summary_damage_cases.csv'), index_col = 'landuse')
damage_tot = damage.sum(axis = 0)
    rain = [int(col.split('_')[1].strip('R')) for col in damage.columns]
    sea = [int(col.split('_')[2].strip('H')) for col in damage.columns]
damage_grid = pd.DataFrame(index=np.unique(rain), columns = np.unique(sea), data=None)
for value in damage.columns:
# print(value)
        i_rain = int(value.split('_')[1].strip('R'))
        i_sea = int(value.split('_')[2].strip('H'))
damage_grid.loc[i_rain,i_sea] = damage_tot[value]
damage_grid = damage_grid.astype(float)
#Extrapolation
new_damage_sl_high = add_extra_sealevel(max_sl, damage_grid, drop_i=[610,860,1110])
new_damage_sl_low = add_extra_sealevel(0, damage_grid, drop_i=[1110, 1360,1610,1860])
damage_grid = pd.concat([damage_grid, new_damage_sl_high.transpose(), new_damage_sl_low.transpose()], axis = 1)
damage_grid.sort_index(axis = 1, inplace = True)
new_damage_rain = add_extra_rain(max_rain, damage_grid, drop_i=[0,60,120])
damage_grid = pd.concat([damage_grid, new_damage_rain], axis = 0)
new_damage_rain_0 = add_extra_rain(180, damage_grid, drop_i=[0,60,180,300])
damage_grid.loc[180,0] = new_damage_rain_0.loc[180,0]
damage_grid.sort_index(inplace = True)
del new_damage_rain, new_damage_sl_high, new_damage_sl_low, new_damage_rain_0
damage_grid = damage_grid.astype(float)
#Setting threshold
coords, dam = damage_surface_coord_z(damage_grid)
new_damage_rain = [float(scipy.interpolate.griddata(coords, dam, (thr_rain,sl), method = 'linear')) for sl in damage_grid.columns]
new_line = pd.DataFrame(data=np.array(new_damage_rain), index = damage_grid.columns, columns=[thr_rain])
damage_grid = pd.concat([damage_grid, new_line.transpose()], axis = 0)
coords, dam = damage_surface_coord_z(damage_grid)
new_damage_sl = [float(scipy.interpolate.griddata(coords, dam, (i_rain,thr_sl), method = 'linear')) for i_rain in damage_grid.index]
new_line = pd.DataFrame(data=np.array(new_damage_sl), index = damage_grid.index, columns=[thr_sl])
damage_grid = pd.concat([damage_grid, new_line], axis = 1)
# damage_grid[0] = damage_grid.loc[:,610]
damage_grid.sort_index(inplace = True)
damage_grid.sort_index(axis = 1, inplace = True)
damage_grid = damage_grid.astype(float)
return damage_grid
def simulate_rain(rain_simcdf, params, dist_type):
rain_rvs = get_ICDF_from_dist(rain_simcdf, params, dist_type)
rain_rvs = np.reshape(rain_rvs, rain_rvs.shape[0])
rain_rvs[rain_rvs<0]=0
return rain_rvs
def simulate_skew(cdf_swl_rvs, params_skew, dist_type_skew):
skew_rvs = get_ICDF_from_dist(cdf_swl_rvs, params_skew, dist_type_skew)
skew_rvs = np.reshape(skew_rvs, skew_rvs.shape[0]) * 1000
return skew_rvs
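# Minimal sketch of simulate_rain: transform uniform copula samples into rainfall amounts via
# the fitted marginal. The gumbel_r parameters below are assumptions, not fitted values.
def _demo_simulate_rain():
    u = np.random.random((1000, 1))  # pseudo-observations for one copula margin
    rain = simulate_rain(u, params=[20., 15.], dist_type='gumbel_r')
    print(rain.min(), rain.mean())  # negatives are clipped to zero inside simulate_rain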
def sample_tide(month, fn_tide, n):
tide_sim = pd.read_csv(os.path.join(fn_tide, 'samples_tide_month_{}.csv'.format(str(month))), usecols=['tide'])
#tide_sim.hist(bins=100)
# # #################################################################################################################################
# #Selected mean = 0.86
# mean = 0.86
# std = 0.02
# tide_sim = np.random.normal(loc=mean, scale=std, size = 50000)
# tide_sim = pd.DataFrame(tide_sim)
# #tide_sim.hist(bins=100)
# ## #tide_sim.hist(bins=100)
# ## ref = tide_sim/tide_sim.max()
# ## tide_sim = tide_sim*np.exp(-ref)
# ## #tide_sim.hist(bins=100)
# ## ###################################################################################################################################
tide_rvs = tide_sim.sample(n, replace = True).values
tide_rvs = np.reshape(tide_rvs, tide_rvs.shape[0]) * 1000
return tide_rvs
def get_swl(skew_rvs, tide_rvs):
swl_rvs = skew_rvs + tide_rvs
swl_rvs = np.reshape(swl_rvs, swl_rvs.shape[0])
return swl_rvs
def pairs_cooc(rain_rvs, skew_rvs, tide_rvs):
cooc_events = pd.concat([pd.DataFrame(rain_rvs, columns = ['rain']), pd.DataFrame(skew_rvs + tide_rvs, columns = ['sealevel'])], axis = 1)
return cooc_events
def pairs_rain(rain_rvs, tide_rvs, skew_month_avg, month):
rain_events = pd.concat([pd.DataFrame(rain_rvs, columns = ['rain']), pd.DataFrame(tide_rvs, columns = ['sealevel']) + (skew_month_avg.loc[month,'skew']*1000)], axis = 1)
return rain_events
def pairs_sl(skew_rvs, tide_rvs, rainfall_month_avg, month):
sealevel_events = pd.concat([ pd.DataFrame(np.zeros(tide_rvs.shape) + rainfall_month_avg.loc[month,'Thiessen_sum'], columns = ['rain']), pd.DataFrame(skew_rvs + tide_rvs, columns = ['sealevel'])], axis = 1)
return sealevel_events
#%%
def calculate_monthly_damage(best_fit_rain, param_rain, best_fit_skew, param_skew, n, monthly_data, coords, dam, skew_month_avg, rainfall_month_avg,
p_month, month_duration, cooc, lag_joint, selected_copulas, fn_tide, fn_copula, fn_trunk, varname1='Thiessen', varname2='skew',
dep_type='copula', figure_joint=True):
#Storing results
damage_mod = pd.DataFrame(data = None, index = np.arange(1,13,1), columns = ['simulated_highest', 'full_dep', 'ind_highest', 'exclusive_highest'])
all_events_sampled = pd.DataFrame(data=None, columns=['rain','sealevel','month'])
all_events_sampled_dep = pd.DataFrame(data=None, columns=['rain','sealevel', 'month'])
all_events_sampled_ind = pd.DataFrame(data=None, columns=['rain','sealevel', 'month'])
all_events_sampled_excl = pd.DataFrame(data=None, columns=['rain','sealevel', 'month'])
if figure_joint==True:
#Preparing figure
f, axs = plt.subplots(nrows=2, ncols=6, linewidth = 0, facecolor='w', edgecolor='w', sharex=True, sharey=True, figsize=(8, 4)) # , sharex=True, sharey=True gridspec_kw={'height_ratios': [1,1]}, #sharex=True, sharey=True,
axs = axs.reshape(-1)
month_label = ['Jan', 'Feb', 'Mar', 'Apr', 'May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
for month in np.arange(1,13,1):
# print(month)
#Select marginal distribution for rain
dist_type = best_fit_rain.loc[month, 'distribution']
# dist_type = 'gumbel_r'
# print('Rainfall distype: ', dist_type)
list_params = param_rain.loc[dist_type,str(month)].replace('(', '').replace(')','').split(',')
params = [float(e) for e in list_params]
#Select marginal distribution for skew
dist_type_skew = best_fit_skew.loc[month, 'distribution']
# dist_type_skew = 'gumbel_r'
# print('Skew distype: ', dist_type_skew)
list_params_skew = param_skew.loc[dist_type_skew,str(month)].replace('(', '').replace(')','').split(',')
params_skew = [float(e) for e in list_params_skew]
if dep_type == 'copula':
rain_simcdf = pd.read_csv(os.path.join(fn_copula, 'New{}_New{}_data_month_{}.csv'.format(str(varname1),str(varname2),str(month))), usecols=['V1'])
rain_rvs = simulate_rain(rain_simcdf, params, dist_type)
cdf_swl_rvs = pd.read_csv(os.path.join(fn_copula, 'New{}_New{}_data_month_{}.csv'.format(str(varname1),str(varname2),str(month))), usecols=['V2']).values
if varname2 == 'skew':
skew_rvs = simulate_skew(cdf_swl_rvs, params_skew, dist_type_skew)
tide_rvs = sample_tide(month, fn_tide, n)
if figure_joint==True:
kend = pd.read_csv(os.path.join(fn_trunk, 'Master2019/Thomas/data/NewBivariate/Simulated', 'New{}_New{}_copulatype_month_{}.csv'.format(varname1, varname2, str(month))), index_col = 0)
pseudo_rain, pseudo_skew = make_pseudo_obs(monthly_data['Thiessen'].where(monthly_data['month']==month).dropna(), monthly_data['skew'].where(monthly_data['month']==month).dropna())
axs[month-1].annotate("{}".format(month_label[month-1]), xy = (0.05, 0.95), xycoords = 'axes fraction', size=7)
axs[month-1].annotate(r"$\tau$ ={0:5.3f}".format(float(kend.iloc[-1,0])), xy = (0.50, 0.95), xycoords = 'axes fraction', size=7)
# axs[month-1].scatter(cdf_swl_rvs, rain_simcdf, linestyle = 'None', marker = 'o', c = 'grey', edgecolors='none', alpha = 0.8, s=0.2, zorder=1) #markeredgewidth=0.5,
# axs[month-1].scatter(pseudo_skew, pseudo_rain, marker = 'o', edgecolors ='k', linewidths=0.3, c = 'k', alpha = 1, s=10)
if selected_copulas[month]=='Independence':
skew_rvs_shuffled = skew_rvs.copy()
rain_rvs_shuffled = rain_rvs.copy()
np.random.shuffle(skew_rvs_shuffled)
np.random.shuffle(rain_rvs_shuffled)
axs[month-1].scatter(skew_rvs_shuffled, rain_rvs_shuffled, linestyle = 'None', marker = 'o', c = 'blue', edgecolors='none', alpha = 0.8, s=0.2, zorder=1) #markeredgewidth=0.5,
else:
axs[month-1].scatter(skew_rvs, rain_rvs, linestyle = 'None', marker = 'o', c = 'blue', edgecolors='none', alpha = 0.8, s=0.2, zorder=1) #markeredgewidth=0.5,
# axs[month-1].scatter(monthly_data['skew'].where(monthly_data['month']==month).dropna()*1000, monthly_data['Thiessen'].where(monthly_data['month']==month).dropna(), marker = 'o', edgecolors ='k', linewidths=0.3, c = 'k', alpha = 1, s=10) #markeredgewidth=0.5,
# axs[month-1].xaxis.set_major_locator(MultipleLocator(0.5))
# axs[month-1].yaxis.set_major_locator(MultipleLocator(0.5))
# axs[month-1].xaxis.set_minor_locator(MultipleLocator(0.1))
# axs[month-1].yaxis.set_minor_locator(MultipleLocator(0.1))
axs[month-1].tick_params(axis='both', labelsize=7, direction='out')
# axs[month-1].scatter(skew_rvs, rain_rvs, linestyle = 'None', marker = 'o', c = 'grey', edgecolors='none', alpha = 0.8, s=0.1, zorder=1) #markeredgewidth=0.5,
# axs[month-1].scatter(swl_rvs[:n_cooc_month], rain_rvs[:n_cooc_month], marker = 'o', edgecolors ='k', linewidths=0.3, c = np.repeat(month, len(swl_rvs[:n_cooc_month])), cmap=cmap2, alpha = 1, s=18, norm=norm)
del cdf_swl_rvs, rain_simcdf
if dep_type == 'full corr':
quantiles = np.random.random(n)
# print('Quantiles shape:', quantiles.shape)
rain_rvs = simulate_rain(quantiles, params, dist_type)
if varname2 == 'skew':
skew_rvs = simulate_skew(quantiles, params_skew, dist_type_skew)
del quantiles
if figure_joint==True:
axs[month-1].scatter(monthly_data['skew'].where(monthly_data['month']==month).dropna()*1000, monthly_data['Thiessen'].where(monthly_data['month']==month).dropna(), marker = 'o', edgecolors ='k', linewidths=0.3, c = 'k', alpha = 1, s=10)
axs[month-1].scatter(skew_rvs, rain_rvs, linestyle = 'None', marker = 'o', c = 'blue', edgecolors='none', alpha = 0.8, s=0.2, zorder=1) #markeredgewidth=0.5,
axs[month-1].tick_params(axis='both', labelsize=7, direction='out')
if len(rain_rvs) != n:
# print('Performing analysis on less samples')
i_random = np.random.choice(np.arange(0, len(rain_rvs)), n, replace = False)
rain_rvs = rain_rvs[i_random]
skew_rvs = skew_rvs[i_random]
##### FULL DEPENDENCE ######
tide_rvs = sample_tide(month, fn_tide, n)
cooc_events = pairs_cooc(rain_rvs, skew_rvs, tide_rvs)
if figure_joint==True:
axs[month-1].scatter(cooc_events.loc[:,'sealevel'], cooc_events.loc[:,'rain'], linestyle = 'None', marker = 'o', c = 'grey', edgecolors='none', alpha = 0.8, s=0.2, zorder=1) #markeredgewidth=0.5,
axs[month-1].scatter(monthly_data['skew'].where(monthly_data['month']==month).dropna()*1000, monthly_data['Thiessen'].where(monthly_data['month']==month).dropna(), marker = 'o', edgecolors ='k', linewidths=0.3, c = 'k', alpha = 1, s=10) #markeredgewidth=0.5,
sampled_month_dep = pd.DataFrame(data=cooc_events, columns=['rain', 'sealevel'])
sampled_month_dep['month'] = month
dam_full_dep = scipy.interpolate.griddata(coords, dam, cooc_events.values, method = 'linear')
dam_full = np.sum(dam_full_dep)
damage_mod.loc[month, 'full_dep'] = dam_full/n
sampled_month_dep['cooc_damage'] = dam_full_dep
all_events_sampled_dep = pd.concat([all_events_sampled_dep, sampled_month_dep], axis = 0, ignore_index=True)
del dam_full_dep, dam_full, sampled_month_dep, tide_rvs, cooc_events
##### EXCLUSIVE ######
tide_rvs = sample_tide(month, fn_tide, n)
rain_events = pairs_rain(rain_rvs, tide_rvs, skew_month_avg, month)
del tide_rvs
tide_rvs = sample_tide(month, fn_tide, n)
sealevel_events = pairs_sl(skew_rvs, tide_rvs, rainfall_month_avg, month)
dam_excl_rain = scipy.interpolate.griddata(coords, dam, (rain_events.values), method = 'linear') #
dam_excl_sl = scipy.interpolate.griddata(coords, dam, (sealevel_events.values), method = 'linear') #np.zeros(events_month[:,1].shape)
dam_excl_highest = pd.DataFrame(data=np.concatenate((dam_excl_rain, dam_excl_sl), axis=1), columns = ['rain_damage', 'sealevel_damage'])
dam_highest = dam_excl_highest.max(axis=1)
damage_mod.loc[month, 'exclusive_highest'] = (np.sum(dam_highest))/n
dam_highest_type = dam_excl_highest.idxmax(axis=1)
sampled_month_excl = pd.concat([pd.concat([rain_events[dam_highest_type=='rain_damage'], dam_excl_highest[dam_highest_type=='rain_damage']['rain_damage']], axis = 1),
pd.concat([sealevel_events[dam_highest_type=='sealevel_damage'], dam_excl_highest[dam_highest_type=='sealevel_damage']['sealevel_damage']], axis = 1)
], axis = 0, ignore_index=True)
sampled_month_excl['month'] = month
all_events_sampled_excl = pd.concat([all_events_sampled_excl, sampled_month_excl], axis = 0, ignore_index=True)
del rain_events, sealevel_events, tide_rvs, dam_highest, dam_highest_type, dam_excl_rain, dam_excl_sl, sampled_month_excl, dam_excl_highest
#### INDEPENDENCE ####
n_cooc_ind = int(p_month.loc[month_duration.loc[month,'length'],str(lag_joint)] * n)
i_cooc_ind = np.random.choice(np.arange(0, n), n_cooc_ind, replace = False)
i_ind = np.delete(np.arange(0, n), i_cooc_ind)
tide_rvs = sample_tide(month, fn_tide, len(i_cooc_ind))
cooc_events = pairs_cooc(rain_rvs[i_cooc_ind], skew_rvs[i_cooc_ind], tide_rvs)
tide_rvs = sample_tide(month, fn_tide, len(i_ind))
rain_events = pairs_rain(rain_rvs[i_ind], tide_rvs, skew_month_avg, month)
tide_rvs = sample_tide(month, fn_tide, len(i_ind))
sealevel_events = pairs_sl(skew_rvs[i_ind], tide_rvs, rainfall_month_avg, month)
dam_excl_rain = scipy.interpolate.griddata(coords, dam, (rain_events.values), method = 'linear') #
dam_excl_sl = scipy.interpolate.griddata(coords, dam, (sealevel_events.values), method = 'linear') #np.zeros(events_month[:,1].shape)
dam_cooc = scipy.interpolate.griddata(coords, dam, (cooc_events.values), method = 'linear') #np.zeros(events_month[:,1].shape)
dam_excl_highest = pd.DataFrame(data=np.concatenate((dam_excl_rain, dam_excl_sl), axis=1), columns = ['rain_damage', 'sealevel_damage'])
dam_highest = dam_excl_highest.max(axis=1)
dam_highest_type = dam_excl_highest.idxmax(axis=1)
damage_mod.loc[month, 'ind_highest'] = (np.sum(dam_highest) + np.sum(dam_cooc))/n
sampled_month_ind = pd.concat([pd.concat([rain_events[dam_highest_type=='rain_damage'], dam_excl_highest[dam_highest_type=='rain_damage']['rain_damage']], axis = 1),
pd.concat([sealevel_events[dam_highest_type=='sealevel_damage'], dam_excl_highest[dam_highest_type=='sealevel_damage']['sealevel_damage']], axis = 1),
pd.concat([cooc_events, pd.DataFrame(dam_cooc, columns = ['cooc_damage'])], axis = 1)
], axis = 0, ignore_index=True)
sampled_month_ind['month'] = month
all_events_sampled_ind = | pd.concat([all_events_sampled_ind, sampled_month_ind], axis = 0, ignore_index=True) | pandas.concat |
import unittest
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
import numpy as np
from ITMO_FS.embedded import *
np.random.seed(42)
class TestCases(unittest.TestCase):
data, target = np.random.randint(10, size=(100, 20)), np.random.randint(10, size=(100,))
feature_names = [''.join(['f', str(i)]) for i in range(data.shape[1])]
feature_names_override = [''.join(['g', str(i)]) for i in range(data.shape[1])]
def test_MOSS(self):
# MOSS
res = MOS().fit_transform(self.data, self.target, sampling=True)
assert self.data.shape[0] == res.shape[0]
print("MOSS:", self.data.shape, '--->', res.shape)
def test_MOSNS(self):
# MOSNS
res = MOS().fit_transform(self.data, self.target, sampling=False)
assert self.data.shape[0] == res.shape[0]
print("MOSNS:", self.data.shape, '--->', res.shape)
def test_losses(self):
for loss in ['log', 'hinge']:
res = MOS(loss=loss).fit_transform(self.data, self.target)
assert self.data.shape[0] == res.shape[0]
def test_df(self):
f = MOS()
df = f.fit_transform(pd.DataFrame(self.data), pd.DataFrame(self.target), sampling=True)
arr = f.fit_transform(self.data, self.target, sampling=True)
np.testing.assert_array_equal(df, arr)
df = f.fit_transform(pd.DataFrame(self.data), pd.DataFrame(self.target), sampling=False)
arr = f.fit_transform(self.data, self.target, sampling=False)
np.testing.assert_array_equal(df, arr)
def test_pipeline(self):
# FS
p = Pipeline([('FS1', MOS())])
p.fit(self.data, self.target)
res = p.transform(self.data)
assert self.data.shape[0] == res.shape[0]
# FS - estim
p = Pipeline([('FS1', MOS()), ('E1', LogisticRegression())])
p.fit(self.data, self.target)
assert 0 <= p.score(self.data, self.target) <= 1
# FS - FS
p = Pipeline([('FS1', MOS(loss='log')), ('FS2', MOS(loss='hinge'))])
p.fit(self.data, self.target)
res = p.transform(self.data)
assert self.data.shape[0] == res.shape[0]
# FS - FS - estim
p = Pipeline([('FS1', MOS(loss='log')), ('FS2', MOS(loss='hinge')), ('E1', LogisticRegression())])
p.fit(self.data, self.target)
assert 0 <= p.score(self.data, self.target) <= 1
def test_feature_names_np(self):
f = MOS()
arr = f.fit_transform(self.data, self.target, feature_names=self.feature_names, sampling=True)
assert np.all([feature in self.feature_names for feature in f.get_feature_names()])
arr = f.fit_transform(self.data, self.target, feature_names=self.feature_names, sampling=False)
assert np.all([feature in self.feature_names for feature in f.get_feature_names()])
def test_feature_names_df(self):
f = MOS()
arr = f.fit_transform(pd.DataFrame(self.data), | pd.DataFrame(self.target) | pandas.DataFrame |
import pandas as pd
data = pd.read_csv('data/citibike_tripdata.csv', sep=',')
data.info()
print(data['starttime'].dtype)
print(round(data['start station id'].mode()[0]))
print(data['bikeid'].mode()[0])
mode_usertype = data['usertype'].mode()[0]
count_mode_user = data[data['usertype'] == mode_usertype].shape[0]
print(round(count_mode_user / data.shape[0], 2))
male_count = data[data['gender'] == 1].shape[0]
female_count = data[data['gender'] == 0].shape[0]
print(male_count)
print(female_count)
print(data.describe())
data.drop(['start station id', 'end station id'], axis=1, inplace=True)
print(data.shape[1])
data['age'] = 2018 - data['birth year']
data.drop(['birth year'], axis=1, inplace=True)
print(data[data['age'] > 60].shape[0])
data['starttime'] = | pd.to_datetime(data['starttime']) | pandas.to_datetime |
"""
Author: <NAME>
Date: December 2020
"""
import configparser
import os.path as osp
import tempfile
from tqdm import tqdm
from pandas_plink import read_plink1_bin
import dask.array as da
import pandas as pd
import numpy as np
from scipy import stats
import zarr
from magenpy.AnnotationMatrix import AnnotationMatrix
from magenpy.LDMatrix import LDMatrix
from magenpy.parsers.plink_parsers import parse_fam_file, parse_bim_file
from magenpy.parsers.misc_parsers import read_snp_filter_file, read_individual_filter_file, parse_ld_block_data
from magenpy.utils.c_utils import (find_windowed_ld_boundaries,
find_shrinkage_ld_boundaries,
find_ld_block_boundaries)
from magenpy.utils.ld_utils import (_validate_ld_matrix,
from_plink_ld_bin_to_zarr,
from_plink_ld_table_to_zarr_chunked,
shrink_ld_matrix,
zarr_array_to_ragged,
rechunk_zarr,
move_ld_store)
from magenpy.utils.model_utils import standardize_genotype_matrix, merge_snp_tables
from magenpy.utils.compute_utils import intersect_arrays, iterable
from magenpy.utils.system_utils import makedir, get_filenames, run_shell_script, is_cmd_tool
class GWASDataLoader(object):
def __init__(self,
bed_files=None,
standardize_genotype=True,
phenotype_likelihood='gaussian',
phenotype_file=None,
phenotype_header=None,
phenotype_col=2,
phenotype_id=None,
standardize_phenotype=True,
sumstats_files=None,
sumstats_format='magenpy',
keep_individuals=None,
keep_snps=None,
min_maf=None,
min_mac=1,
remove_duplicated=True,
annotation_files=None,
genmap_Ne=None,
genmap_sample_size=None,
shrinkage_cutoff=1e-5,
compute_ld=False,
ld_store_files=None,
ld_block_files=None,
ld_estimator='windowed',
window_unit='cM',
cm_window_cutoff=3.,
window_size_cutoff=2000,
use_plink=False,
batch_size=200,
temp_dir='temp',
output_dir='output',
verbose=True,
n_threads=1):
# ------- General options -------
self.verbose = verbose
self.n_threads = n_threads
makedir([temp_dir, output_dir])
self.use_plink = use_plink
self.bed_files = None
self.temp_dir = temp_dir
self.output_dir = output_dir
self.cleanup_dir_list = [] # Directories to clean up after execution.
self.batch_size = batch_size
# Access the configuration file:
config = configparser.ConfigParser()
config.read(osp.join(osp.dirname(__file__), 'config/paths.ini'))
try:
self.config = config['USER']
except KeyError:
self.config = config['DEFAULT']
if self.use_plink:
if not is_cmd_tool(self.config.get('plink2_path')):
raise Exception("To use `plink` as a backend, make sure that the path for the "
"plink2 executable is configured properly.")
# ------- General parameters -------
self.standardize_phenotype = standardize_phenotype
self.phenotype_likelihood = phenotype_likelihood
self.phenotype_id = None # Name or ID of the phenotype
# ------- LD computation options -------
self.ld_estimator = ld_estimator
assert self.ld_estimator in ('block', 'windowed', 'sample', 'shrinkage')
# For the block estimator of the LD matrix:
self.ld_blocks = None
if self.ld_estimator == 'block':
assert ld_block_files is not None
self.ld_blocks = parse_ld_block_data(ld_block_files)
# For the shrinkage estimator of the LD matrix:
self.genmap_Ne = genmap_Ne
self.genmap_sample_size = genmap_sample_size
self.shrinkage_cutoff = shrinkage_cutoff
if self.ld_estimator == 'shrinkage':
assert self.genmap_Ne is not None
assert self.genmap_sample_size is not None
# For the windowed estimator of the LD matrix:
self.window_unit = window_unit
self.cm_window_cutoff = cm_window_cutoff
self.window_size_cutoff = window_size_cutoff
# ------- Filter data -------
try:
self.keep_individuals = read_individual_filter_file(keep_individuals)
except ValueError:
self.keep_individuals = None
try:
self.keep_snps = read_snp_filter_file(keep_snps)
except ValueError:
self.keep_snps = None
# ------- Genotype data -------
self.standardize_genotype = standardize_genotype
self.genotypes = None
self._snps = None
self._bp_pos = None # SNP position in BP
self._cm_pos = None # SNP position in cM
self.n_per_snp = None # Sample size per SNP
self._a1 = None # Minor allele
self._a2 = None # Major allele
self.maf = None # Minor allele frequency
self._fid = None # Family IDs
self._iid = None # Individual IDs
self.annotations = None
# ------- LD-related data -------
self.ld_boundaries = None
self.ld = None
# ------- Phenotype data -------
self.phenotypes = None
# ------- Summary statistics data -------
self.beta_hats = None
self.z_scores = None
self.se = None
self.p_values = None
# ------- Read data files -------
self.read_genotypes(bed_files)
self.read_annotations(annotation_files)
# TODO: Figure out optimal checks and placement of SNP filters
if bed_files is not None:
self.filter_by_allele_frequency(min_maf=min_maf, min_mac=min_mac)
# ------- Compute LD matrices -------
if ld_store_files is not None:
self.read_ld(ld_store_files)
elif compute_ld:
self.compute_ld()
# ------- Read phenotype/sumstats files -------
self.read_phenotypes(phenotype_file, phenotype_id=phenotype_id,
header=phenotype_header, phenotype_col=phenotype_col,
standardize=self.standardize_phenotype)
self.read_summary_stats(sumstats_files, sumstats_format)
# ------- Harmonize data sources -------
if bed_files is None:
self.filter_by_allele_frequency(min_maf=min_maf, min_mac=min_mac)
if self.genotypes is None and remove_duplicated:
self.filter_duplicated_snps()
if ld_store_files is not None or sumstats_files is not None:
self.harmonize_data()
@classmethod
def from_table(cls, table):
"""
Initialize a GDL object from table.
        :param table: A pandas dataframe with at least 4 columns defined: `CHR`, `SNP`, `A1`, `POS`
Other column names that will be parsed from this table are:
A2, MAF, N
"""
assert all([col in table.columns for col in ('CHR', 'SNP', 'A1', 'POS')])
gdl = cls()
gdl._snps = {}
gdl._bp_pos = {}
gdl._a1 = {}
for c in table['CHR'].unique():
chrom_table = table.loc[table['CHR'] == c].sort_values('POS')
gdl._snps[c] = chrom_table['SNP'].values
gdl._a1[c] = chrom_table['A1'].values
gdl._bp_pos[c] = chrom_table['POS'].values
if 'A2' in chrom_table.columns:
if gdl._a2 is None:
gdl._a2 = {}
gdl._a2[c] = chrom_table['A2'].values
if 'MAF' in chrom_table.columns:
if gdl.maf is None:
gdl.maf = {}
gdl.maf[c] = chrom_table['MAF'].values
if 'N' in chrom_table.columns:
if gdl.n_per_snp is None:
gdl.n_per_snp = {}
gdl.n_per_snp[c] = chrom_table['N'].values
return gdl
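    # Usage sketch (hypothetical path): gdl = GWASDataLoader.from_table(pd.read_csv("snp_table.csv"))
    # where the table provides at least the CHR, SNP, A1 and POS columns described above;
    # optional A2, MAF and N columns are picked up automatically when present.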
@property
def n_annotations(self):
assert self.annotations is not None
return self.annotations[self.chromosomes[0]].n_annotations
@property
def sample_size(self):
return self.N
@property
def N(self, agg='max'):
"""
The number of samples
:param agg: Aggregation (max, mean, or None)
"""
if agg == 'max':
if self._iid is not None:
return len(self._iid)
else:
if self.n_per_snp is None:
return None
return max([nps.max() for nps in self.n_per_snp.values()])
else:
if self.n_per_snp is None:
self.compute_n_per_snp()
if agg is None:
return self.n_per_snp
elif agg == 'mean':
return np.mean([nps.mean() for nps in self.n_per_snp.values()])
@property
def M(self):
return sum(self.shapes.values())
@property
def snps(self):
return self._snps
@property
def bp_pos(self):
return self._bp_pos
@property
def cm_pos(self):
return self._cm_pos
@property
def ref_alleles(self):
return self._a2
@property
def alt_alleles(self):
return self._a1
@property
def shapes(self):
return {c: len(snps) for c, snps in self.snps.items()}
@property
def chromosomes(self):
return list(self.shapes.keys())
def sample_ids_to_index(self, ids):
return np.where(np.isin(self._iid, ids))[0]
def sample_index_to_ids(self, idx):
return self._iid[idx]
def filter_snps(self, keep_snps, chrom=None):
"""
:param keep_snps:
:param chrom:
"""
if chrom is None:
snp_dict = self._snps
else:
snp_dict = {chrom: self._snps[chrom]}
for c, snps in snp_dict.items():
if np.array_equal(snps, keep_snps):
continue
common_idx = intersect_arrays(snps, keep_snps, return_index=True)
# SNP vectors that must exist in all GDL objects:
self._snps[c] = self._snps[c][common_idx]
self._a1[c] = self._a1[c][common_idx]
# Optional SNP vectors/matrices:
if self._bp_pos is not None and c in self._bp_pos:
self._bp_pos[c] = self._bp_pos[c][common_idx]
if self._cm_pos is not None and c in self._cm_pos:
self._cm_pos[c] = self._cm_pos[c][common_idx]
if self._a2 is not None and c in self._a2:
self._a2[c] = self._a2[c][common_idx]
if self.genotypes is not None and c in self.genotypes:
self.genotypes[c] = self.genotypes[c].isel(variant=common_idx)
if self.n_per_snp is not None and c in self.n_per_snp:
self.n_per_snp[c] = self.n_per_snp[c][common_idx]
if self.maf is not None and c in self.maf:
self.maf[c] = self.maf[c][common_idx]
if self.beta_hats is not None and c in self.beta_hats:
self.beta_hats[c] = self.beta_hats[c][common_idx]
if self.se is not None and c in self.se:
self.se[c] = self.se[c][common_idx]
if self.z_scores is not None and c in self.z_scores:
self.z_scores[c] = self.z_scores[c][common_idx]
if self.p_values is not None and c in self.p_values:
self.p_values[c] = self.p_values[c][common_idx]
# Filter the annotation table as well:
if self.annotations is not None and c in self.annotations:
self.annotations[c].filter_snps(self._snps[c])
def filter_by_allele_frequency(self, min_maf=None, min_mac=1):
"""
Filter SNPs by minimum allele frequency or allele count
:param min_maf: Minimum allele frequency
:param min_mac: Minimum allele count (1 by default)
"""
cond_dict = {}
if min_mac is not None or min_maf is not None:
if self.maf is None:
self.compute_allele_frequency()
if self.n_per_snp is None:
self.compute_n_per_snp()
if min_mac is not None:
for c, maf in self.maf.items():
mac = (2*maf*self.n_per_snp[c]).astype(np.int64)
cond_dict[c] = (mac >= min_mac) & ((2*self.n_per_snp[c] - mac) >= min_mac)
if min_maf is not None:
for c, maf in self.maf.items():
maf_cond = (maf >= min_maf) & (1. - maf >= min_maf)
if c in cond_dict:
cond_dict[c] = cond_dict[c] & maf_cond
else:
cond_dict[c] = maf_cond
if len(cond_dict) > 0:
filt_count = 0
for c, snps in tqdm(self.snps.items(),
total=len(self.chromosomes),
desc="Filtering SNPs by allele frequency/count",
disable=not self.verbose):
keep_snps = snps[cond_dict[c]]
if len(keep_snps) != len(snps):
filt_count += len(snps) - len(keep_snps)
self.filter_snps(keep_snps, chrom=c)
if filt_count > 0:
if self.verbose:
print(f"> Filtered {filt_count} SNPs due to MAC/MAF thresholds.")
def filter_duplicated_snps(self):
"""
This method filters all duplicated SNPs.
TODO: Add options to keep at least one of the duplicated snps.
:return:
"""
for c, snps in self.snps.items():
u_snps, counts = np.unique(snps, return_counts=True)
if len(u_snps) < len(snps):
# Keep only SNPs which occur once in the sequence:
self.filter_snps(u_snps[counts == 1], chrom=c)
def filter_samples(self, keep_samples):
common_samples = intersect_arrays(self._iid, keep_samples, return_index=True)
for c in self.chromosomes:
if self.genotypes is not None and c in self.genotypes:
self.genotypes[c] = self.genotypes[c].isel(sample=common_samples)
self._fid = self._fid[common_samples]
self._iid = self._iid[common_samples]
def read_annotations(self, annot_files):
"""
Read the annotation files
"""
if annot_files is None:
return
if not iterable(annot_files):
annot_files = [annot_files]
self.annotations = {}
for annot_file in tqdm(annot_files,
total=len(annot_files),
desc="Reading annotation files",
disable=not self.verbose):
annot_mat = AnnotationMatrix.from_file(annot_file)
annot_mat.filter_snps(self.snps[annot_mat.chromosome])
self.annotations[annot_mat.chromosome] = annot_mat
def read_genotypes_plink(self, bed_files):
"""
This is an alternative to `.read_genotypes` that doesn't attempt to
parse to process the genotype matrix and instead focuses on loading
the individual and SNP data and preparing it for downstream tasks.
"""
if bed_files is None:
return
if not iterable(bed_files):
bed_files = get_filenames(bed_files, extension='.bed')
self._snps = {}
self._a1 = {}
self._a2 = {}
self._cm_pos = {}
self._bp_pos = {}
self.bed_files = {}
for i, bfile in tqdm(enumerate(bed_files),
total=len(bed_files),
desc="Reading genotype files",
disable=not self.verbose):
# Read plink file:
try:
bim_df = parse_bim_file(bfile)
if i == 0:
fam_df = parse_fam_file(bfile)
except Exception as e:
self.genotypes = None
self._fid = None
self._iid = None
raise e
# Filter individuals:
if self.keep_individuals is not None and i == 0:
common_samples = intersect_arrays(fam_df.IID.values, self.keep_individuals, return_index=True)
fam_df = fam_df.iloc[common_samples, ]
# Filter SNPs:
if self.keep_snps is not None:
common_snps = intersect_arrays(bim_df.SNP.values, self.keep_snps, return_index=True)
bim_df = bim_df.iloc[common_snps, ]
# Obtain information about current chromosome:
chr_id = int(bim_df.CHR.values[0])
# Add filename to the bedfiles dictionary:
self.bed_files[chr_id] = bfile
# Keep track of the SNPs:
self._snps[chr_id] = bim_df.SNP.values
self._a1[chr_id] = bim_df.A1.values
self._a2[chr_id] = bim_df.A2.values
self._bp_pos[chr_id] = bim_df.POS.values
self._cm_pos[chr_id] = bim_df.cM.values
if i == 0:
self._fid = fam_df.FID.values
self._iid = fam_df.IID.values
def read_genotypes(self, bed_files):
"""
Read the genotype files
"""
if self.use_plink:
return self.read_genotypes_plink(bed_files)
if bed_files is None:
return
if not iterable(bed_files):
bed_files = get_filenames(bed_files, extension='.bed')
self._snps = {}
self._a1 = {}
self._a2 = {}
self._cm_pos = {}
self._bp_pos = {}
self.genotypes = {}
self.bed_files = {}
for i, bfile in tqdm(enumerate(bed_files),
total=len(bed_files),
desc="Reading genotype files",
disable=not self.verbose):
# Read plink file:
try:
gt_ac = read_plink1_bin(bfile + ".bed", ref="a0", verbose=False)
except ValueError:
gt_ac = read_plink1_bin(bfile, ref="a0", verbose=False)
except Exception as e:
self.genotypes = None
self._fid = None
self._iid = None
raise e
gt_ac = gt_ac.set_index(variant='snp')
# Filter individuals:
if self.keep_individuals is not None:
common_samples = intersect_arrays(gt_ac.sample.values, self.keep_individuals)
gt_ac = gt_ac.sel(sample=common_samples)
# Filter SNPs:
if self.keep_snps is not None:
common_snps = intersect_arrays(gt_ac.variant.values, self.keep_snps)
gt_ac = gt_ac.sel(variant=common_snps)
# Obtain information about current chromosome:
chr_id = int(gt_ac.chrom.values[0])
# Add filename to the bedfiles dictionary:
self.bed_files[chr_id] = bfile
# Keep track of the SNPs:
self._snps[chr_id] = gt_ac.variant.values
self._a1[chr_id] = gt_ac.variant.a0.values
self._a2[chr_id] = gt_ac.variant.a1.values
self._bp_pos[chr_id] = gt_ac.variant.pos.values
self._cm_pos[chr_id] = gt_ac.variant.cm.values
if i == 0:
self._fid = gt_ac.fid.values
self._iid = gt_ac.iid.values
self.genotypes[chr_id] = gt_ac
def read_phenotypes(self, phenotype_file, header=None,
phenotype_col=2, standardize=True,
phenotype_id=None,
filter_na=True):
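"""
Read the phenotype file (plink-style: FID, IID, phenotype) and match it to the genotyped samples.
:param phenotype_file: Path to the whitespace-separated phenotype file.
:param header: Header row specification passed to `pandas.read_csv` (None means no header).
:param phenotype_col: Index of the column containing the phenotype values.
:param standardize: If True and the phenotype likelihood is Gaussian, standardize the phenotype.
:param phenotype_id: Optional identifier for the phenotype; a random one is generated if None.
:param filter_na: If True, remove individuals with missing phenotype values.
"""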
if phenotype_file is None:
return
if self.verbose:
print("> Reading phenotype files...")
try:
phe = pd.read_csv(phenotype_file, sep=r"\s+", header=header)
phe = phe.iloc[:, [0, 1, phenotype_col]]
phe.columns = ['FID', 'IID', 'phenotype']
phe['IID'] = phe['IID'].astype(type(self._iid[0]))
except Exception as e:
raise e
phe = pd.DataFrame({'IID': self._iid}).merge(phe)
# Filter individuals with missing phenotypes:
# TODO: Add functionality to filter on other values (e.g. -9)
if filter_na:
phe = phe.dropna(subset=['phenotype'])
self.filter_samples(phe['IID'].values)
if self.phenotype_likelihood == 'binomial':
unique_vals = sorted(phe['phenotype'].unique())
if unique_vals == [1, 2]:
# Plink coding for case/control
phe['phenotype'] -= 1
elif unique_vals != [0, 1]:
raise ValueError(f"Unknown values for binary traits: {unique_vals}")
self.phenotypes = phe['phenotype'].values
if standardize and self.phenotype_likelihood == 'gaussian':
self.phenotypes -= self.phenotypes.mean()
self.phenotypes /= self.phenotypes.std()
if phenotype_id is None:
self.phenotype_id = str(np.random.randint(1, 1000))
else:
self.phenotype_id = phenotype_id
def read_summary_stats(self, sumstats_files, sumstats_format='magenpy'):
"""
TODO: implement parsers for summary statistics
TODO: Move these parsers to `parsers.py`
"""
if sumstats_files is None:
return
if not iterable(sumstats_files):
sumstats_files = get_filenames(sumstats_files)
ss = []
print("> Reading GWAS summary statistics...")
for ssf in sumstats_files:
ss_df = pd.read_csv(ssf, delim_whitespace=True)
# Drop missing values:
ss_df = ss_df.dropna()
ss.append(ss_df)
ss = pd.concat(ss)
# ------------- Standardize inputs -------------
# TODO: Move this part to parsers.py
if sumstats_format == 'LDSC':
# Useful here: https://www.biostars.org/p/319584/
pass
elif sumstats_format == 'SBayesR':
pass
elif sumstats_format == 'plink':
ss.rename(columns={
'#CHROM': 'CHR',
'ID': 'SNP',
'P': 'PVAL',
'OBS_CT': 'N',
'A1_FREQ': 'MAF'
}, inplace=True)
ss['A2'] = ss.apply(lambda x: [x['ALT1'], x['REF']][x['A1'] == x['ALT1']], axis=1)
ss['Z'] = ss['BETA'] / ss['SE']
# -------------------------------------------------
# If SNP list is not set, initialize it using the sumstats table:
if self.snps is None:
# Check that the sumstats table has the following columns:
assert all([col in ss.columns for col in ('CHR', 'POS', 'SNP', 'A1')])
self._snps = {}
self._a1 = {}
self._bp_pos = {}
for c in ss['CHR'].unique():
m_ss = ss.loc[ss['CHR'] == c].sort_values('POS')
self._snps[c] = m_ss['SNP'].values
self._a1[c] = m_ss['A1'].values
self._bp_pos[c] = m_ss['POS'].values
# -------------------------------------------------
# Prepare the fields for the sumstats provided in the table:
if 'A1' in ss.columns:
update_a1 = True
else:
update_a1 = False
if 'POS' in ss.columns:
update_pos = True
else:
update_pos = False
if 'A2' in ss.columns:
update_a2 = True
self._a2 = {}
else:
update_a2 = False
if 'MAF' in ss.columns:
self.maf = {}
update_maf = True
else:
update_maf = False
if 'N' in ss.columns:
self.n_per_snp = {}
update_n = True
else:
update_n = False
if 'BETA' in ss.columns:
self.beta_hats = {}
update_beta = True
else:
update_beta = False
if 'Z' in ss.columns:
self.z_scores = {}
update_z = True
else:
update_z = False
if 'SE' in ss.columns:
self.se = {}
update_se = True
else:
update_se = False
if 'PVAL' in ss.columns:
self.p_values = {}
update_pval = True
else:
update_pval = False
for c, snps in self.snps.items():
m_ss = merge_snp_tables(pd.DataFrame({'SNP': snps, 'A1': self._a1[c]}), ss)
if len(m_ss) > 1:
# Filter the SNP list first!
if len(snps) != len(m_ss):
self.filter_snps(m_ss['SNP'], chrom=c)
# Populate the sumstats fields:
if update_a1:
self._a1[c] = m_ss['A1'].values
if update_pos:
self._bp_pos[c] = m_ss['POS'].values
if update_a2:
self._a2[c] = m_ss['A2'].values
if update_maf:
self.maf[c] = m_ss['MAF'].values
if update_n:
self.n_per_snp[c] = m_ss['N'].values
if update_beta:
self.beta_hats[c] = m_ss['BETA'].values
if update_z:
self.z_scores[c] = m_ss['Z'].values
if update_se:
self.se[c] = m_ss['SE'].values
if update_pval:
self.p_values[c] = m_ss['PVAL'].values
print(f"> Read summary statistics data for {self.M} SNPs.")
def read_ld(self, ld_store_files):
"""
:param ld_store_files: Path(s) or a wildcard expression pointing to the Zarr-formatted LD matrix stores (one store per chromosome).
"""
if self.verbose:
print("> Reading LD matrices...")
if not iterable(ld_store_files):
ld_store_files = get_filenames(ld_store_files, extension='.zarr')
self.ld = {}
if self._snps is None:
init_snps = True
self._snps = {}
self._a1 = {}
self._bp_pos = {}
self.maf = {}
else:
init_snps = False
for f in ld_store_files:
z = LDMatrix.from_path(f)
self.ld[z.chromosome] = z
# If the SNP list is not set,
# initialize it with the SNP list from the LD store:
if init_snps:
self._snps[z.chromosome] = z.snps
self._a1[z.chromosome] = z.a1
self._bp_pos[z.chromosome] = z.bp_position
self.maf[z.chromosome] = z.maf
def load_ld(self):
if self.ld is not None:
for ld in self.ld.values():
ld.load()
def release_ld(self):
if self.ld is not None:
for ld in self.ld.values():
ld.release()
def compute_ld_boundaries(self, recompute=False):
self.ld_boundaries = {}
if recompute:
# If recomputing from existing LD matrices:
shapes = self.shapes
for c, ld in tqdm(self.ld.items(), total=len(self.chromosomes),
desc='Recomputing LD boundaries',
disable=not self.verbose):
common_idx = intersect_arrays(ld.snps, self.snps[c], return_index=True)
M = shapes[c]
estimator = ld.ld_estimator
est_properties = ld.estimator_properties
if estimator == 'sample':
self.ld_boundaries[c] = np.array((np.zeros(M), np.ones(M)*M)).astype(np.int64)
elif estimator == 'block':
self.ld_boundaries[c] = find_ld_block_boundaries(ld.bp_position[common_idx],
np.array(est_properties['LD blocks'], dtype=int))
elif estimator == 'windowed':
if est_properties['Window units'] == 'cM':
self.ld_boundaries[c] = find_windowed_ld_boundaries(ld.cm_position[common_idx],
est_properties['Window cutoff'])
else:
idx = np.arange(M)
self.ld_boundaries[c] = np.array((idx - est_properties['Window cutoff'],
idx + est_properties['Window cutoff'])).astype(np.int64)
self.ld_boundaries[c] = np.clip(self.ld_boundaries[c], 0, M)
else:
self.ld_boundaries[c] = find_shrinkage_ld_boundaries(ld.cm_position[common_idx],
est_properties['Genetic map Ne'],
est_properties['Genetic map sample size'],
est_properties['Cutoff'])
else:
for c, M in tqdm(self.shapes.items(),
total=len(self.chromosomes),
desc="Computing LD boundaries",
disable=not self.verbose):
if self.ld_estimator == 'sample':
self.ld_boundaries[c] = np.array((np.zeros(M), np.ones(M)*M)).astype(np.int64)
elif self.ld_estimator == 'block':
if self._bp_pos and c in self._bp_pos:
self.ld_boundaries[c] = find_ld_block_boundaries(self._bp_pos[c].astype(int),
self.ld_blocks[c])
else:
raise Exception("SNP position in BP is missing!")
elif self.ld_estimator == 'windowed':
if self.window_unit == 'cM':
if self._cm_pos and c in self._cm_pos:
self.ld_boundaries[c] = find_windowed_ld_boundaries(self._cm_pos[c],
self.cm_window_cutoff)
else:
raise Exception("cM information for SNPs is missing. "
"Make sure to populate it with a reference genetic map "
"or use a pre-specified window size around each SNP.")
else:
idx = np.arange(M)
self.ld_boundaries[c] = np.array((idx - self.window_size_cutoff,
idx + self.window_size_cutoff)).astype(np.int64)
self.ld_boundaries[c] = np.clip(self.ld_boundaries[c],
0, M)
elif self.ld_estimator == 'shrinkage':
if self._cm_pos and c in self._cm_pos:
self.ld_boundaries[c] = find_shrinkage_ld_boundaries(self._cm_pos[c],
self.genmap_Ne,
self.genmap_sample_size,
self.shrinkage_cutoff)
else:
raise Exception("cM information for SNPs is missing. "
"Make sure to populate it with a reference genetic map "
"or use a different LD estimator.")
return self.ld_boundaries
def compute_ld_plink(self):
"""
Compute the Linkage-Disequilibrium (LD) matrix between SNPs using plink1.9
"""
if not is_cmd_tool(self.config.get('plink1.9_path')):
raise Exception("To use `plink` as a backend for LD calculation, "
"make sure that the path for the plink1.9 executable is configured properly.")
if self.maf is None:
self.compute_allele_frequency()
if self.ld_boundaries is None:
self.compute_ld_boundaries()
tmp_ld_dir = tempfile.TemporaryDirectory(dir=self.temp_dir, prefix='ld_')
self.cleanup_dir_list.append(tmp_ld_dir)
# Create the samples file:
keep_file = osp.join(tmp_ld_dir.name, 'samples.keep')
keep_table = self.to_individual_table()
keep_table.to_csv(keep_file, index=False, header=False, sep="\t")
self.ld = {}
for c, b_file in tqdm(self.bed_files.items(),
total=len(self.chromosomes),
desc="Computing LD matrices using PLINK",
disable=not self.verbose):
snp_keepfile = osp.join(tmp_ld_dir.name, f"chr_{c}.keep")
pd.DataFrame({'SNP': self.snps[c]}).to_csv(snp_keepfile,
index=False, header=False)
plink_output = osp.join(tmp_ld_dir.name, f"chr_{c}")
cmd = [
self.config.get('plink1.9_path'),
f"--bfile {b_file.replace('.bed', '')}",
f"--keep {keep_file}",
f"--extract {snp_keepfile}",
"--keep-allele-order",
f"--out {plink_output}",
f"--threads {self.n_threads}"
]
# For the block and shrinkage estimators, ask plink to compute
# LD between focal SNP and max(window_size) around it.
# Then, once we have a square matrix out of that, we can apply
# a per-SNP filter:
max_window_size = (self.ld_boundaries[c][1, :] - self.ld_boundaries[c][0, :]).max() + 1
max_kb = round(.001*(self.bp_pos[c].max() - self.bp_pos[c].min()))
if self.ld_estimator in ('shrinkage', 'block'):
cmd.append("--r gz")
cmd.append(f"--ld-window {max_window_size} "
f"--ld-window-kb {max_kb}")
elif self.ld_estimator == 'windowed':
cmd.append("--r gz")
cmd.append(f"--ld-window {len(self.snps[c]) + 1} "
f"--ld-window-kb {max_kb} "
f"--ld-window-cm {self.cm_window_cutoff}")
else:
cmd.append("--r bin")
cmd.append(f"--ld-window {len(self.snps[c]) + 1} "
f"--ld-window-kb {max_kb} ")
run_shell_script(" ".join(cmd))
# Convert from PLINK LD files to Zarr:
fin_ld_store = osp.join(self.output_dir, 'ld', 'chr_' + str(c))
if self.ld_estimator == 'sample':
z_ld_mat = from_plink_ld_bin_to_zarr(f"{plink_output}.ld.bin",
fin_ld_store,
self.ld_boundaries[c])
else:
z_ld_mat = from_plink_ld_table_to_zarr_chunked(f"{plink_output}.ld.gz",
fin_ld_store,
self.ld_boundaries[c],
self.snps[c])
# Add LD matrix properties:
z_ld_mat.attrs['Chromosome'] = c
z_ld_mat.attrs['Sample size'] = self.sample_size
z_ld_mat.attrs['SNP'] = list(self.snps[c])
z_ld_mat.attrs['LD estimator'] = self.ld_estimator
z_ld_mat.attrs['LD boundaries'] = self.ld_boundaries[c].tolist()
ld_estimator_properties = None
if self.ld_estimator == 'shrinkage':
z_ld_mat = shrink_ld_matrix(z_ld_mat,
self.cm_pos[c],
self.genmap_Ne,
self.genmap_sample_size,
self.shrinkage_cutoff,
ld_boundaries=self.ld_boundaries[c])
ld_estimator_properties = {
'Genetic map Ne': self.genmap_Ne,
'Genetic map sample size': self.genmap_sample_size,
'Cutoff': self.shrinkage_cutoff
}
elif self.ld_estimator == 'windowed':
ld_estimator_properties = {
'Window units': self.window_unit,
'Window cutoff': [self.window_size_cutoff, self.cm_window_cutoff][self.window_unit == 'cM']
}
elif self.ld_estimator == 'block':
ld_estimator_properties = {
'LD blocks': self.ld_blocks[c].tolist()
}
# Add detailed LD matrix properties:
z_ld_mat.attrs['BP'] = list(map(int, self.bp_pos[c]))
z_ld_mat.attrs['cM'] = list(map(float, self.cm_pos[c]))
z_ld_mat.attrs['MAF'] = list(map(float, self.maf[c]))
z_ld_mat.attrs['A1'] = list(self._a1[c])
if ld_estimator_properties is not None:
z_ld_mat.attrs['Estimator properties'] = ld_estimator_properties
self.ld[c] = LDMatrix(z_ld_mat)
self.ld[c].set_store_attr('LDScore', self.ld[c].compute_ld_scores().tolist())
_validate_ld_matrix(self.ld[c])
def compute_ld(self):
"""
Compute the Linkage-Disequilibrium (LD) matrix between SNPs.
This function only considers correlations between SNPs on the same chromosome.
The function involves computing X'X and then applying transformations to it,
according to the estimator that the user specifies.
"""
if self.use_plink:
self.compute_ld_plink()
return
if self.maf is None:
self.compute_allele_frequency()
if self.ld_boundaries is None:
self.compute_ld_boundaries()
tmp_ld_dir = tempfile.TemporaryDirectory(dir=self.temp_dir, prefix='ld_')
self.cleanup_dir_list.append(tmp_ld_dir)
self.ld = {}
for c, g_data in tqdm(self.genotypes.items(),
total=len(self.chromosomes),
desc="Computing LD matrices",
disable=not self.verbose):
tmp_ld_store = osp.join(tmp_ld_dir.name, 'chr_' + str(c))
fin_ld_store = osp.join(self.output_dir, 'ld', 'chr_' + str(c))
# Re-chunk the array
g_data = g_data.chunk((min(1024, g_data.shape[0]),
min(1024, g_data.shape[1])))
# Standardize the genotype matrix and fill missing data with zeros:
g_mat = standardize_genotype_matrix(g_data).fillna(0.)
# Compute the LD matrix:
ld_mat = (da.dot(g_mat.T, g_mat) / self.N).astype(np.float64)
ld_mat.to_zarr(tmp_ld_store, overwrite=True)
z_ld_mat = zarr.open(tmp_ld_store)
z_ld_mat = rechunk_zarr(z_ld_mat,
ld_mat.rechunk({0: 'auto', 1: None}).chunksize,
tmp_ld_store + '_rechunked',
tmp_ld_store + '_intermediate')
# Add LD matrix properties:
z_ld_mat.attrs['Chromosome'] = c
z_ld_mat.attrs['Sample size'] = self.sample_size
z_ld_mat.attrs['SNP'] = list(self.snps[c])
z_ld_mat.attrs['LD estimator'] = self.ld_estimator
z_ld_mat.attrs['LD boundaries'] = self.ld_boundaries[c].tolist()
ld_estimator_properties = None
if self.ld_estimator == 'sample':
z_ld_mat = move_ld_store(z_ld_mat, fin_ld_store)
if self.ld_estimator == 'shrinkage':
z_ld_mat = shrink_ld_matrix(z_ld_mat,
self.cm_pos[c],
self.genmap_Ne,
self.genmap_sample_size,
self.shrinkage_cutoff)
ld_estimator_properties = {
'Genetic map Ne': self.genmap_Ne,
'Genetic map sample size': self.genmap_sample_size,
'Cutoff': self.shrinkage_cutoff
}
elif self.ld_estimator == 'windowed':
ld_estimator_properties = {
'Window units': self.window_unit,
'Window cutoff': [self.window_size_cutoff, self.cm_window_cutoff][self.window_unit == 'cM']
}
elif self.ld_estimator == 'block':
ld_estimator_properties = {
'LD blocks': self.ld_blocks[c].tolist()
}
if self.ld_estimator in ('block', 'shrinkage', 'windowed'):
z_ld_mat = zarr_array_to_ragged(z_ld_mat,
fin_ld_store,
bounds=self.ld_boundaries[c],
delete_original=True)
# Add detailed LD matrix properties:
z_ld_mat.attrs['BP'] = list(map(int, self.bp_pos[c]))
z_ld_mat.attrs['cM'] = list(map(float, self.cm_pos[c]))
z_ld_mat.attrs['MAF'] = list(map(float, self.maf[c]))
z_ld_mat.attrs['A1'] = list(self._a1[c])
if ld_estimator_properties is not None:
z_ld_mat.attrs['Estimator properties'] = ld_estimator_properties
self.ld[c] = LDMatrix(z_ld_mat)
self.ld[c].set_store_attr('LDScore', self.ld[c].compute_ld_scores().tolist())
_validate_ld_matrix(self.ld[c])
def get_ld_matrices(self):
return self.ld
def get_ld_boundaries(self):
if self.ld is None:
return None
return {c: ld.get_masked_boundaries() for c, ld in self.ld.items()}
def realign_ld(self):
"""
This method realigns a pre-computed LD matrix with the
current genotype matrix and/or summary statistics.
"""
if self.ld is None:
raise Exception("No pre-computed LD matrices are provided.")
self.compute_ld_boundaries(recompute=True)
ld_tmpdir = tempfile.TemporaryDirectory(dir=self.temp_dir, prefix='ld_')
self.cleanup_dir_list.append(ld_tmpdir)
for c, snps in tqdm(self.snps.items(), total=len(self.chromosomes),
desc="Matching LD matrices with sumstats/genotypes",
disable=not self.verbose):
ld_snps = self.ld[c].snps
if not np.array_equal(snps, ld_snps):
self.ld[c] = LDMatrix(
zarr_array_to_ragged(self.ld[c].z_array,
dir_store=osp.join(ld_tmpdir.name, f'chr_{c}'),
keep_snps=snps,
bounds=self.ld_boundaries[c])
)
def harmonize_data(self):
"""
This method ensures that all the data sources (reference genotype,
LD matrices, summary statistics) are aligned.
"""
if self.verbose:
print("> Harmonizing data...")
update_ld = False
sumstats_tables = self.to_snp_table(per_chromosome=True, col_subset=['SNP', 'A1', 'MAF', 'BETA', 'Z'])
for c, snps in self.snps.items():
# Harmonize SNPs in LD store and summary statistics/genotype matrix:
if self.ld is not None:
self.ld[c].set_mask(None)
ld_snps = self.ld[c].to_snp_table(col_subset=['SNP', 'A1'])
matched_snps = merge_snp_tables(ld_snps, sumstats_tables[c])
# If the SNP list doesn't align with the matched SNPs,
# then filter the SNP list
if len(snps) != len(matched_snps):
self.filter_snps(matched_snps['SNP'].values, chrom=c)
if len(matched_snps) != len(ld_snps):
# If the percentage of SNPs that will need to be excluded from the
# LD matrix exceeds 30% (and greater than 5000), then copy and update the matrix.
# Otherwise, introduce a mask that ensures those SNPs are excluded from
# downstream tasks.
#
# NOTE: This behavior is deprecated for now...
# We simply apply a mask to the LD matrix, and depending on the size
# unmasked elements, downstream tasks can decide whether or not to load
# the matrix to memory.
#
# To be revisited...
#n_miss = len(ld_snps) - len(matched_snps)
#if float(n_miss) / len(ld_snps) > .3 and n_miss > 5000:
# update_ld = True
#else:
remain_index = intersect_arrays(ld_snps['SNP'].values,
matched_snps['SNP'].values,
return_index=True)
mask = np.zeros(len(ld_snps))
mask[remain_index] = 1
self.ld[c].set_mask(mask.astype(bool))
flip_01 = matched_snps['flip'].values
num_flips = flip_01.sum()
if num_flips > 0:
print(f"> Detected {num_flips} SNPs with strand flipping. Correcting summary statistics...")
# Correct strand information:
self._a1[c] = matched_snps['A1'].values
# Correct MAF:
if self.maf is not None:
self.maf[c] = matched_snps['MAF'].values
# Correct BETA:
if self.beta_hats is not None:
self.beta_hats[c] = matched_snps['BETA'].values
# Correct Z-score:
if self.z_scores is not None:
self.z_scores[c] = matched_snps['Z'].values
if update_ld:
self.realign_ld()
def score_plink(self, betas=None):
"""
Perform linear scoring using PLINK2
:param betas: A dictionary mapping each chromosome to its effect sizes (betas). If None, defaults to the stored beta hats.
"""
if betas is None:
if self.beta_hats is None:
raise Exception("Neither betas nor beta hats are provided or set."
" Please provide betas to perform prediction.")
else:
betas = {c: b for c, b in self.beta_hats.items()}
# Initialize the PGS object with zeros
# The construction here accounts for multiple betas per SNP
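# NOTE: the try/except below probes the shape of the betas. A 1-D array (or an explicit
# second dimension of size 1) ends up in the IndexError branch, where the betas are
# reshaped to a single column and only PLINK score column 3 is requested; otherwise,
# columns 3..(3 + betas_shape - 1) are scored in a single pass.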
try:
betas_shape = betas[next(iter(betas))].shape[1]
if betas_shape == 1:
raise IndexError
score_col_nums = f"--score-col-nums 3-{3 + betas_shape - 1}"
except IndexError:
betas_shape = 1
for c, b in betas.items():
betas[c] = b.reshape(-1, 1)
score_col_nums = f"--score-col-nums 3"
pgs = np.zeros(shape=(self.N, betas_shape))
# Create a temporary directory for the score files:
score_tmpdir = tempfile.TemporaryDirectory(dir=self.temp_dir, prefix='score_')
self.cleanup_dir_list.append(score_tmpdir)
# Create the samples file:
keep_file = osp.join(score_tmpdir.name, 'samples.keep')
keep_table = self.to_individual_table()
keep_table.to_csv(keep_file, index=False, header=False, sep="\t")
for c, beta in tqdm(betas.items(), total=len(betas),
desc="Generating polygenic scores using PLINK",
disable=not self.verbose):
eff_file = osp.join(score_tmpdir.name, f'chr_{c}.txt')
df = pd.DataFrame({'SNP': self.snps[c], 'A1': self.alt_alleles[c]})
for i in range(betas_shape):
df['BETA' + str(i)] = betas[c][:, i]
df = df.loc[df[['BETA' + str(i) for i in range(betas_shape)]].sum(axis=1) != 0]
try:
df.to_csv(eff_file, index=False, sep="\t")
cmd = [
self.config.get('plink2_path'),
f"--bfile {self.bed_files[c].replace('.bed', '')}",
f"--keep {keep_file}",
f"--score {eff_file} 1 2 header-read cols=+scoresums variance-standardize",
score_col_nums,
f"--out {eff_file.replace('.txt', '')}",
f"--threads {self.n_threads}"
]
try:
run_shell_script(" ".join(cmd))
if not osp.isfile(eff_file.replace('.txt', '.sscore')):
raise FileNotFoundError
except Exception as e:
raise Exception("plink polygenic scoring failed to run!\nDeployed command:" +
" ".join(cmd))
dtypes = {'FID': str, 'IID': str}
for i in range(betas_shape):
dtypes.update({'PRS' + str(i): np.float64})
chr_pgs = pd.read_csv(eff_file.replace('.txt', '.sscore'), delim_whitespace=True,
names=['FID', 'IID'] + ['PRS' + str(i) for i in range(betas_shape)],
skiprows=1,
usecols=[0, 1] + [4 + betas_shape + i for i in range(betas_shape)],
dtype=dtypes)
chr_pgs = keep_table.astype({'FID': str, 'IID': str}).merge(chr_pgs)
pgs += chr_pgs[['PRS' + str(i) for i in range(betas_shape)]].values
except Exception as e:
raise e
if betas_shape == 1:
pgs = pgs.flatten()
return pgs
def score(self, betas=None):
if self.use_plink:
return self.score_plink(betas)
if betas is None:
if self.beta_hats is None:
raise Exception("Neither betas nor beta hats are provided or set."
" Please provide betas to perform prediction.")
else:
betas = {c: b for c, b in self.beta_hats.items()}
if not self.standardize_genotype and self.maf is None:
self.compute_allele_frequency()
try:
betas_shape = betas[next(iter(betas))].shape[1]
except IndexError:
betas_shape = 1
for c, b in betas.items():
betas[c] = b.reshape(-1, 1)
pgs = np.zeros(shape=(self.N, betas_shape))
for c, gt in tqdm(self.genotypes.items(), total=len(self.chromosomes),
desc="Generating polygenic scores",
disable=not self.verbose):
if self.standardize_genotype:
pgs += np.dot(standardize_genotype_matrix(gt).fillna(0.), betas[c])
else:
pgs += np.dot(gt.fillna(self.maf[c]), betas[c])
if betas_shape == 1:
pgs = pgs.flatten()
return pgs
def predict(self, betas=None):
pgs = self.score(betas)
if self.phenotype_likelihood == 'binomial':
# apply sigmoid function:
# TODO: Check this (maybe convert to probit?)
pgs = 1./(1. + np.exp(-pgs))
return pgs
def perform_gwas_plink(self):
"""
Perform GWAS using PLINK
"""
# Create a temporary directory for the gwas files:
gwas_tmpdir = tempfile.TemporaryDirectory(dir=self.temp_dir, prefix='gwas_')
self.cleanup_dir_list.append(gwas_tmpdir)
# Output the phenotype file:
phe_fname = osp.join(gwas_tmpdir.name, "pheno.txt")
phe_table = self.to_phenotype_table()
if self.phenotype_likelihood == 'binomial':
phe_table['phenotype'] += 1
phe_table.to_csv(phe_fname, sep="\t", index=False, header=False)
plink_reg_type = ['linear', 'logistic'][self.phenotype_likelihood == 'binomial']
self.n_per_snp = {}
self.maf = {}
self.beta_hats = {}
self.se = {}
self.z_scores = {}
self.p_values = {}
for c, bf in tqdm(self.bed_files.items(),
total=len(self.chromosomes),
desc="Performing GWAS using PLINK",
disable=not self.verbose):
# Output a keep file for SNPs:
snp_keepfile = osp.join(gwas_tmpdir.name, f"chr_{c}.keep")
pd.DataFrame({'SNP': self.snps[c]}).to_csv(snp_keepfile,
index=False, header=False)
plink_output = osp.join(gwas_tmpdir.name, f"chr_{c}")
cmd = [
self.config.get('plink2_path'),
f"--bfile {bf.replace('.bed', '')}",
f"--extract {snp_keepfile}",
f"--{plink_reg_type} hide-covar cols=chrom,pos,alt1,ref,a1freq,nobs,beta,se,tz,p",
f"--pheno {phe_fname}",
f"--out {plink_output}",
f"--threads {self.n_threads}"
]
if self.standardize_phenotype:
cmd.append('--variance-standardize')
run_shell_script(" ".join(cmd))
output_fname = plink_output + f".PHENO1.glm.{plink_reg_type}"
if not osp.isfile(output_fname):
if plink_reg_type == 'logistic' and osp.isfile(output_fname + ".hybrid"):
output_fname += ".hybrid"
else:
raise FileNotFoundError
res = pd.read_csv(output_fname, delim_whitespace=True)
res.rename(columns={
'#CHROM': 'CHR',
'ID': 'SNP',
'P': 'PVAL',
'OBS_CT': 'N',
'A1_FREQ': 'MAF'
}, inplace=True)
# TODO: Filter NaN values that may arise from PLINK.
# Merge to make sure that summary statistics are in order:
res = merge_snp_tables(pd.DataFrame({'SNP': self.snps[c], 'A1': self._a1[c]}), res)
if len(res) != len(self.snps[c]):
raise ValueError("Length of GWAS table does not match number of SNPs.")
self.n_per_snp[c] = res['N'].values
self.maf[c] = res['MAF'].values
self.beta_hats[c] = res['BETA'].values
self.se[c] = res['SE'].values
self.z_scores[c] = self.beta_hats[c] / self.se[c]
self.p_values[c] = res['PVAL'].values
def perform_gwas(self):
"""
Perform GWAS using closed-form solutions.
(Only applicable to quantitative traits)
"""
if self.use_plink:
self.perform_gwas_plink()
else:
if self.phenotype_likelihood == 'binomial':
raise Exception("Software does not support GWAS with case/control phenotypes. Use plink instead.")
if self.n_per_snp is None:
self.compute_allele_frequency()
for c in tqdm(self.chromosomes, desc="Performing GWAS", disable=not self.verbose):
self.verbose = False
self.compute_beta_hats(chrom=c)
self.compute_standard_errors(chrom=c)
self.compute_z_scores(chrom=c)
self.compute_p_values(chrom=c)
self.verbose = True
def estimate_snp_heritability(self, per_chromosome=False):
"""
Provides an estimate of SNP heritability from summary statistics using
a simplified version of the LD Score Regression framework.
E[Chi^2_j] = (N*h_g^2 / M)*l_j + intercept
Where the response is the Chi-Squared statistic for SNP j
and the variable is its LD score l_j.
NOTE: For now, we constrain the intercept to 1.
TODO: Maybe move into its own module?
:param per_chromosome: Estimate heritability per chromosome
"""
if self.ld is None or self.z_scores is None:
raise Exception("Estimating SNP heritability requires z-scores and LD matrices!")
chr_ldsc = {}
chr_xi_sq = {}
for c, ldm in tqdm(self.ld.items(),
total=len(self.chromosomes),
desc="Estimating SNP-heritability",
disable=not self.verbose):
chr_ldsc[c] = ldm.ld_score
chr_xi_sq[c] = self.z_scores[c]**2
if per_chromosome:
chr_h2g = {}
for c in chr_ldsc:
# h2g, int, _, _, _ = stats.linregress(chr_ldsc[c], chr_xi_sq[c])
# chr_h2g[c] = h2g
chr_h2g[c] = (chr_xi_sq[c].mean() - 1.)*len(chr_ldsc[c]) / (chr_ldsc[c].mean()*self.N)
return chr_h2g
else:
concat_ldsc = np.concatenate(list(chr_ldsc.values()))
concat_xi_sq = np.concatenate(list(chr_xi_sq.values()))
# h2g, int, _, _, _ = stats.linregress(concat_ldsc, concat_xi_sq)
return (concat_xi_sq.mean() - 1.)*len(concat_ldsc) / (concat_ldsc.mean()*self.N)
def compute_allele_frequency_plink(self):
# Create a temporary directory for the allele frequency files:
freq_tmpdir = tempfile.TemporaryDirectory(dir=self.temp_dir, prefix='freq_')
self.cleanup_dir_list.append(freq_tmpdir)
# Create the samples file:
keep_file = osp.join(freq_tmpdir.name, 'samples.keep')
keep_table = self.to_individual_table()
keep_table.to_csv(keep_file, index=False, header=False, sep="\t")
self.maf = {}
for c, bf in tqdm(self.bed_files.items(),
total=len(self.chromosomes),
desc="Computing allele frequencies using PLINK",
disable=not self.verbose):
snp_keepfile = osp.join(freq_tmpdir.name, f"chr_{c}.keep")
pd.DataFrame({'SNP': self.snps[c]}).to_csv(snp_keepfile,
index=False, header=False)
plink_output = osp.join(freq_tmpdir.name, f"chr_{c}")
cmd = [
self.config.get('plink2_path'),
f"--bfile {bf.replace('.bed', '')}",
f"--keep {keep_file}",
f"--extract {snp_keepfile}",
f"--freq",
f"--out {plink_output}",
f"--threads {self.n_threads}"
]
run_shell_script(" ".join(cmd))
freq_df = pd.read_csv(plink_output + ".afreq", delim_whitespace=True)
freq_df.rename(columns={'ID': 'SNP', 'ALT': 'A1', 'ALT_FREQS': 'MAF'}, inplace=True)
merged_df = merge_snp_tables(pd.DataFrame({'SNP': self.snps[c], 'A1': self._a1[c]}), freq_df)
if len(merged_df) != len(self.snps[c]):
raise ValueError("Length of allele frequency table does not match number of SNPs.")
self.maf[c] = merged_df['MAF'].values
return self.maf
def compute_allele_frequency(self):
if self.use_plink:
return self.compute_allele_frequency_plink()
if self.n_per_snp is None:
self.compute_n_per_snp()
self.maf = {}
for c, gt in tqdm(self.genotypes.items(),
total=len(self.chromosomes),
desc="Computing allele frequencies",
disable=not self.verbose):
self.maf[c] = (gt.sum(axis=0) / (2. * self.n_per_snp[c])).compute().values
return self.maf
def compute_allele_frequency_variance(self):
if self.maf is None:
self.compute_allele_frequency()
maf_var = {}
for c, maf in tqdm(self.maf.items(),
total=len(self.chromosomes),
desc="Computing allele frequency variance",
disable=not self.verbose):
maf_var[c] = 2.*maf*(1. - maf)
return maf_var
def compute_n_per_snp_plink(self):
# Create a temporary directory for missingness count:
miss_tmpdir = tempfile.TemporaryDirectory(dir=self.temp_dir, prefix='miss_')
self.cleanup_dir_list.append(miss_tmpdir)
# Create the samples file:
keep_file = osp.join(miss_tmpdir.name, 'samples.keep')
keep_table = self.to_individual_table()
keep_table.to_csv(keep_file, index=False, header=False, sep="\t")
self.n_per_snp = {}
for c, bf in tqdm(self.bed_files.items(),
total=len(self.chromosomes),
desc="Computing effective sample size per SNP using PLINK",
disable=not self.verbose):
snp_keepfile = osp.join(miss_tmpdir.name, f"chr_{c}.keep")
pd.DataFrame({'SNP': self.snps[c]}).to_csv(snp_keepfile,
index=False, header=False)
plink_output = osp.join(miss_tmpdir.name, f"chr_{c}")
cmd = [
self.config.get('plink2_path'),
f"--bfile {bf.replace('.bed', '')}",
f"--keep {keep_file}",
f"--extract {snp_keepfile}",
f"--missing variant-only",
f"--out {plink_output}",
f"--threads {self.n_threads}"
]
run_shell_script(" ".join(cmd))
miss_df = pd.read_csv(plink_output + ".vmiss", delim_whitespace=True)
miss_df = pd.DataFrame({'ID': self.snps[c]}).merge(miss_df)
if len(miss_df) != len(self.snps[c]):
raise ValueError("Length of missingness table does not match number of SNPs.")
self.n_per_snp[c] = (miss_df['OBS_CT'] - miss_df['MISSING_CT']).values
return self.n_per_snp
def compute_n_per_snp(self):
if self.use_plink:
return self.compute_n_per_snp_plink()
self.n_per_snp = {}
for c, gt in tqdm(self.genotypes.items(), total=len(self.chromosomes),
desc="Computing effective sample size per SNP",
disable=not self.verbose):
self.n_per_snp[c] = gt.shape[0] - gt.isnull().sum(axis=0).compute().values
return self.n_per_snp
def compute_snp_pseudo_corr(self):
"""
Computes the pseudo-correlation coefficient (standardized beta) between the SNP and
the phenotype (X_jTy / N) from GWAS summary statistics.
Uses Equation 15 in Mak et al. 2017
beta = z_j / sqrt(n - 1 + z_j^2)
Where z_j is the marginal GWAS Z-score
"""
if self.z_scores is None:
raise Exception("Z-scores are not set!")
if self.n_per_snp is None:
raise Exception("Sample size is not set!")
snp_corr = {}
for c, zsc in tqdm(self.z_scores.items(),
total=len(self.chromosomes),
desc="Computing SNP-wise correlations",
disable=not self.verbose):
# z_j / sqrt(n - 1 + z_j^2)
snp_corr[c] = zsc / (np.sqrt(self.n_per_snp[c] - 1 + zsc**2))
return snp_corr
def compute_yy_per_snp(self):
"""
Computes the quantity (y'y)_j/n_j following SBayesR (Lloyd-Jones 2019) and Yang et al. (2012).
(y'y)_j/n_j is the empirical variance for continuous phenotypes and may be estimated
from GWAS summary statistics by re-arranging the equation for the
squared standard error:
SE(b_j)^2 = (Var(y) - Var(x_j)*b_j^2) / (Var(x)*n)
Which gives the following estimate:
(y'y)_j / n_j = (n_j - 2)*SE(b_j)^2 + b_j^2
TODO: Verify the derivation and logic here, ensure it's consistent.
"""
if self.beta_hats is None:
raise Exception("Betas are not set!")
if self.n_per_snp is None:
raise Exception("Sample size is not set!")
if self.se is None:
raise Exception("Standard errors are not set!")
yy = {}
for c, b_hat in tqdm(self.beta_hats.items(),
total=len(self.chromosomes),
desc="Computing SNP-wise yTy",
disable=not self.verbose):
yy[c] = (self.n_per_snp[c] - 2)*self.se[c]**2 + b_hat**2
return yy
def compute_beta_hats(self, chrom=None):
if self.phenotypes is None or self.genotypes is None:
raise Exception("Genotype and phenotype data are needed to compute betas!")
if self.maf is None:
self.compute_allele_frequency()
if chrom is None:
self.beta_hats = {}
chroms = self.chromosomes
else:
if chrom not in self.chromosomes:
raise KeyError("Chromosome is not valid!")
if self.beta_hats is None:
self.beta_hats = {}
chroms = [chrom]
for c in tqdm(chroms, desc="Computing beta hats", disable=not self.verbose):
if self.standardize_genotype:
numer = np.dot(standardize_genotype_matrix(self.genotypes[c]).T, self.phenotypes)
denom = self.n_per_snp[c]
else:
numer = np.dot(self.genotypes[c].fillna(self.maf[c]).T, self.phenotypes)
denom = self.n_per_snp[c] * self.genotypes[c].var(axis=0).compute()
self.beta_hats[c] = numer / denom
return self.beta_hats
def compute_standard_errors(self, chrom=None):
if self.phenotypes is None or self.genotypes is None:
raise Exception("Genotype and phenotype data are needed to compute standard errors!")
if self.n_per_snp is None:
self.compute_n_per_snp()
if chrom is None:
self.se = {}
chroms = self.chromosomes
else:
if chrom not in self.chromosomes:
raise KeyError("Chromosome is not valid!")
if self.se is None:
self.se = {}
chroms = [chrom]
sigma_y = np.var(self.phenotypes) # phenotypic variance
for c in tqdm(chroms, desc="Computing standard errors", disable=not self.verbose):
if self.standardize_genotype:
xtx = self.n_per_snp[c]
else:
xtx = self.n_per_snp[c]*self.genotypes[c].var(axis=0).compute()
self.se[c] = np.sqrt(sigma_y/xtx)
return self.se
def compute_z_scores(self, chrom=None):
if self.beta_hats is None or self.se is None:
raise Exception("beta hats and standard errors are needed to compute z-scores!")
if chrom is None:
self.z_scores = {}
chroms = self.chromosomes
else:
if chrom not in self.chromosomes:
raise KeyError("Chromosome is not valid!")
if self.z_scores is None:
self.z_scores = {}
chroms = [chrom]
for c in tqdm(chroms, desc="Computing z-scores", disable=not self.verbose):
self.z_scores[c] = self.beta_hats[c] / self.se[c]
return self.z_scores
def compute_p_values(self, chrom=None, log10=False):
if self.z_scores is None:
raise Exception("Z-scores are needed to compute p-values!")
if chrom is None:
self.p_values = {}
chroms = self.chromosomes
else:
if chrom not in self.chromosomes:
raise KeyError("Chromosome is not valid!")
if self.p_values is None:
self.p_values = {}
chroms = [chrom]
for c in tqdm(chroms, desc="Computing p-values", disable=not self.verbose):
self.p_values[c] = 2.*stats.norm.sf(abs(self.z_scores[c]))
if log10:
self.p_values[c] = np.log10(self.p_values[c])
return self.p_values
def to_individual_table(self):
if self._iid is None:
raise Exception("Individual data is not provided!")
return pd.DataFrame({
'FID': self._fid,
'IID': self._iid
})
def to_phenotype_table(self):
if self.phenotypes is None:
print("Warning: Phenotypes are not set! Exporting NaNs")
pheno_df = self.to_individual_table()
pheno_df['phenotype'] = self.phenotypes
return pheno_df
def to_snp_table(self, per_chromosome=False, col_subset=None):
if col_subset is None:
col_subset = ['CHR', 'SNP', 'POS', 'A1', 'A2', 'MAF', 'N', 'BETA', 'Z', 'SE', 'PVAL']
snp_tables = {}
for c in self.chromosomes:
ss_df = pd.DataFrame({'SNP': self.snps[c], 'A1': self.alt_alleles[c]})
import datetime as dt
import pandas as pd
# TODO: Unit tests
def compute_work_item_times(df: pd.DataFrame) -> pd.DataFrame:
"""
Takes a DataFrame with the ticket data and computes, per work item, the start time, end time,
duration and duration_in_days.
:param df: Ticket data with columns `work_item`, `timestamp`, `from_phase` and `to_phase`.
:return: A DataFrame with one row per work item and columns `start`, `end`, `duration` and `duration_in_days`.
"""
# We can't be sure that NAs were already renamed. Do it again just to be sure.
df.from_phase.fillna('Start', inplace=True)
df.to_phase.fillna('End', inplace=True)
relevant_columns = ['work_item', 'timestamp']
start_times = df[df.from_phase == 'Start'][relevant_columns]
end_times = df[df.to_phase == 'End'][relevant_columns]
times = pd.merge(start_times, end_times, on='work_item', how='left')
times.rename(columns={'timestamp_x': 'start', 'timestamp_y': 'end'}, inplace=True)
times['duration'] = times['end'] - times['start']
times['duration_in_days'] = times['duration'].apply(lambda x: round(x.total_seconds() / (24*3600), 2))
return times
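# Example usage (hypothetical ticket data):
# events = pd.DataFrame({
#     'work_item': [1, 1],
#     'timestamp': pd.to_datetime(['2021-01-01', '2021-01-03']),
#     'from_phase': [None, 'Review'],
#     'to_phase': ['Review', None],
# })
# compute_work_item_times(events)
# # -> one row for work_item 1 with start 2021-01-01, end 2021-01-03 and duration_in_days 2.0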
def split_times(times: pd.DataFrame, sep_date_str: str) -> (pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame):
"""
Splits the times DataFrame into four different DataFrames
- train: closed before the sep_date
- test: closed after the sep_date and started before the sep_date
- closed: tickets that are closed
- open: tickets that aren't closed yet
:param times: DataFrame containing the start and end times for each work item.
:param sep_date_str: Separation date as a string in the format DD.MM.YYYY
:return: A tuple of (train, test, closed, open) as defined above
"""
open_times = times[pd.isnull(times.duration)]
closed_times = times[~pd.isnull(times.duration)]
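# NOTE: the remainder of this function is not included in this excerpt. A minimal completion
# consistent with the docstring above might look like (sketch, untested):
# sep_date = pd.to_datetime(sep_date_str, format='%d.%m.%Y')
# train = closed_times[closed_times.end < sep_date]
# test = closed_times[(closed_times.end >= sep_date) & (closed_times.start < sep_date)]
# return train, test, closed_times, open_times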
from typing import List, Tuple
import numpy as np
from nptyping import NDArray
from pandas import DataFrame
from scipy.stats import expon
from dlsys.model import DualSysyem
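# NOTE: STEP_SIZE (the simulation time step used in the schedule loops below) is assumed to be
# defined or imported elsewhere in the original module; it is not defined in this excerpt.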
def expon_equally_spaced(mean_interval: float, _min: float,
n: int) -> NDArray[1, float]:
intervals = expon.ppf(
np.linspace(0.01, 0.99, n), scale=mean_interval, loc=_min) - _min
return np.random.choice(intervals, size=len(intervals), replace=False)
RowOfResult = Tuple[float, float] # gk, hk
def variable_interval_schedule(
agent: DualSysyem,
intervals: NDArray[1, float]) -> Tuple[List[int], DataFrame]:
# for yoked control
response_since_reward = 0
required_responses: List[int] = []
row_of_result: List[RowOfResult] = []
for interval in intervals:
response = False
while interval > 0 or not response:
p = agent.compute_response_probability() * STEP_SIZE
response = agent.emit_response(p)
interval -= STEP_SIZE
response_since_reward += response
if interval <= 0. and response:
rpe = agent.compute_prediction_error(1.)
required_responses.append(response_since_reward)
response_since_reward = 0
elif response:
rpe = agent.compute_prediction_error(0.)
else:
rpe = 0.
agent.update_hkt(rpe)
gk_hk = agent.step(STEP_SIZE)
if gk_hk is not None:
row_of_result.append(gk_hk)
result = DataFrame(row_of_result, columns=["gk", "hk"])
return required_responses, result
def variable_ratio_schedule(agent: DualSysyem,
required_responses: List[int]) -> DataFrame:
row_of_result: List[RowOfResult] = []
for required_response in required_responses:
while required_response > 0:
p = agent.compute_response_probability() * STEP_SIZE
response = agent.emit_response(p)
required_response -= response
if required_response <= 0 and response:
rpe = agent.compute_prediction_error(1.)
elif response:
rpe = agent.compute_prediction_error(0.)
else:
rpe = 0.
agent.update_hkt(rpe)
gk_hk = agent.step(STEP_SIZE)
if gk_hk is not None:
row_of_result.append(gk_hk)
result = DataFrame(row_of_result, columns=["gk", "hk"])
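# NOTE: the function presumably returns `result` here (its annotated return type is DataFrame);
# the remainder is not included in this excerpt.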
# SPDX-License-Identifier: Apache-2.0
import unittest
import numbers
from distutils.version import StrictVersion
import numpy as np
from numpy.testing import assert_almost_equal
import pandas
from onnxruntime import InferenceSession
from sklearn.datasets import load_iris
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Normalizer
try:
from sklearn.ensemble import StackingClassifier
except ImportError:
# New in 0.22
StackingClassifier = None
from skl2onnx import update_registered_converter, convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
from skl2onnx.common.shape_calculator import (
calculate_linear_classifier_output_shapes, # noqa
calculate_linear_regressor_output_shapes)
from skl2onnx._parse import _parse_sklearn_classifier
from xgboost import XGBRegressor, XGBClassifier
import onnxmltools
from onnxmltools.convert.xgboost.operator_converters.XGBoost import (
convert_xgboost # noqa
)
try:
from test_utils import dump_single_regression
except ImportError:
import os
import sys
sys.path.append(
os.path.join(
os.path.dirname(__file__), "..", "tests"))
from test_utils import dump_single_regression
from test_utils import (
dump_multiple_classification, TARGET_OPSET, TARGET_OPSET_ML)
class TestXGBoostModels(unittest.TestCase):
@classmethod
def setUpClass(self):
def custom_parser(scope, model, inputs, custom_parsers=None):
if custom_parsers is not None and model in custom_parsers:
return custom_parsers[model](
scope, model, inputs, custom_parsers=custom_parsers)
if not all(isinstance(i, (numbers.Real, bool, np.bool_))
for i in model.classes_):
raise NotImplementedError(
"Current converter does not support string labels.")
return _parse_sklearn_classifier(scope, model, inputs)
update_registered_converter(
XGBClassifier, 'XGBClassifier',
calculate_linear_classifier_output_shapes,
convert_xgboost, parser=custom_parser,
options={'zipmap': [True, False, 'columns'],
'nocl': [True, False]})
update_registered_converter(
XGBRegressor, 'XGBRegressor',
calculate_linear_regressor_output_shapes,
convert_xgboost)
@unittest.skipIf(
StrictVersion(onnxmltools.__version__) < StrictVersion('1.11'),
reason="converter for xgboost is too old")
def test_xgb_regressor(self):
iris = load_iris()
X = iris.data[:, :2]
y = iris.target
xgb = XGBRegressor()
xgb.fit(X, y)
conv_model = convert_sklearn(
xgb,
initial_types=[
('input', FloatTensorType(shape=[None, X.shape[1]]))],
target_opset={'': TARGET_OPSET, 'ai.onnx.ml': TARGET_OPSET_ML})
self.assertTrue(conv_model is not None)
dump_single_regression(xgb, suffix="-Dec4")
def test_xgb_classifier(self):
xgb = XGBClassifier(n_estimators=2, max_depth=2)
iris = load_iris()
X = iris.data[:, :2]
y = iris.target
y[y == 2] = 0
xgb.fit(X, y)
conv_model = convert_sklearn(
xgb, initial_types=[
('input', FloatTensorType(shape=[None, X.shape[1]]))],
options={id(xgb): {'zipmap': False}},
target_opset={'': TARGET_OPSET, 'ai.onnx.ml': TARGET_OPSET_ML})
sess = InferenceSession(conv_model.SerializeToString())
res = sess.run(None, {'input': X.astype(np.float32)})
assert_almost_equal(xgb.predict_proba(X), res[1])
assert_almost_equal(xgb.predict(X), res[0])
@unittest.skipIf(
StrictVersion(onnxmltools.__version__) < StrictVersion('1.11'),
reason="converter for xgboost is too old")
def test_xgb_classifier_multi(self):
iris = load_iris()
X = iris.data[:, :2]
y = iris.target
xgb = XGBClassifier()
xgb.fit(X, y)
conv_model = convert_sklearn(
xgb,
initial_types=[
('input', FloatTensorType(shape=[None, X.shape[1]]))],
target_opset={'': TARGET_OPSET, 'ai.onnx.ml': TARGET_OPSET_ML})
self.assertTrue(conv_model is not None)
dump_multiple_classification(xgb)
@unittest.skipIf(
StrictVersion(onnxmltools.__version__) < StrictVersion('1.11'),
reason="converter for xgboost is too old")
def test_xgb_classifier_multi_reglog(self):
iris = load_iris()
X = iris.data[:, :2]
y = iris.target
xgb = XGBClassifier(objective='reg:logistic')
xgb.fit(X, y)
conv_model = convert_sklearn(
xgb, initial_types=[
('input', FloatTensorType(shape=[None, X.shape[1]]))],
target_opset={'': TARGET_OPSET, 'ai.onnx.ml': TARGET_OPSET_ML})
self.assertTrue(conv_model is not None)
dump_multiple_classification(xgb, suffix="RegLog")
def test_xgb_classifier_reglog(self):
iris = load_iris()
X = iris.data[:, :2]
y = iris.target
y[y == 2] = 0
xgb = XGBClassifier(objective='binary:logistic')
xgb.fit(X, y)
conv_model = convert_sklearn(
xgb, initial_types=[
('input', FloatTensorType(shape=[None, X.shape[1]]))],
options={id(xgb): {'zipmap': False}},
target_opset={'': TARGET_OPSET, 'ai.onnx.ml': TARGET_OPSET_ML})
self.assertTrue(conv_model is not None)
sess = InferenceSession(conv_model.SerializeToString())
res = sess.run(None, {'input': X.astype(np.float32)})
assert_almost_equal(xgb.predict_proba(X), res[1])
assert_almost_equal(xgb.predict(X), res[0])
@unittest.skipIf(StackingClassifier is None,
reason="new in 0.22")
def test_model_stacking_classifier_column_transformer(self):
classifiers = {
'A': XGBClassifier(n_estimators=5, random_state=42),
'B': XGBClassifier(n_estimators=5, random_state=42)
}
model_to_test = Pipeline(steps=[
('cbe', ColumnTransformer([
("norm1", Normalizer(norm='l1'), [0, 1]),
("norm2", Normalizer(norm='l2'), [2, 3])])),
('sc', StackingClassifier(
estimators=list(map(tuple, classifiers.items())),
stack_method='predict_proba',
passthrough=False
))
])
iris = load_iris()
X = iris.data.astype(np.float32)
y = (iris.target == 0).astype(np.int32)
model_to_test.fit(X, y)
model_onnx = convert_sklearn(
model_to_test, "stacking classifier",
[("input", FloatTensorType([None, X.shape[1]]))],
target_opset={'': TARGET_OPSET, 'ai.onnx.ml': TARGET_OPSET_ML},
options={'zipmap': False})
sess = InferenceSession(model_onnx.SerializeToString())
res = sess.run(None, {'input': X.astype(np.float32)})
assert_almost_equal(model_to_test.predict_proba(X), res[1])
assert_almost_equal(model_to_test.predict(X), res[0])
@unittest.skipIf(StackingClassifier is None,
reason="new in 0.22")
def test_model_stacking_classifier_column_transformer_custom(self):
classifiers = {
'A': XGBClassifier(n_estimators=5, random_state=42),
'B': XGBClassifier(n_estimators=5, random_state=42)
}
model_to_test = Pipeline(steps=[
('cbe', ColumnTransformer([
("norm1", Normalizer(norm='l1'), [0, 1]),
("norm2", Normalizer(norm='l2'), [2, 3])])),
('sc', StackingClassifier(
estimators=list(map(tuple, classifiers.items())),
stack_method='predict_proba',
passthrough=False
))
])
iris = load_iris()
X = iris.data.astype(np.float32)
df = pandas.DataFrame(X)
import streamlit as st
import plotly.figure_factory as ff
import numpy as np
import pandas as pd
import plotly.express as px
#in this one I'm letting people see all of the items for a portal. So they pick that, the data is filtered
#and then you get a chart with all of the items
def comparebar():
# Add histogram data
df = pd.read_csv("https://raw.githubusercontent.com/tyrin/info-topo-dash/master/data/freshdata.csv")
import os
from typing import List, Dict, Callable, Tuple
import pandas as pd
from flowpipe import Graph, INode, Node, InputPlug, OutputPlug
from insurance_claims.record_types import *
# let's invent some kind of overhead that goes into processing the claim
CLAIM_VALUE_PROCESSING_OVERHEAD_RATE = 0.05
# threshold to decide if claim is high or low value
HIGH_VALUE_CLAIM_THRESHOLD = 60000
# claims below this value are considered simple
SIMPLE_CLAIM_VALUE_THRESHOLD = 5000
# in reality sometimes the claims will be paid in full, and sometimes partially or not at all
# to average this out let's just always pay out a certain partial amount
# we assume simple claims will be paid out more often
SIMPLE_CLAIMS_PAYOUT_RATE = 0.8
COMPLEX_CLAIMS_PAYOUT_RATE = 0.6
class Stream(INode):
def __init__(self, **kwargs):
super(Stream, self).__init__(**kwargs)
self.data = []
def add_data(self, new_data: List, key: Callable=None) -> None:
if key is None:
self.data.extend(new_data)
return
data_as_dict = {key(x):x for x in self.data}
for record in new_data:
# this may sometimes override existing records
# but that's intentional as we only want one record per key
data_as_dict[key(record)] = record
self.data = list(data_as_dict.values())
def get_data(self, drop=False):
data_to_return = self.data[:]
if drop:
self.data = []
return data_to_return
############ input streams ##############
class NewClaimsStream(Stream):
def __init__(self, **kwargs):
super(NewClaimsStream, self).__init__(**kwargs)
OutputPlug('new_claims', self)
def compute(self) -> Dict:
return {'new_claims': self.data}
############ inner streams ##############
class ClaimValueStream(Stream):
def __init__(self, **kwargs):
super(ClaimValueStream, self).__init__(**kwargs)
InputPlug('claim_values', self)
OutputPlug('claim_values', self)
def compute(self, claim_values: List[ClaimValue]) -> Dict:
self.add_data(claim_values, lambda x: x.claim_id)
return {'claim_values': self.data}
class HighValueClaimsStream(Stream):
def __init__(self, **kwargs):
super(HighValueClaimsStream, self).__init__(**kwargs)
InputPlug('high_value_claims', self)
OutputPlug('high_value_claims', self)
def compute(self, high_value_claims: List[Dict]) -> Dict:
self.add_data(high_value_claims, lambda x: x["claim_id"])
return {'high_value_claims': self.data}
class LowValueClaimsStream(Stream):
def __init__(self, **kwargs):
super(LowValueClaimsStream, self).__init__(**kwargs)
InputPlug('low_value_claims', self)
OutputPlug('low_value_claims', self)
def compute(self, low_value_claims: List[Dict]) -> Dict:
self.add_data(low_value_claims, lambda x: x["claim_id"])
return {'low_value_claims': self.data}
class SimpleClaimsStream(Stream):
def __init__(self, **kwargs):
super(SimpleClaimsStream, self).__init__(**kwargs)
InputPlug('simple_claims', self)
OutputPlug('simple_claims', self)
def compute(self, simple_claims: List[Dict]) -> Dict:
self.add_data(simple_claims, lambda x: x["claim_id"])
return {'simple_claims': self.data}
class ComplexClaimsStream(Stream):
def __init__(self, **kwargs):
super(ComplexClaimsStream, self).__init__(**kwargs)
InputPlug('high_value_claims', self)
InputPlug('complex_claims', self)
OutputPlug('complex_claims', self)
def compute(self, high_value_claims: List[Dict], complex_claims: List[Dict]) -> Dict:
self.add_data(high_value_claims, lambda x: x["claim_id"])
self.add_data(complex_claims, lambda x: x["claim_id"])
return {'complex_claims': self.data}
############ output streams ##############
class ClaimPayoutStream(Stream):
def __init__(self, **kwargs):
super(ClaimPayoutStream, self).__init__(**kwargs)
InputPlug('simple_claim_payouts', self)
InputPlug('complex_claim_payouts', self)
OutputPlug('claim_payouts', self)
def compute(self, simple_claim_payouts: List[ClaimPayout], complex_claim_payouts: List[ClaimPayout]) -> Dict:
self.add_data(simple_claim_payouts, lambda x: x.claim_id)
self.add_data(complex_claim_payouts, lambda x: x.claim_id)
return {'claim_payouts': self.data}
############ processing nodes ##############
class CalculateClaimValue(INode):
def __init__(self, **kwargs):
super(CalculateClaimValue, self).__init__(**kwargs)
InputPlug('claims', self)
OutputPlug('claim_values', self)
def compute(self, claims: List[Dict]) -> Dict:
# claim value itself plus processing overhead
calc_total_claim_value = lambda v: (1.0 + CLAIM_VALUE_PROCESSING_OVERHEAD_RATE) * v
claim_values = [ClaimValue(claim_id=c["claim_id"], value=calc_total_claim_value(c["total_claim_amount"])) for c in claims]
return {'claim_values': claim_values}
class ClassifyClaimValue(INode):
def __init__(self, **kwargs):
super(ClassifyClaimValue, self).__init__(**kwargs)
InputPlug('claims', self)
InputPlug('claim_values', self)
OutputPlug('high_value_claims', self)
OutputPlug('low_value_claims', self)
def compute(self, claims: List[Dict], claim_values: List[ClaimValue]) -> Dict:
# these loops are twice as slow as they should be
# because this filtering can be done in one iteration
# but we won't be running crazy lots of data, so clarity first is ok
# also this can be done with filter(), but i like generator syntax more
high_value_claim_ids = [cv.claim_id for cv in claim_values if cv.value >= HIGH_VALUE_CLAIM_THRESHOLD]
low_value_claim_ids = [cv.claim_id for cv in claim_values if cv.value < HIGH_VALUE_CLAIM_THRESHOLD]
high_value_claims = [c for c in claims if c["claim_id"] in high_value_claim_ids]
low_value_claims = [c for c in claims if c["claim_id"] in low_value_claim_ids]
return {'high_value_claims': high_value_claims, 'low_value_claims': low_value_claims}
class ClassifyClaimComplexity(INode):
def __init__(self, **kwargs):
super(ClassifyClaimComplexity, self).__init__(**kwargs)
InputPlug('claims', self)
OutputPlug('simple_claims', self)
OutputPlug('complex_claims', self)
def compute(self, claims: List[Dict]) -> Dict:
# just some almost random logic here
def is_claim_complex(claim):
if claim["total_claim_amount"] <= SIMPLE_CLAIM_VALUE_THRESHOLD:
# small claims are never complex
return False
if claim["auto_year"] < 2000:
# old cars yield complex cases
return True
if claim["witnesses"] == 0 and claim["police_report_available"] != "YES":
# no objective evidence of incident cause
return True
return False
simple_claims = [c for c in claims if not is_claim_complex(c)]
complex_claims = [c for c in claims if is_claim_complex(c)]
return {'simple_claims': simple_claims, 'complex_claims': complex_claims}
class CalculateSimpleClaimsPayout(INode):
def __init__(self, **kwargs):
super(CalculateSimpleClaimsPayout, self).__init__(**kwargs)
InputPlug('simple_claims', self)
OutputPlug('simple_claim_payouts', self)
def compute(self, simple_claims: List[Dict]) -> Dict:
simple_claim_payouts = [ClaimPayout(claim_id=c["claim_id"], payout=SIMPLE_CLAIMS_PAYOUT_RATE * c["total_claim_amount"])
for c in simple_claims]
return {'simple_claim_payouts': simple_claim_payouts}
class CalculateComplexClaimsPayout(INode):
def __init__(self, **kwargs):
super(CalculateComplexClaimsPayout, self).__init__(**kwargs)
InputPlug('complex_claims', self)
OutputPlug('complex_claim_payouts', self)
def compute(self, complex_claims: List[Dict]) -> Dict:
complex_claim_payouts = [ClaimPayout(claim_id=c["claim_id"], payout=COMPLEX_CLAIMS_PAYOUT_RATE * c["total_claim_amount"])
for c in complex_claims]
return {'complex_claim_payouts': complex_claim_payouts}
class App():
def __init__(self):
self._build()
def evaluate(self, save_dataset=False):
self.graph.evaluate()
if save_dataset:
self._save_dataset()
return self.get_outputs()
def add_data(self, new_claims):
self.new_claims_stream.add_data(new_claims, key=lambda x: x["claim_id"])
def get_outputs(self):
return self.claim_payouts_stream.get_data()
def _build(self) -> Graph:
graph = Graph(name='InsuraceClaims')
# input streams
self.new_claims_stream = NewClaimsStream(graph=graph)
# inner streams
claim_values_stream = ClaimValueStream(graph=graph)
high_value_claims_stream = HighValueClaimsStream(graph=graph)
low_value_claims_stream = LowValueClaimsStream(graph=graph)
simple_claims_stream = SimpleClaimsStream(graph=graph)
complex_claims_stream = ComplexClaimsStream(graph=graph)
# output streams
self.claim_payouts_stream = ClaimPayoutStream(graph=graph)
# processing nodes
calculate_claim_value = CalculateClaimValue(graph=graph)
classify_claim_value = ClassifyClaimValue(graph=graph)
classify_claim_complexity = ClassifyClaimComplexity(graph=graph)
calculate_simple_claim_payout = CalculateSimpleClaimsPayout(graph=graph)
calculate_complex_claim_payout = CalculateComplexClaimsPayout(graph=graph)
# wiring graph components
self.new_claims_stream.outputs["new_claims"] >> calculate_claim_value.inputs["claims"]
calculate_claim_value.outputs["claim_values"] >> claim_values_stream.inputs["claim_values"]
self.new_claims_stream.outputs["new_claims"] >> classify_claim_value.inputs["claims"]
claim_values_stream.outputs["claim_values"] >> classify_claim_value.inputs["claim_values"]
classify_claim_value.outputs["low_value_claims"] >> low_value_claims_stream.inputs["low_value_claims"]
classify_claim_value.outputs["high_value_claims"] >> high_value_claims_stream.inputs["high_value_claims"]
high_value_claims_stream.outputs["high_value_claims"] >> complex_claims_stream.inputs["high_value_claims"]
low_value_claims_stream.outputs["low_value_claims"] >> classify_claim_complexity.inputs["claims"]
classify_claim_complexity.outputs["simple_claims"] >> simple_claims_stream.inputs["simple_claims"]
classify_claim_complexity.outputs["complex_claims"] >> complex_claims_stream.inputs["complex_claims"]
simple_claims_stream.outputs["simple_claims"] >> calculate_simple_claim_payout.inputs["simple_claims"]
complex_claims_stream.outputs["complex_claims"] >> calculate_complex_claim_payout.inputs["complex_claims"]
calculate_simple_claim_payout.outputs["simple_claim_payouts"] >> self.claim_payouts_stream.inputs["simple_claim_payouts"]
calculate_complex_claim_payout.outputs["complex_claim_payouts"] >> self.claim_payouts_stream.inputs["complex_claim_payouts"]
self.graph = graph
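# Example usage (hypothetical claim record; field names follow the columns referenced above):
# app = App()
# app.add_data([{'claim_id': 1, 'total_claim_amount': 4500.0, 'auto_year': 2015,
#                'witnesses': 2, 'police_report_available': 'YES'}])
# app.evaluate()  # -> [ClaimPayout(claim_id=1, payout=3600.0)]  (simple claim, 0.8 payout rate)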
def _save_dataset(self):
nodes_to_collect = ["", "ComplexClaimsStream", "SimpleClaimsStream"]
get_stream_node_data = \
lambda node_name: (next(node for node in self.graph.all_nodes if node.name == node_name)).get_data()
new_claims = get_stream_node_data("NewClaimsStream")
complex_claims = get_stream_node_data("ComplexClaimsStream")
complex_claim_ids = [c["claim_id"] for c in complex_claims]
simple_claims = get_stream_node_data("SimpleClaimsStream")
simple_claim_ids = [c["claim_id"] for c in simple_claims]
df = pd.DataFrame.from_records(new_claims)
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import numpy as np
from scipy import stats as sps
from scipy.interpolate import interp1d
from matplotlib import pyplot as plt
from matplotlib.dates import date2num, num2date
from matplotlib import dates as mdates
from matplotlib import ticker
from matplotlib.colors import ListedColormap
from matplotlib.patches import Patch
from PIL import Image
def other_charts(st, casos_panama):
st.title('Numero de Casos vs Pruebas Realizadas')
casos_pty = casos_panama.copy()
casos_pty['pctg'] = round(casos_pty.positivity_pctg*100, 1)
casos_pty['month'] = pd.to_datetime(casos_pty['date'], format='%Y-%m-%d')
from csv2clean import *
from fuzzywuzzy import fuzz
from tqdm import tqdm
import numpy as np
import pandas as pd
import pickle
import spacy
nlp = spacy.load("fr_core_news_lg")
#file_dir='../../data/Catalogue.csv'
stop_loc=['Région', 'Métropole', 'Region', 'Metropole','Mer', 'mer', 'Département', 'DEPARTEMENT', 'Agglomération', 'agglomération','Communauté', 'communauté']
from joblib import Parallel, delayed
id_table=list(np.load('../../data/id_table.npy', allow_pickle=True))
###########
departements=pd.read_csv('../../data/departement2019.csv')
communes= | pd.read_csv('../../data/communes-01012019.csv') | pandas.read_csv |
import abc
import math
import numpy as np
import pandas as pd
import tensorflow as tf
from dataclasses import dataclass
from pathlib import Path
try:
from emnist import extract_samples
except ModuleNotFoundError:
pass
from sklearn.model_selection import train_test_split
from sklearn.base import TransformerMixin
from sklearn.preprocessing import MinMaxScaler
from scipy.io import arff
from typing import List, Callable, Union, Tuple
from libs.DataTypes import AutoencoderLayers
from utils import BASE_PATH
@dataclass
class DataLabels:
"""
Class storing test/train data
"""
# We'll put everything in the train data if no test data was given and split later
x_train: np.ndarray # Train data
y_train: np.ndarray
x_test: np.ndarray = None # Test data
y_test: np.ndarray = None
x_val: np.ndarray = None # Validation data
y_val: np.ndarray = None
# If needed: a scaler
scaler: TransformerMixin = None
# Configuration
test_split: float = .2 # Test data percentage
val_split: float = .05 # Validation data percentage
random_state: int = None # Random seed
# Metadata
shape: tuple = None # Shape of the data
available_classes: Union[List[int], List[str]] = None # all available classes
## Class methods
def __repr__(self):
return self.__class__.__name__
## Retrievers
def get_target_autoencoder_data(
self, data_split: str,
drop_classes: Union[List[int], List[str]] = None, include_classes: Union[List[int], List[str]] = None
) -> Tuple[np.ndarray, np.ndarray]:
"""
Get data for useful for autoencoders
:param data_split: get data of either "train", "val" or "test"
:param drop_classes: which classes to drop, drop none if None
:param include_classes: which classes to include (has priority over drop_classes)
:return: features and labels
"""
# Get data
this_data = self._get_data_set(data_split=data_split)
# Drop the classes
if include_classes:
drop_classes = self.include_to_drop(include_classes)
this_x = np.delete(this_data[0], np.where(np.isin(this_data[1], drop_classes)), axis=0)
# For the autoencoder, the input doubles as the target, so x is all we need
return this_x, this_x
def get_target_classifier_data(
self, data_split: str,
drop_classes: Union[List[int], List[str]] = None, include_classes: Union[List[int], List[str]] = None
) -> Tuple[np.ndarray, np.ndarray]:
"""
Get data for useful for classifiers
:param data_split: get data of either "train", "val" or "test"
:param drop_classes: which classes to drop, drop none if None
:param include_classes: which classes to include (has priority over drop_classes)
:return: features and labels
"""
# Get data
this_data = self._get_data_set(data_split=data_split)
# Drop the classes
if include_classes:
drop_classes = self.include_to_drop(include_classes)
this_x = np.delete(this_data[0], np.where(np.isin(this_data[1], drop_classes)), axis=0)
this_y = np.delete(this_data[1], np.where(np.isin(this_data[1], drop_classes)), axis=0)
# Return the data
return this_x, this_y
def get_alarm_data(
self, data_split: str, anomaly_classes: Union[List[int], List[str]], drop_classes: List[int] = None,
include_classes: List[int] = None,
n_anomaly_samples: int = None
) -> Tuple[np.ndarray, np.ndarray]:
"""
Get the labels for the alarm network, i.e. with binary anomaly labels
:param data_split: get data of either "train", "val" or "test"
:param anomaly_classes: classes marked as anomaly
:param drop_classes: which classes to drop (none if None)
:param include_classes: which classes to include (has priority over drop_classes)
:param n_anomaly_samples: reduce the number of anomaly samples
:return: features and labels
"""
# Get data
this_data = self._get_data_set(data_split=data_split)
# Drop the classes
if include_classes:
drop_classes = self.include_to_drop(include_classes)
this_x = np.delete(this_data[0], np.where(np.isin(this_data[1], drop_classes)), axis=0)
this_y = np.delete(this_data[1], np.where(np.isin(this_data[1], drop_classes)), axis=0)
# Make labels binary
this_y[np.where(~np.isin(this_y, anomaly_classes))] = -1
this_y[np.where(np.isin(this_y, anomaly_classes))] = 0
this_y += 1
this_y = this_y.astype("uint8")
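# after the shift above: 0 = normal sample, 1 = anomaly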
# If desired, reduce the number of anomalous samples
if n_anomaly_samples is not None:
# IDs of all anomaly samples
idx_anom = np.where(this_y == 1)[0]
# Select the indices to delete
n_delete = len(idx_anom) - n_anomaly_samples
idx_delete = np.random.choice(idx_anom, size=n_delete, replace=False)
# Delete indices
this_x = np.delete(this_x, idx_delete, axis=0)
this_y = np.delete(this_y, idx_delete, axis=0)
# Check if we really have the right amount of anomaly samples
assert np.sum(this_y) == n_anomaly_samples
return this_x, this_y
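# usage sketch (hypothetical DataLabels subclass instance `data`):
#   x, y = data.get_alarm_data("train", anomaly_classes=[2], n_anomaly_samples=100)
# keeps all normal samples and at most 100 samples of class 2, labelled 1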
## Preprocessors
@abc.abstractmethod
def _preprocess(self):
# Preprocessing steps, e.g. data normalisation
raise NotImplementedError("Implement in subclass")
def __post_init__(self):
"""
Process the data
:return:
"""
# Fix randomness
np.random.seed(seed=self.random_state)
# Get all available classes
# TODO: we're only looking at the training data so far
self.available_classes = np.unique(self.y_train).tolist()
# Split in test and train
if self.x_test is None:
self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(
self.x_train, self.y_train, test_size=self.test_split, random_state=self.random_state
)
# Split in train and validation
if self.x_val is None:
self.x_train, self.x_val, self.y_train, self.y_val = train_test_split(
self.x_train, self.y_train, test_size=self.val_split, random_state=self.random_state
)
# Preprocess
self._preprocess()
# Note down the shape
self.shape = self.x_train.shape[1:]
## Helpers
def include_to_drop(self, include_data: Union[List[int], List[str]]) -> Union[List[int], List[str]]:
"""
Convert a list of classes to include to a list of classes to drop
:param include_data: classes to include
:return: classes to drop
"""
drop_classes = set(self.available_classes) - set(include_data)
return list(drop_classes)
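# e.g. (hypothetical) available_classes = [0, ..., 9] and include_data = [3, 5]
# returns the remaining eight classes as the drop list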
def _get_data_set(self, data_split: str) -> Tuple[np.ndarray, np.ndarray]:
"""
Get the right data split
:param data_split: train, val or test data?
:return: the right data set
"""
if data_split == "train":
return self.x_train.copy(), self.y_train.copy()
elif data_split == "test":
return self.x_test.copy(), self.y_test.copy()
elif data_split == "val":
return self.x_val.copy(), self.y_val.copy()
else:
raise ValueError("The requested data must be of either train, val or test set.")
@staticmethod
def _ae_feature_selector(selected_layers: List[AutoencoderLayers], n_hidden: int) -> List[int]:
"""
Index of features based on their name representation for symmetric autoencoders
:param selected_layers: list of names for the desired layers
:param n_hidden: number of hidden states
:return: list of indices where to find the desired layers
"""
# If nothing was specified, we'll assume that all features are meant
if not selected_layers:
return list(range(n_hidden))
# If already numbers were given, use them
if isinstance(selected_layers[0], int):
return selected_layers
# layer indices are 0-based, so the last layer has index n_hidden - 1
n_hidden -= 1
# We assume symmetric autoencoders, such that the code is in the middle
i_code = math.floor(n_hidden / 2)
# Life is easier with a translation dictionary
trans_dict = {
AutoencoderLayers.OUTPUT: [n_hidden],
AutoencoderLayers.CODE: [i_code],
AutoencoderLayers.ENCODER: list(range(i_code)),
AutoencoderLayers.DECODER: list(range(i_code + 1, n_hidden)),
}
# We'll replace the selected lists by their index values a concatenate them
index_list = [trans_dict[cur_el] for cur_el in selected_layers]
index_list = [cur_el for cur_list in index_list for cur_el in cur_list]
return sorted(index_list)
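# worked example: n_hidden=5 means layer indices 0..4 with the code at index 2,
# so ENCODER -> [0, 1], CODE -> [2], DECODER -> [3] and OUTPUT -> [4]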
def scikit_scale(self, scikit_scaler: Callable[[], TransformerMixin] = MinMaxScaler):
"""
Apply a scikit scaler to the data, e.g. MinMaxScaler transform data to [0,1]
:return:
"""
# Fit scaler to train set
self.scaler = scikit_scaler()
self.x_train = self.scaler.fit_transform(self.x_train)
# Scale the rest
self.x_val = self.scaler.transform(self.x_val)
self.x_test = self.scaler.transform(self.x_test)
class MNIST(DataLabels):
def __init__(self, enrich_mnist_by=None, enrich_test_by=None, *args, **kwargs):
"""
Load the MNIST data set, optionally enriched with selected EMNIST letter classes
"""
# Simply load the data with the kind help of Keras
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Add channel dimension to the data
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
# If desired, add new samples from EMNIST to MNIST
if enrich_mnist_by:
# load both, train and test data set from EMNIST
emnist_x_train, emnist_y_train = extract_samples('letters', 'train')
emnist_x_test, emnist_y_test = extract_samples('letters', 'test')
# Add channel dimension to emnist data
emnist_x_train = np.expand_dims(emnist_x_train, -1)
emnist_x_test = np.expand_dims(emnist_x_test, -1)
# choose the desired letters from emnist and translate the numerical labels to letters
idx_train = []
idx_test = []
enrich_mnist_by = [i-9 for i in enrich_mnist_by]
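# the requested class ids are offset by 9 relative to the raw EMNIST letter labels;
# the offset is added back onto the labels further below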
for i in range(len(enrich_mnist_by)):
# get locations/indices of desired letters
idx_train.append(np.where(emnist_y_train == list(enrich_mnist_by)[i]))
idx_test.append(np.where(emnist_y_test == list(enrich_mnist_by)[i]))
idx_train = np.asarray(idx_train).flatten()
emnist_x_train = emnist_x_train[idx_train]
emnist_y_train = emnist_y_train[idx_train]+9
idx_test = np.asarray(idx_test).flatten()
emnist_x_test = emnist_x_test[idx_test]
emnist_y_test = emnist_y_test[idx_test]+9
# concatenate mnist train set and emnist train dataset
y_train = np.append(y_train, emnist_y_train)
x_train = np.concatenate((x_train, emnist_x_train), axis=0)
# concatenate mnist test set and emnist test dataset
y_test = np.append(y_test, emnist_y_test)
x_test = np.concatenate((x_test, emnist_x_test), axis=0)
super(MNIST, self).__init__(
x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test, *args, **kwargs
)
def _preprocess(self):
"""
For MNIST, we can scale everything by just dividing by 255
:return:
"""
self.x_train = self.x_train / 255.
self.x_test = self.x_test / 255.
self.x_val = self.x_val / 255.
class EMNIST(DataLabels):
def __init__(self, anom_list, *args, **kwargs):
"""
Load the EMNIST letters data set and merge it with the MNIST digits
"""
# load MNIST letters using emnist package
data, labels = extract_samples('letters', 'train')
# Add channel dimension to the data
data = np.expand_dims(data, -1)
# treat anom_list (assumed to span a contiguous label range) as anomalies, drop other values and map them to one value
idx = np.where((labels >= anom_list[0]) & (labels <= anom_list[-1]))
data = data[idx]
labels = labels[idx]
labels.fill(10)
# load mnist digit dataset
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Add channel dimension to the data
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
# concatenate mnist and emnist dataset
dat = np.concatenate((data, x_train, x_test), axis=0)
label = np.concatenate((labels, y_train, y_test), axis=0)
super(EMNIST, self).__init__(x_train=dat, y_train=label, *args,
**kwargs)
def _preprocess(self):
"""
As with MNIST, we can scale everything by just dividing by 255
:return:
"""
self.x_train = self.x_train / 255.
self.x_test = self.x_test / 255.
self.x_val = self.x_val / 255.
class CreditCard(DataLabels):
def __init__(
self, data_path: Path = (BASE_PATH / "data" / "creditcard" / "creditcard").with_suffix(".csv"),
*args, **kwargs
):
"""
Load the CreditCard data set (https://www.kaggle.com/mlg-ulb/creditcardfraud)
:param data_path: absolute path to the CreditCard csv
"""
data = pd.read_csv(data_path)
# Time axis does not directly add information (although frequency might be a feature)
data = data.drop(['Time'], axis=1)
# Column class has the anomaly values, the rest is data
x_train = data.drop(['Class'], axis=1)
y_train = data.loc[:, ['Class']]
# We don't need the overhead of pandas here
x_train = x_train.to_numpy()
y_train = y_train.to_numpy()
# TODO: why is this even in here?
# for i in range(len(y_train)):
# y_train[i, 0] = y_train[i, 0].replace("\'", "")
super(CreditCard, self).__init__(
x_train=x_train, y_train=y_train, *args, **kwargs
)
def _preprocess(self):
"""
Min-max scale the features and cast the labels to int
:return:
"""
self.y_test = self.y_test.astype(int)
self.y_train = self.y_train.astype(int)
self.y_val = self.y_val.astype(int)
self.scikit_scale()
def _drop_class(self):
"""
Drop frauds (Class==1)
"""
# Delete from training data if we train the autoencoder
if not self.is_alarm:
self.x_train = np.delete(self.x_train, np.where(self.y_train == self.drop_num), axis=0)
self.y_train = np.delete(self.y_train, np.where(self.y_train == self.drop_num), axis=0)
# We should also drop it from the validation data, so that we only optimise the reconstruction of valid data
self.x_val = np.delete(self.x_val, np.where(self.y_val == self.drop_num), axis=0)
self.y_val = np.delete(self.y_val, np.where(self.y_val == self.drop_num), axis=0)
# Rewrite train labels -> not necessary for this data set
if not self.is_binary:
raise NotImplementedError("This data set only has binary labels")
class NSL_KDD(DataLabels):
def __init__(self, data_folder: str = "NSL-KDD", *args, **kwargs):
"""
NSL KDD data set: https://www.unb.ca/cic/datasets/nsl.html
:param data_folder: subfolder of "data" where raw data resides
"""
# Open raw data
common_path = BASE_PATH / "data" / data_folder
train_data = arff.loadarff((common_path / "KDDTrain+").with_suffix(".arff"))
test_data = arff.loadarff((common_path / "KDDTest+").with_suffix(".arff"))
# Extract column names
all_cols = [cur_key for cur_key in test_data[1]._attributes.keys()]
all_cat = {
cur_key: cur_val.range for cur_key, cur_val in test_data[1]._attributes.items()
if cur_val.range is not None
}
# Create pandas dataframe
train_data = pd.DataFrame(data=train_data[0], columns=all_cols)
test_data = pd.DataFrame(data=test_data[0], columns=all_cols)
# Mark respective columns as categorical
for cur_key, cur_val in all_cat.items():
# We need to decode the byte strings first
test_data[cur_key] = pd.Categorical(
test_data[cur_key].str.decode('UTF-8'), categories=cur_val, ordered=False
)
train_data[cur_key] = pd.Categorical(
train_data[cur_key].str.decode('UTF-8'), categories=cur_val, ordered=False
)
# For whatever reason, the anomaly labels are only in the .txt files... load them separately
train_labels = pd.read_csv((common_path / "KDDTrain+").with_suffix(".txt"), header=None)
train_labels = train_labels.iloc[:, -2].astype("category")
train_labels = train_labels.map(self._attack_map())
# NOTE: train_labels categories might not be mapped to the same number as in test_labels -> index by name
test_labels = pd.read_csv((common_path / "KDDTest+").with_suffix(".txt"), header=None)
test_labels = test_labels.iloc[:, -2].astype("category")
test_labels = test_labels.map(self._attack_map())
# Drop the class labels from the original data
train_data = train_data.drop(columns="class")
test_data = test_data.drop(columns="class")
# Finally, 1-Hot encode the categorical data
train_data = pd.get_dummies(train_data)
test_data = | pd.get_dummies(test_data) | pandas.get_dummies |
import numpy as np
import pandas as pd
from collections import defaultdict
import datetime
import math
import os.path
from sklearn.preprocessing import StandardScaler
def feature_engineering(feature):
# confirmed, death, confirmed_diff, death_diff, confirmed_square, death_square
diff = [0 for _ in range(12)]
squared = [0 for _ in range(14)]
for idx in range(2):
for i in range(1, 7):
diff[idx * 6 + i - 1] = feature[idx * 7 + i] - feature[idx * 7 + i - 1]
feature.extend(diff)
for i in range(14):
squared[i] = feature[i] * feature[i]
feature.extend(squared)
return feature
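# resulting layout: 7 confirmed + 7 death counts, their 6+6 first differences,
# then the squares of the 14 raw values (40 features in total)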
class PreprocessForNN(object):
def __init__(self):
self.deathData = None
self.confirmedData = None
self.features = []
self.icu_beds = defaultdict(int)
self.staffed_beds = defaultdict(int)
self.licensed_beds = defaultdict(int)
self.total_population = defaultdict(int)
self.population_over_sixty = defaultdict(int)
self.policies = defaultdict(lambda: [0 for _ in range(8)])
self.scaler_feature = StandardScaler()
self.scaler_label = StandardScaler()
self.valid_FIPS = set()
self.upper = 50.0
self.mid = 10.0
self.lower = 1.0
def load_policies(self):
'''
dict[key][0] = stay at home
dict[key][1] = >50 gathering
dict[key][2] = >500 gathering
dict[key][3] = public schools
dict[key][4] = restaurant dine-in
dict[key][5] = entertainment/gym
dict[key][6] = federal guidelines
dict[key][7] = foreign travel ban
'''
policy = pd.read_csv(filepath_or_buffer='data/us/other/policies.csv')
label_names = ['stay at home', '>50 gatherings', '>500 gatherings',
'public schools', 'restaurant dine-in', 'entertainment/gym']
mean_times = [0 for _ in range(len(label_names))]
ranges = [0 for _ in range(len(label_names))]
min_times = [0 for _ in range(len(label_names))]
for idx, label in enumerate(label_names):
times = policy[label].values[~np.isnan(policy[label])]
mean_times[idx] = np.mean(times)
ranges[idx] = max(1, np.max(times) - np.min(times))
min_times[idx] = np.min(times)
for item in policy.iterrows():
fips = item[1]['FIPS']
for idx, label in enumerate(label_names):
if not math.isnan(item[1][label]):
scaled = 1 - (item[1][label] - min_times[idx]) / ranges[idx]
self.policies[fips][idx] = scaled
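# scaled value lies in [0, 1]: earlier policy adoption maps closer to 1,
# counties with no recorded policy keep the defaultdict value 0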
def load_data(self):
self.load_beds_dict()
self.load_population_dict()
self.load_policies()
def fetch_none_zero_data(self):
last_date = self.deathData.columns[-1]
countyNoneZero = self.deathData.loc[self.deathData[last_date] != 0]
countyNoneZero = countyNoneZero.reset_index(drop=True)
self.valid_FIPS = set(countyNoneZero['countyFIPS'])
self.deathData = countyNoneZero
keep_flag = []
for FIPS in self.confirmedData['countyFIPS']:
keep_flag.append(int(FIPS) in self.valid_FIPS)
self.confirmedData = self.confirmedData.loc[keep_flag, :]
@staticmethod
def add_window(FIPS, death_list, confirmed_list, mode):
window_size_7 = 7
window_size_14 = 14
count = len(death_list) - window_size_7 - window_size_14 + 1
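# number of positions where a 7-day feature window and the following
# 14-day label window both fit inside the series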
output = []
FIPS_list = []
label_list = []
feature_list = []
if mode == 'train':
for i in range(count):
feature = confirmed_list[i:i+window_size_7] + death_list[i:i+window_size_7]
feature = feature_engineering(feature)
label = death_list[i+window_size_7:i+window_size_7+window_size_14]
if sum(label) == 0:
continue
FIPS_list.append(FIPS)
label_list.append(label)
feature_list.append(feature)
records = {'FIPS': FIPS_list, 'label': label_list, 'feature': feature_list}
else:
feature = confirmed_list[-window_size_7:] + death_list[-window_size_7:]
feature = feature_engineering(feature)
FIPS_list.append(FIPS)
feature_list.append(feature)
records = {'FIPS': FIPS_list, 'feature': feature_list}
output = | pd.DataFrame(dict) | pandas.DataFrame |
import os
import sys
import time
import sqlite3
import pyupbit
import pandas as pd
from PyQt5.QtCore import QThread
from pyupbit import WebSocketManager
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import *
from utility.static import now, timedelta_sec, strf_time, timedelta_hour, strp_time
class TraderUpbit(QThread):
def __init__(self, windowQ, coinQ, queryQ, soundQ, cstgQ, teleQ):
super().__init__()
self.windowQ = windowQ
self.coinQ = coinQ
self.queryQ = queryQ
self.soundQ = soundQ
self.cstgQ = cstgQ
self.teleQ = teleQ
self.upbit = None # client object for placing buy/sell orders and checking executions
self.buy_uuid = None # list for storing the buy order: [ticker name, uuid]
self.sell_uuid = None # list for storing the sell order: [ticker name, uuid]
self.websocketQ = None # websocket queue for receiving real-time data
self.df_cj = | pd.DataFrame(columns=columns_cj) | pandas.DataFrame |
import os
import re
import shlex
import numpy as np
import pandas as pd
from scipy.io import mmread, mmwrite
from scipy.sparse import csr_matrix
import tempfile
import subprocess
from typing import List, Dict, Tuple, Union
import logging
logger = logging.getLogger(__name__)
from pegasusio import UnimodalData, CITESeqData, MultimodalData
def _enumerate_files(path: str, parts: List[str], repl_list1: List[str], repl_list2: List[str] = None) -> str:
""" Enumerate all possible file names """
if len(parts) <= 2:
for token in repl_list1:
parts[-1] = token
candidate = os.path.join(path, ''.join(parts))
if os.path.isfile(candidate):
return candidate
else:
assert len(parts) == 4
for p2 in repl_list1:
parts[1] = p2
for p4 in repl_list2:
parts[3] = p4
candidate = os.path.join(path, ''.join(parts))
if os.path.isfile(candidate):
return candidate
return None
def _locate_barcode_and_feature_files(path: str, fname: str) -> Tuple[str, str]:
""" Locate barcode and feature files (with path) based on mtx file name (no suffix)
"""
barcode_file = feature_file = None
if fname == "matrix":
barcode_file = _enumerate_files(path, [''], ["cells.tsv.gz", "cells.tsv", "barcodes.tsv.gz", "barcodes.tsv"])
feature_file = _enumerate_files(path, [''], ["genes.tsv.gz", "genes.tsv", "features.tsv.gz", "features.tsv"])
else:
p1, p2, p3 = fname.partition("matrix")
if p2 == '' and p3 == '':
barcode_file = _enumerate_files(path, [p1, ''], [".barcodes.tsv.gz", ".barcodes.tsv", ".cells.tsv.gz", ".cells.tsv", "_barcode.tsv", ".barcodes.txt"])
feature_file = _enumerate_files(path, [p1, ''], [".genes.tsv.gz", ".genes.tsv", ".features.tsv.gz", ".features.tsv", "_gene.tsv", ".genes.txt"])
else:
barcode_file = _enumerate_files(path, [p1, '', p3, ''], ["barcodes", "cells"], [".tsv.gz", ".tsv"])
feature_file = _enumerate_files(path, [p1, '', p3, ''], ["genes", "features"], [".tsv.gz", ".tsv"])
if barcode_file is None:
raise ValueError("Cannot find barcode file!")
if feature_file is None:
raise ValueError("Cannot find feature file!")
return barcode_file, feature_file
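# example (hypothetical files): "sample1.matrix.mtx" resolves to companions such as
# "sample1.barcodes.tsv[.gz]" / "sample1.genes.tsv[.gz]", while a bare "matrix.mtx"
# falls back to "barcodes.tsv[.gz]" / "genes.tsv[.gz]" in the same directory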
def _load_barcode_metadata(barcode_file: str, sep: str = "\t") -> Tuple[pd.DataFrame, str]:
""" Load cell barcode information """
format_type = None
barcode_metadata = | pd.read_csv(barcode_file, sep=sep, header=None) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
import json
import pandas as pd
url = "https://glyconnect.expasy.org/api/glycosylations"
# In[2]:
## send the correct params to query the api
params = {'taxonomy':'Severe acute respiratory syndrome coronavirus 2 (2019-nCoV)', 'protein': 'Recombinant Spike glycoprotein (HEK293) - DRAFT DATA'}
# Severe acute respiratory syndrome coronavirus2 (2019-nCoV)&protein=Recombinant Spike glycoprotein (HEK293)
response = requests.get(url ,params=params)
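# NOTE: the HTTP status is not checked here; calling response.raise_for_status()
# before .json() would surface request errors more clearly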
# In[3]:
my_response = response.json()
df_dump = pd.DataFrame()
for r in range(len(my_response['results'])):
df_results_uniprots = pd.DataFrame(my_response['results'][r]['protein']['uniprots'],index=[r])
df_results_site = pd.DataFrame(my_response['results'][r]['site'],index=[r])
df_results_composition = pd.DataFrame(my_response['results'][r]["composition"],index=[r])
df_temp = pd.concat([df_results_composition,df_results_site, df_results_uniprots], sort=False, axis=1)
df_dump = pd.concat([df_dump,df_temp],sort=True,axis=0)
df_dump.drop_duplicates(inplace=True)
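# df_dump now holds one de-duplicated row per reported glycosylation,
# combining the composition, site and uniprot fields returned by the API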
# In[4]:
df_dump.to_csv("../data/HEK293_glycosilations_COVID19.csv",index=False)
# In[5]:
## params for the second protein
params = {'taxonomy':'Severe acute respiratory syndrome coronavirus 2 (2019-nCoV)', 'protein': "Recombinant Spike glycoprotein (BTI-Tn-5B1-4) - DRAFT DATA"}
response = requests.get(url ,params=params)
my_response = response.json()
# In[6]:
df_dump = pd.DataFrame()
for r in range(len(my_response['results'])):
df_results_uniprots = pd.DataFrame(my_response['results'][r]['protein']['uniprots'],index=[r])
df_results_site = pd.DataFrame(my_response['results'][r]['site'],index=[r])
df_results_composition = pd.DataFrame(my_response['results'][r]["composition"],index=[r])
df_temp = pd.concat([df_results_composition,df_results_site, df_results_uniprots], sort=False, axis=1)
df_dump = | pd.concat([df_dump,df_temp],sort=True,axis=0) | pandas.concat |
from unittest import TestCase
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from datasets.formatting import NumpyFormatter, PandasFormatter, PythonFormatter, query_table
from datasets.formatting.formatting import NumpyArrowExtractor, PandasArrowExtractor, PythonArrowExtractor
from datasets.table import InMemoryTable
from .utils import require_tf, require_torch
_COL_A = [0, 1, 2]
_COL_B = ["foo", "bar", "foobar"]
_COL_C = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
_INDICES = [1, 0]
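# the fixture columns model an int column (a), a string column (b) and a
# fixed-length float vector column (c); _INDICES holds row positions reused by selection tests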
class ArrowExtractorTest(TestCase):
def _create_dummy_table(self):
return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C})
def test_python_extractor(self):
pa_table = self._create_dummy_table()
extractor = PythonArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertEqual(row, {"a": _COL_A[0], "b": _COL_B[0], "c": _COL_C[0]})
col = extractor.extract_column(pa_table)
self.assertEqual(col, _COL_A)
batch = extractor.extract_batch(pa_table)
self.assertEqual(batch, {"a": _COL_A, "b": _COL_B, "c": _COL_C})
def test_numpy_extractor(self):
pa_table = self._create_dummy_table()
extractor = NumpyArrowExtractor()
row = extractor.extract_row(pa_table)
np.testing.assert_equal(row, {"a": _COL_A[0], "b": _COL_B[0], "c": np.array(_COL_C[0])})
col = extractor.extract_column(pa_table)
np.testing.assert_equal(col, np.array(_COL_A))
batch = extractor.extract_batch(pa_table)
np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B), "c": np.array(_COL_C)})
def test_numpy_extractor_np_array_kwargs(self):
pa_table = self._create_dummy_table().drop(["b"])
extractor = NumpyArrowExtractor(dtype=np.float16)
row = extractor.extract_row(pa_table)
self.assertEqual(row["c"].dtype, np.dtype(np.float16))
col = extractor.extract_column(pa_table)
self.assertEqual(col.dtype, np.float16)
batch = extractor.extract_batch(pa_table)
self.assertEqual(batch["a"].dtype, np.dtype(np.float16))
self.assertEqual(batch["c"].dtype, np.dtype(np.float16))
def test_pandas_extractor(self):
pa_table = self._create_dummy_table()
extractor = PandasArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertIsInstance(row, pd.DataFrame)
pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1])
pd.testing.assert_series_equal(row["b"], pd.Series(_COL_B, name="b")[:1])
pd.testing.assert_series_equal(row["c"], pd.Series(_COL_C, name="c")[:1])
col = extractor.extract_column(pa_table)
pd.testing.assert_series_equal(col, pd.Series(_COL_A, name="a"))
batch = extractor.extract_batch(pa_table)
self.assertIsInstance(batch, pd.DataFrame)
pd.testing.assert_series_equal(batch["a"], pd.Series(_COL_A, name="a"))
pd.testing.assert_series_equal(batch["b"], pd.Series(_COL_B, name="b"))
pd.testing.assert_series_equal(batch["c"], pd.Series(_COL_C, name="c"))
class FormatterTest(TestCase):
def _create_dummy_table(self):
return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C})
def test_python_formatter(self):
pa_table = self._create_dummy_table()
formatter = PythonFormatter()
row = formatter.format_row(pa_table)
self.assertEqual(row, {"a": _COL_A[0], "b": _COL_B[0], "c": _COL_C[0]})
col = formatter.format_column(pa_table)
self.assertEqual(col, _COL_A)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch, {"a": _COL_A, "b": _COL_B, "c": _COL_C})
def test_numpy_formatter(self):
pa_table = self._create_dummy_table()
formatter = NumpyFormatter()
row = formatter.format_row(pa_table)
np.testing.assert_equal(row, {"a": _COL_A[0], "b": _COL_B[0], "c": np.array(_COL_C[0])})
col = formatter.format_column(pa_table)
np.testing.assert_equal(col, np.array(_COL_A))
batch = formatter.format_batch(pa_table)
np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B), "c": np.array(_COL_C)})
def test_numpy_formatter_np_array_kwargs(self):
pa_table = self._create_dummy_table().drop(["b"])
formatter = NumpyFormatter(dtype=np.float16)
row = formatter.format_row(pa_table)
self.assertEqual(row["c"].dtype, np.dtype(np.float16))
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, np.float16)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["a"].dtype, np.dtype(np.float16))
self.assertEqual(batch["c"].dtype, np.dtype(np.float16))
def test_pandas_formatter(self):
pa_table = self._create_dummy_table()
formatter = PandasFormatter()
row = formatter.format_row(pa_table)
self.assertIsInstance(row, pd.DataFrame)
pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1])
pd.testing.assert_series_equal(row["b"], | pd.Series(_COL_B, name="b") | pandas.Series |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
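# exercise every (complib, complevel) combination available on this platform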
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
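# the weekmask "Sun Mon Tue Wed Thu" makes Friday/Saturday non-business
# days, and the listed holidays are skipped in the date_range below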
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failure on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = | concat([df1, df2], axis=1) | pandas.concat |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path
from collections import defaultdict
import mudcod.utils.visualization as VIS # noqa: E402
from mudcod.utils import sutils # noqa: E402
MAIN_DIR = Path(__file__).absolute().parent.parent
SIMULATION_DIR = MAIN_DIR / "simulations"
RESULT_DIR = MAIN_DIR / "results"
RESULTS_PATH = RESULT_DIR / "simulation_results"
FIGURE_DIR = RESULT_DIR / "simulation_figures"
sns.set_theme(style="whitegrid")
sutils.safe_create_dir(RESULTS_PATH)
sutils.safe_create_dir(FIGURE_DIR)
objkey = "loglikelihood"
def get_dataframe(result_dict, index_row, columns, multi_index):
resultsDf = pd.DataFrame(result_dict["val"], index=index_row, columns=columns)
if not multi_index:
resultsDf.reset_index(inplace=True)
return resultsDf
def get_info_from_name(simul_name):
mkint = lambda x: int(x[2:]) # noqa: E731
mkfloat = lambda x: float("0." + x[2:]) # noqa: E731
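# simulation names are assumed to look like (hypothetical example)
# "<class>_<scenario>_th10_rt05_ns500_rs07": the "th"/"ns" parts carry
# integers, while "rt"/"rs" carry decimals written without the leading "0."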
simul_info = simul_name.split("_")
if simul_name.startswith(".") or len(simul_info) != 6:
raise ValueError(f"Unknown simulation name format: {simul_name}.")
class_dcbm, scenario_msd, th, rt, ns, rs = (
simul_info[0],
simul_info[1],
simul_info[2],
simul_info[3],
simul_info[4],
simul_info[5],
)
ns, th = mkint(ns), mkint(th)
rs, rt = mkfloat(rs), mkfloat(rt)
return class_dcbm, scenario_msd, th, rt, ns, rs
def read_cv_results(results_path, multi_index=False):
# {{{
results_path = Path(results_path)
result_dirs = [f for f in results_path.iterdir() if f.is_dir()]
result_dict = defaultdict(list)
num_result = len(result_dirs)
for i, path in enumerate(sorted(result_dirs)):
simul_name = str(path.stem)
class_dcbm, scenario_msd, th, rt, ns, rs = get_info_from_name(simul_name)
percent = round(100 * i / num_result, 2)
print(
f"Procesing:%{percent}", class_dcbm, scenario_msd, th, rt, ns, rs, end="\r"
)
cv_path = path / "cross_validation"
muspces_cv_path = cv_path.glob("muspces*.csv")
for i, mpath in enumerate(sorted(muspces_cv_path)):
tempDf = | pd.read_csv(mpath) | pandas.read_csv |
""" This module contains a class GwGxg that calculates some
descriptive statistics from a series of groundwater head measurements
used by groundwater practitioners in the Netherlands
History: Created 16-08-2015, last updated 12-02-1016
Migrated to acequia on 15-06-2019
@author: <NAME>
"""
import math
from datetime import datetime
import datetime as dt
import warnings
import numpy as np
from pandas import Series, DataFrame
import pandas as pd
import acequia as aq
def stats_gxg(ts,reflev='datum'):
"""Return table with GxG statistics
Parameters
----------
ts : aq.GwSeries, pd.Series
Groundwater head time series
reflev : {'datum','surface'}, optional
Reference level for groundwater heads
Returns
-------
pd.DataFrame
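Examples
--------
Illustrative only; assumes ``gw`` is an ``aq.GwSeries`` (or a
``pd.Series`` of measured heads) created elsewhere:
>>> tbl = stats_gxg(gw, reflev='surface')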
"""
gxg = aq.GxgStats(ts)
return gxg.gxg(reflev=reflev)
class GxgStats:
"""Calculate descriptive statistics for time series of measured heads
Parameters
----------
gw : aq.GwSeries, pd.Series
timeseries of groundwater head measurements relative to datum level
srname : str, optional
name of groundwater head series
surface : float, optional
surface level height (if ref='datum' this option is ignored)
Notes
-----
In the Netherlands, traditionally groundwater head series are
summarized using descriptive statistics that characterise the mean
highest level (GHG), the mean lowest level (GLG) and the mean spring
level (GVG). These three measures together are referred to as the GxG.
The definitions of GHG, GLG and GVG are based on time series with
measured heads on the 14th and 28th of each month. Therefore the time
series of measured heads is internally resampled to values on the 14th
and 28th before calculating the GxG statistics.
For further reference:
<NAME> and <NAME> (1985). 'Water table classes:
a method to describe seasonal fluctuation and duration of water table
classes on Dutch soil maps.' Agricultural Water Management 10 (1985)
109 - 125. Elsevier Science Publishers, Amsterdam.
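Examples
--------
Minimal illustrative sketch (assumes ``gw`` is an ``aq.GwSeries``
loaded elsewhere):
>>> gxg = GxgStats(gw)
>>> per_year = gxg.xg(reference='surface')
>>> summary = gxg.gxg(reference='surface')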
"""
N14 = 18
## REFERENCE = ['datum','surface']
APPROXIMATIONS = ['SLUIJS82','HEESEN74','SLUIJS76a','SLUIJS76b',
'SLUIJS89pol','SLUIJS89sto','RUNHAAR89','GAAST06',]
VGDATES = ['apr1','apr15','mar15']
VGREFDATE = 'apr1'
def __init__(self, gw, srname=None, surface=None):
"""Return GxG object"""
if isinstance(gw,aq.GwSeries):
self._ts = gw.heads(ref='datum')
self.srname = gw.name()
if surface is None:
self._surface = gw.surface()
else:
self._surface = surface
self._gw = gw
elif isinstance(gw,pd.Series):
self._ts = gw
self.srname = self._ts.name
self._surface = surface
self._gw = None
else:
raise TypeError(f'{gw} is not of type aq.GwSeries or pd.Series')
self._ts1428 = aq.ts1428(self._ts,maxlag=3,remove_nans=False)
self._xgnap = self._calculate_xg_nap()
def _yearseries(self,ts,dtype='float64'):
"""Return empty time series with years as index with all years
between min(year) and max(year) in index (no missing years)"""
if isinstance(ts,pd.Series):
years = set(ts.index.year)
elif isinstance(ts,(list,set,np.ndarray)):
years = set(ts)
else:
raise TypeError(f'{ts} must be list-like')
minyear = min(years)
maxyear= max(years)
sr = Series(index=range(minyear,maxyear+1),dtype=dtype,name='year')
return sr
def vg3(self):
"""Return VG3 (Spring Level) for each year
VG3 is calculated as the mean of groundwater head
levels on 14 march, 28 march and 14 april
Return
------
pd.Series
Notes
-----
Calculation of GVG based on the average of three dates was
introduced by Finke et al. (1999)
References
----------
<NAME>., <NAME>, <NAME>, <NAME>, <NAME>
& <NAME> (1999). Actuele grondwaterinformatie 1:10.000 in de
waterschappen Wold en Wieden en Meppelerdiep. Gebruik van digitale
maaiveldshoogtes bij de kartering van GHG, GVG en GLG. SC-rapport
633. (in Dutch).
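Examples
--------
Illustrative only: for a given year the VG3 is simply the (nan)mean of
the resampled heads on 14 March, 28 March and 14 April of that year,
rounded to two decimals:
>>> vg3_per_year = GxgStats(gw).vg3()  # gw: aq.GwSeries, assumed given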
"""
self._vg3 = self._yearseries(self._ts1428)
for i,year in enumerate(self._vg3.index):
v1 = self._ts1428[dt.datetime(year,3,14)]
v2 = self._ts1428[dt.datetime(year,3,28)]
v3 = self._ts1428[dt.datetime(year,4,14)]
with warnings.catch_warnings():
# numpy raises a silly warning with nanmean on NaNs
warnings.filterwarnings(action='ignore',
message='Mean of empty slice')
self._vg3[year] = np.round(np.nanmean([v1,v2,v3]),2)
self._vg3.name = 'VG3'
return self._vg3
def vg1(self,refdate=VGREFDATE,maxlag=7):
"""Return VG (Spring Level) for each year as the measurement
closest to refdate
Parameters
----------
refdate : {'apr1','apr15','mar15'}, default 'apr1'
reference date for estimating VG
maxlag : number
maximum allowed difference between measurement date and refdate
Return
------
pd.Series
Notes
-----
The VG (Voorjaars Grondwaterstand, Spring Level) is estimated as
the single measurement closest to the reference date given by
refdate.
The reference date for calculation of the GVG was changed from
april 15 to april 1st in the early eighties. In 2000 the
Cultuurtechnisch Vademecum proposed march 15 as the new reference
date for the GVG but this proposal was not generally adopted.
In practice april 1st is always used as reference date and this
is used as default for calculations.
References
----------
<NAME>, J.W.J., <NAME> & <NAME> (2009). Actuele
grondwaterstandsituatie in natuurgebieden. Rapport 94 WOT. Alterra,
Wageningen. (in Dutch).
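Examples
--------
Illustrative only (``gw`` is assumed to be an ``aq.GwSeries``):
>>> vg_apr1 = GxgStats(gw).vg1(refdate='apr1', maxlag=7)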
"""
if refdate not in self.VGDATES:
warnings.warn((f'Reference date {refdate} for GVG is not '
f'recognised. Reference date \'{self.VGREFDATE}\' is '
f'assumed.'))
refdate = self.VGREFDATE
vg1 = self._yearseries(self._ts1428)
for i,year in enumerate(vg1.index):
if refdate=='apr1':
date = dt.datetime(year,4,1)
if refdate=='apr15':
date = dt.datetime(year,4,15)
if refdate=='mar15':
date = dt.datetime(year,3,15)
daydeltas = self._ts.index - date
mindelta = np.amin(np.abs(daydeltas))
sr_nearest = self._ts[np.abs(daydeltas) == mindelta]
maxdelta = pd.to_timedelta(f'{maxlag} days')
if (mindelta <= maxdelta):
vg1[year] = np.round(sr_nearest.iloc[0],2)
vg1.name = f'VG{refdate}'
return vg1
def _calculate_xg_nap(self):
"""Calculate xg statistics for eacht year and return table"""
hydroyears = aq.hydroyear(self._ts1428)
sr = self._yearseries(hydroyears)
xg = pd.DataFrame(index=sr.index)
xg.index.name = 'year'
for year in xg.index:
ts = self._ts1428[hydroyears==year]
ts = ts[ts.notnull()]
n1428 = len(ts)
if not np.isnan(n1428):
n1428 = math.floor(n1428)
hg3 = np.nan
lg3 = np.nan
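# HG3/LG3: mean of the three highest/lowest resampled heads within the
# hydrological year; only calculated when at least N14 (=18) of the 24
# possible 14th/28th measurements are present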
if n1428 >= self.N14:
hg3 = ts.nlargest(n=3).mean()
lg3 = ts.nsmallest(n=3).mean()
hg3w = np.nan
lg3s = np.nan
if n1428 >= self.N14:
ts_win = ts[aq.season(ts)=='winter']
ts_sum = ts[aq.season(ts)=='summer']
hg3w = ts_win.nlargest(n=3).mean()
lg3s = ts_sum.nsmallest(n=3).mean()
xg.loc[year,'hg3'] = np.round(hg3,2)
xg.loc[year,'lg3'] = np.round(lg3,2)
xg.loc[year,'hg3w'] = np.round(hg3w,2)
            xg.loc[year,'lg3s'] = np.round(lg3s,2)
            xg.loc[year,'n1428'] = n1428
        xg['vg3'] = self.vg3()
        for date in self.VGDATES:
            xg[f'vg_{date}'] = self.vg1(refdate=date)
        return xg
def xg(self,reference='datum',name=True):
"""Return table of GxG groundwater statistics for each
hydrological year
Parameters
----------
reference : {'datum','surface'}, default 'datum'
reference level for gxg statistics
name : bool, default True
include series name in index
Return
------
pd.DataFrame"""
if reference not in ['datum','surface']:
warnings.warn((f'Reference level \'{reference}\' is not allowed. '
f'Reference level \'datum\' is assumed.'))
reference = 'datum'
xg = self._xgnap.copy()
if name==True:
xg = pd.concat({self.srname: xg}, names=['series'])
if reference=='datum':
return xg
for col in xg.columns:
if col in ['n1428']:
continue
xg[col] = (self._surface - xg[col])*100
xg[col] = xg[col].apply(lambda x:math.floor(x) if
not np.isnan(x) else x)
##if not np.isnan(xg[col]):
## xg[col] = math.floor(xg[col])
return xg
def gxg(self,reference='datum',minimal=False):
"""Return table with GxG for one head series
Parameters
----------
        minimal : bool, default False
return minimal selection of stats
reference : {'datum','surface'}, default 'datum'
reference level for gxg statistics
Returns
-------
pd.DataFrame"""
"""
if hasattr(self,'_minimal'):
if self._minimal!=minimal:
self._reset()
self._minimal = minimal
if self._reflev==reflev:
if hasattr(self,'_gxg'):
return self._gxg
else:
self._reset()
self._validate_reflev (reflev)
"""
xg = self.xg(reference=reference,name=False)
gxg = | pd.Series(name=self.srname,dtype='object') | pandas.Series |
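# A worked check of the VG3 rule documented above (mean of the heads measured on
# 14 March, 28 March and 14 April, rounded to 2 decimals). The dates come from the
# docstring; the head values below are made up for illustration.
import numpy as np
import pandas as pd
heads = pd.Series([1.20, 1.10, 0.95],
                  index=pd.to_datetime(['2020-03-14', '2020-03-28', '2020-04-14']))
vg3_2020 = np.round(np.nanmean(heads.to_numpy()), 2)
print(vg3_2020)  # 1.08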
"""
Procedures needed for Common support estimation.
Created on Thu Dec 8 15:48:57 2020.
@author: MLechner
# -*- coding: utf-8 -*-
"""
import copy
import numpy as np
import pandas as pd
from mcf import mcf_data_functions as mcf_data
from mcf import general_purpose as gp
from mcf import general_purpose_estimation as gp_est
def common_support(predict_file, tree_file, fill_y_file, fs_file, var_x_type,
v_dict, c_dict, cs_list=None, prime_values_dict=None,
pred_tr_np=None, d_tr_np=None):
"""
Remove observations from data files that are off-support.
Parameters
----------
predict_file : String of csv-file. Data to predict the RF.
    tree_file : String of csv-file. Data to train the RF.
fill_y_file : String of csv-file. Data with y to be used by RF.
fs_file : String of csv-file. Data with y to be used by RF.
var_x_type : Dict. Features.
v_dict : Dict. Variables.
c_dict : Dict. Parameters.
cs_list: Tuple. Contains the information from estimated propensity score
needed to predict for other data. Default is None.
prime_values_dict: Dict. List of unique values for variables to dummy.
Default is None.
    pred_tr_np: Numpy array. Predicted treatment probabilities in training data.
        Needed to define cut-offs.
    d_tr_np: Numpy series. Observed treatment in training data (tree_file).
Returns
-------
predict_file_new : String of csv-file. Adjusted data.
cs_list: Tuple. Contains the information from estimated propensity score
needed to predict for other data.
pred_t: Numpy array. Predicted treatment probabilities in training data.
d_train_tree: estimated tree by sklearn.
"""
def r2_obb(c_dict, idx, oob_best):
if c_dict['with_output']:
print('\n')
print('-' * 80)
print('Treatment: {:2}'.format(c_dict['d_values'][idx]),
'OOB Score (R2 in %): {:6.3f}'.format(oob_best * 100))
print('-' * 80)
def get_data(file_name, x_name):
data = pd.read_csv(file_name)
x_all = data[x_name] # deep copies
obs = len(x_all.index)
return data, x_all, obs
def check_cols(x_1, x_2, name1, name2):
var1 = set(x_1.columns)
var2 = set(x_2.columns)
if var1 != var2:
if len(var1-var2) > 0:
print('Variables in ', name1, 'not contained in ', name2,
*(var1-var2))
if len(var2-var1) > 0:
print('Variables in ', name2, 'not contained in ', name1,
*(var2-var1))
raise Exception(name1 + ' data and ' + name2 + ' data contain' +
                            ' different variables. Program stopped.')
def mean_by_treatment(treat_pd, data_pd):
treat_pd = treat_pd.squeeze()
treat_vals = pd.unique(treat_pd)
print('--------------- Mean by treatment status ------------------')
if len(treat_vals) > 0:
mean = data_pd.groupby(treat_pd).mean()
print(mean.transpose())
else:
print('All obs have same treatment:', treat_vals)
def on_support_data_and_stats(obs_to_del_np, data_pd, x_data_pd, out_file,
upper_l, lower_l, c_dict, header=False,
d_name=None):
obs_to_keep = np.invert(obs_to_del_np)
data_keep = data_pd[obs_to_keep]
gp.delete_file_if_exists(out_file)
data_keep.to_csv(out_file, index=False)
if c_dict['with_output']:
x_keep = x_data_pd[obs_to_keep]
x_delete = x_data_pd[obs_to_del_np]
if header:
print('\n')
print('=' * 80)
print('Common support check')
print('-' * 80)
print('Upper limits on treatment probabilities: ', upper_l)
print('Lower limits on treatment probabilities: ', lower_l)
print('-' * 80)
print('Data investigated and saved:', out_file)
print('-' * 80)
print('Observations deleted: {:4}'.format(np.sum(obs_to_del_np)),
' ({:6.3f}%)'.format(np.mean(obs_to_del_np)*100))
with pd.option_context(
'display.max_rows', 500,
'display.max_columns', 500,
'display.expand_frame_repr', True,
'display.width', 150,
'chop_threshold', 1e-13):
all_var_names = [name.upper() for name in data_pd.columns]
if d_name[0].upper() in all_var_names:
d_keep = data_keep[d_name]
d_delete = data_pd[d_name]
d_delete = d_delete[obs_to_del_np]
d_keep_count = d_keep.value_counts(sort=False)
d_delete_count = d_delete.value_counts(sort=False)
d_keep_count = pd.concat(
[d_keep_count,
d_keep_count / np.sum(obs_to_keep) * 100], axis=1)
d_delete_count = pd.concat(
[d_delete_count,
d_delete_count / np.sum(obs_to_del_np) * 100], axis=1)
d_keep_count.columns = ['Obs.', 'Share in %']
d_delete_count.columns = ['Obs.', 'Share in %']
if c_dict['panel_data']:
cluster_id = data_pd[v_dict['cluster_name']].squeeze()
cluster_keep = cluster_id[obs_to_keep].squeeze()
cluster_delete = cluster_id[obs_to_del_np].squeeze()
print('-' * 80)
print('Observations kept by treatment')
print(d_keep_count)
print('- ' * 20)
print('Observations deleted by treatment')
print(d_delete_count)
if c_dict['panel_data']:
print('- ' * 20)
print('Total number of panel unit:',
len(cluster_id.unique()))
print('Observations belonging to ',
len(cluster_keep.unique()),
'panel units are ON support')
print('Observations belonging to ',
len(cluster_delete.unique()),
'panel units are OFF support')
if d_name[0].upper() in all_var_names:
print()
print('Full sample (ON and OFF support observations)')
mean_by_treatment(data_pd[d_name], x_data_pd)
print('-' * 80)
print('Data ON support')
print('-' * 80)
print(x_keep.describe().transpose())
if d_name[0].upper() in all_var_names:
print()
mean_by_treatment(d_keep, x_keep)
print('-' * 80)
print('Data OFF support')
print('-' * 80)
print(x_delete.describe().transpose())
if d_name[0].upper() in all_var_names:
print()
if np.sum(obs_to_del_np) > 1:
mean_by_treatment(d_delete, x_delete)
else:
print('Only single observation deleted.')
if np.mean(obs_to_del_np) > c_dict['support_max_del_train']:
raise Exception(
'Less than {:3}%'.format(
100-c_dict['support_max_del_train']*100)
+ ' observations left after common support check of'
+ ' training data. Programme terminated. Improve'
+ ' balance of input data for forest building.')
x_name, x_type = gp.get_key_values_in_list(var_x_type)
    names_unordered = []  # Split unordered variables into dummies
for j, val in enumerate(x_type):
if val > 0:
names_unordered.append(x_name[j])
fs_adjust = False
obs_fs = 0
if c_dict['train_mcf']:
data_tr, x_tr, obs_tr = get_data(tree_file, x_name) # train,adj.
data_fy, x_fy, obs_fy = get_data(fill_y_file, x_name) # adj.
if c_dict['fs_yes']:
# if not ((fs_file == tree_file) or (fs_file == fill_y_file)):
if fs_file not in (tree_file, fill_y_file):
data_fs, x_fs, obs_fs = get_data(fs_file, x_name) # adj.
fs_adjust = True
if c_dict['pred_mcf']:
data_pr, x_pr, obs_pr = get_data(predict_file, x_name)
else:
obs_pr = 0
if names_unordered: # List is not empty
if c_dict['train_mcf'] and c_dict['pred_mcf']:
x_total = pd.concat([x_tr, x_fy, x_pr], axis=0)
if fs_adjust:
x_total = pd.concat([x_total, x_fs], axis=0)
x_dummies = pd.get_dummies(x_total[names_unordered],
columns=names_unordered)
x_total = pd.concat([x_total, x_dummies], axis=1)
x_tr = x_total[:obs_tr]
x_fy = x_total[obs_tr:obs_tr+obs_fy]
x_pr = x_total[obs_tr+obs_fy:obs_tr+obs_fy+obs_pr]
if fs_adjust:
x_fs = x_total[obs_tr+obs_fy+obs_pr:]
elif c_dict['train_mcf'] and not c_dict['pred_mcf']:
x_total = pd.concat([x_tr, x_fy], axis=0)
if fs_adjust:
x_total = pd.concat([x_total, x_fs], axis=0)
x_dummies = pd.get_dummies(x_total[names_unordered],
columns=names_unordered)
x_total = pd.concat([x_total, x_dummies], axis=1)
x_tr = x_total[:obs_tr]
x_fy = x_total[obs_tr:obs_tr+obs_fy]
if fs_adjust:
x_fs = x_total[obs_tr+obs_fy:]
else:
x_add_tmp = check_if_obs_needed(names_unordered, x_pr,
prime_values_dict)
if x_add_tmp is not None:
x_total = | pd.concat([x_pr, x_add_tmp], axis=0) | pandas.concat |
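# A minimal, self-contained sketch of the encode-then-split pattern used above:
# concatenate the frames, one-hot the unordered columns once with pd.get_dummies,
# then split back by row counts. Column names and values here are made up.
import pandas as pd
x_a = pd.DataFrame({'region': ['n', 's'], 'age': [30, 40]})
x_b = pd.DataFrame({'region': ['e', 'n'], 'age': [50, 60]})
x_total = pd.concat([x_a, x_b], axis=0, ignore_index=True)
x_dummies = pd.get_dummies(x_total[['region']], columns=['region'])
x_total = pd.concat([x_total, x_dummies], axis=1)
x_a_enc = x_total[:len(x_a)]    # rows of the first frame
x_b_enc = x_total[len(x_a):]    # remaining rows
print(list(x_a_enc.columns))    # ['region', 'age', 'region_e', 'region_n', 'region_s']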
from __future__ import division, print_function
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn import tree
from Basic import adult, dutch, testdata
from Utility import Utility
from Detection import Judge
from Basic import get_group
def Ranker(X, Y, Epos):
# One-hot-encoding features
for f in X.columns:
X_dummy = | pd.get_dummies(X[f], prefix=f) | pandas.get_dummies |
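# pd.get_dummies with a prefix, as in the loop above, expands one categorical
# feature into indicator columns. The feature name and values below are made up.
import pandas as pd
col = pd.Series(['low', 'high', 'low'], name='income')
print(pd.get_dummies(col, prefix='income'))  # columns: income_high, income_low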
#!/usr/bin/env python
# coding: utf-8
# <img style="float: left;" src="earth-lab-logo-rgb.png" width="150" height="150" />
#
# # Earth Analytics Education - EA Python Course Spring 2021
# ## Important - Assignment Guidelines
#
# 1. Before you submit your assignment to GitHub, make sure to run the entire notebook with a fresh kernel. To do this first, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart & Run All)
# 2. Always replace the `raise NotImplementedError()` code with your code that addresses the activity challenge. If you don't replace that code, your notebook will not run.
#
# ```
# # YOUR CODE HERE
# raise NotImplementedError()
# ```
#
# 3. Any open ended questions will have a "YOUR ANSWER HERE" within a markdown cell. Replace that text with your answer also formatted using Markdown.
# 4. **DO NOT RENAME THIS NOTEBOOK File!** If the file name changes, the autograder will not grade your assignment properly.
# 6. When you create a figure, comment out `plt.show()` to ensure the autograder can grade your plots. For figure cells, DO NOT DELETE the code that says `DO NOT REMOVE LINE BELOW`.
#
# ```
# ### DO NOT REMOVE LINE BELOW ###
# student_plot1_ax = nb.convert_axes(plt)
# ```
#
# * Only include the package imports, code, and outputs that are required to run your homework assignment.
# * Be sure that your code can be run on any operating system. This means that:
# 1. the data should be downloaded in the notebook to ensure it's reproducible
# 2. all paths should be created dynamically using the `os.path.join`
#
# ## Follow to PEP 8 Syntax Guidelines & Documentation
#
# * Run the `autopep8` tool on all cells prior to submitting (HINT: hit shift + the tool to run it on all cells at once!
# * Use clear and expressive names for variables.
# * Organize your code to support readability.
# * Check for code line length
# * Use comments and white space sparingly where it is needed
# * Make sure all python imports are at the top of your notebook and follow PEP 8 order conventions
# * Spell check your Notebook before submitting it.
#
# For all of the plots below, be sure to do the following:
#
# * Make sure each plot has a clear TITLE and, where appropriate, label the x and y axes. Be sure to include UNITS in your labels.
#
# ### Add Your Name Below
# **Your Name:** <NAME>
# <img style="float: left;" src="colored-bar.png"/>
# ---
# # Week 04 and 05 Homework - Automate NDVI Workflow
#
# For this assignment, you will write code to generate a plot of the mean normalized difference vegetation index (NDVI) for two different sites in the United States across one year of data:
#
# * San Joaquin Experimental Range (SJER) in Southern California, United States
# * Harvard Forest (HARV) in the Northeastern United States
#
# The data that you will use for this week is available from **earthpy** using the following download:
#
# `et.data.get_data('ndvi-automation')`
#
# ## Assignment Goals
#
# Your goal in this assignment is to create the most efficient and concise workflow that you can that allows for:
#
# 1. The code to scale if you added new sites or more time periods to the analysis.
# 2. Someone else to understand your workflow.
# 3. The LEAST and most efficient (i.e. runs fast, minimize repetition) amount of code that completes the task.
#
# ### HINTS
#
# * Remove values outside of the landsat valid range of values as specified in the metadata, as needed.
# * Keep any output files SEPARATE FROM input files. Outputs should be created in an outputs directory that is created in the code (if needed) and/or tested for.
# * Use the functions that we demonstrated during class to make your workflow more efficient.
# * BONUS - if you chose - you can export your data as a csv file. You will get bonus points for doing this.
#
#
# ## Assignment Requirements
#
# Your submission to the GitHub repository should include:
# * This Jupyter Notebook file (.ipynb) with:
# * The code to create a plot of mean NDVI across a year for 2 NEON Field Sites:
# * NDVI on the x axis and formatted dates on the y for both NEON sites on one figure/axis object
# * The **data should be cleaned to remove the influence of clouds**. See the [earthdatascience website for an example of what your plot might look like with and without removal of clouds](https://www.earthdatascience.org/courses/earth-analytics-python/create-efficient-data-workflows/).
# * BONUS: Create one output `.csv` file that has 3 columns - NDVI, Date and Site Name - with values for SJER and HARV.
#
# Your notebook should:
# * Have *at least* 2 well documented and well named functions with docstrings.
# * Include a Markdown cell at the top of the notebook that outlines the overall workflow using pseudocode (i.e. plain language, not code)
# * Include additional Markdown cells throughout the notebook to describe:
# * the data that you used - and where it is from
# * how data are being processing
# * how the code is optimized to run fast and be more concise
# # Replace this cell with your pseudocode for this workflow
#
# If you happen to be a diagram person a diagram is ok too
#
#
# # Pseudocode for just HARV site
# 1. Go within 'ndvi-automation' folder four levels down to access tif files for HARV
# 2. Extract and sort bands 4-5
# 3. Open up the bands with function open_clean_bands
# - this will use rxr to open the raster, and crop_boundary as a crop extent
# 4. Calculate NDVI
# 5. Obtain QA data from landsat files for cloud mask
# -extract all files from a tif folder that end in "pixel.tif"
# -open up that qa data with rxr
# 6. Create cloud mask from ep cloud pixels
# -refer to textbook for this
# 7. Get the mean of the new masked xarray
# 8. Create df with 3 columns: mean, the site name, and the date in datetime
#
# # Pseudocode for both sites
# Mostly the same as above, except we can't just name the tif file for a specific site. So...
# 1. One for loop for each site directory
# 2. A nested loop for each landsat file directory
# 3. Extract just files for bands 4-5
# 4. Another nested for loop for each band in band folder
# 5. Open up the bands with function open_clean_bands
# 6. Calculate NDVI
# 7. Obtain QA data from landsat files for cloud mask
# - using cloud mask function
# 8. I'll have already created cloud mask from ep cloud pixels - don't need to repeat because it doesn't change
# 9. Get the mean of the new masked xarray
# 10. Create df with 3 columns: mean, the site name, and the date in datetime
# - create list to do so
#
#
# In[1]:
# Autograding imports - do not modify this cell
import matplotcheck.autograde as ag
import matplotcheck.notebook as nb
import matplotcheck.timeseries as ts
from datetime import datetime
# In[2]:
# Import needed packages in PEP 8 order
# and no unused imports listed (10 points total)
# YOUR CODE HERE
import os
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import geopandas as gpd
import rioxarray as rxr
import xarray as xr
from rasterio.plot import plotting_extent
import earthpy as et
import earthpy.mask as em
import earthpy.spatial as es
import earthpy.plot as ep
#set working directory
data = et.data.get_data('ndvi-automation')
os.chdir(os.path.join(et.io.HOME,
"earth-analytics",
"data"))
# In[3]:
# DO NOT MODIFY THIS CELL
# Tests that the working directory is set to earth-analytics/data
path = os.path.normpath(os.getcwd())
student_wd_parts = path.split(os.sep)
if student_wd_parts[-2:] == ['earth-analytics', 'data']:
print("\u2705 Great - it looks like your working directory is set correctly to ~/earth-analytics/data")
else:
print("\u274C Oops, the autograder will not run unless your working directory is set to earth-analytics/data")
# # Figure 1: Plot 1 - Mean NDVI For Each Site Across the Year (50 points)
#
# Create a plot of the mean normalized difference vegetation index (NDVI) for the two different sites in the United States across the year:
#
# * NDVI on the x axis and formatted dates on the y for both NEON sites on one figure/axis object.
# * Each site should be identified with a different color in the plot and legend.
# * The final plot **data should be cleaned to remove the influence of clouds**.
# * Be sure to include appropriate title and axes labels.
#
# Add additional cells as needed for processing data (e.g. defining functions, etc), but be sure to:
# * follow the instructions in the code cells that have been provided to ensure that you are able to use the sanity check tests that are provided.
# * include only the plot code in the cell identified for the final plot code below
# ## Task 1:
#
# In the cell below, create a single dataframe containing MEAN NDVI, the site name,
# and the date of the data for the HARV site
# scene `HARV/landsat-crop/LC080130302017031701T1-SC20181023151837`. The column names for the final
# DataFrame should be`mean_ndvi`, and `site`, and the data should be **indexed on the date**.
#
# Use the functions that we reviewed in class (or create your own versions of them) to implement your code
#
# ### In the Cell below Place All Functions Needed to Run this Notebook (20 points)
# In[4]:
### DO NOT REMOVE THIS LINE OR EDIT / MOVE THIS CELL ###
start_time = datetime.now()
# _This first code block does quite a lot of the exploration of ndvi-automation folder structure. It sets path variables for the HARV site, creates the crop extent for that specific site, and opens the specific tif folder for LC080130302017031701T1-SC20181023151837. Finally, it extracts just the bands 4-5 to calculate the NDVI._
#
# _Because this is just one site, there isn't really a better way to build a function or a for loop that will optimize this workflow._
# In[5]:
#----------------------------------------------
#exploration of folder structure
#----------------------------------------------
# list both site directories
site_path = os.path.join("ndvi-automation", "sites")
# Get a list of both site directories
sites = glob(site_path + "/*/")
#sites
#specifically create df for HARV site
site_name = 'HARV'
#----------------------------------------------
#open shp boundary file from vector directory
#----------------------------------------------
# go into vector directory to get shp file
vector_dir = os.path.join(site_path, site_name,
"vector")
site_boundary_path = os.path.join(vector_dir, site_name + "-crop.shp")
bound = gpd.read_file(site_boundary_path)
bound
#----------------------------------------------
#open tif files from landsat directory
#----------------------------------------------
# In the landsat directory, get files
landsat_dir = os.path.join(site_path, site_name, "landsat-crop")
landsat_folder = os.path.join(landsat_dir, "LC080130302017031701T1-SC20181023151837")
# Open bands in a sorted format
band_files = sorted(glob(os.path.join(landsat_folder, "*band*[4-5].tif")))
band_files
# _The following code block is where my two functions reside - the code was first tested outside of the function and then added after it was confirmed it worked on one site._
#
# _open_clean_bands takes one of the band files I just extracted above, opens it up, clips it to HARV's crop extent, cleans it up, and returns that same band for future use._
#
# _cloud_mask takes the qa file from a tif folder, opens it up using rxr, clips it to HARV's crop extent as well, and then, using an input of mask values, masks out the flagged (cloudy) pixels from whatever NDVI array is given._
#
# _It was important to me not to over complicate these functions. They should be useful and simple to help automate my workflow, and not try to do a billion things at once._
# In[6]:
# In this cell place all of the functions needed to run your notebook
# You will be graded here on function application, docstrings, efficiency so ensure
# All functions are placed here!
# YOUR CODE HERE
def open_clean_bands(band_path,
crop_bound,
                     valid_range=None):
    """Open a single landsat band, clip it to a boundary and clean its values.
Parameters
-----------
band_path : string
A path to the array to be opened
crop_bound : GeoPandas DataFrame
A data from that tells us the extent of the site of interest
valid_range : tuple (optional)
A tuple of min and max range of values for the data. Default = None
Returns
-----------
    band : xarray DataArray
        The opened band clipped to crop_bound, with values outside
        valid_range set to NaN
"""
band = (rxr.open_rasterio(band_path, masked=True)
.rio.clip(crop_bound.geometry, from_disk=True)
.squeeze())
# Specify valid range of values
if valid_range:
        mask = ((band <= valid_range[0]) | (band > valid_range[1]))
band = band.where(~mask, np.nan)
return band
def cloud_mask(ndvi_array, site_folder, crop_bound, masked_values):
""" This function masks clouds from a landsat band using a
pixel_qa layer.
Parameters
-----------
ndvi_array: xarray DataArray
An xarray DataArray with ndvi values
site_folder : string
A path to the site folder where QA array is
crop_bound : GeoPandas DataFrame
The crop extent of the area of interest
masked_values : list
A list of all values to be masked
Returns
-----------
arr : xarray DataArray
An xarray DataArray with ndvi values, masked to the specific values
"""
#path to specific QA files - they end in pixel.tif
qa_path = glob(os.path.normpath(os.path.join(site_folder, "*pixel*.tif")))
#open path with rxr using crop extent of specific site
qa_file = rxr.open_rasterio(
qa_path[0], masked=True).rio.clip(crop_bound.geometry,
from_disk=True).squeeze()
#crop the NDVI where NOT masked values are (i.e., where there isn't cloud cover)
ndvi_clean_crop = ndvi_array.where(~qa_file.isin(masked_values))
return ndvi_clean_crop
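# A hedged usage sketch of the two helpers above; the band paths are placeholders,
# and `bound` / `all_masked_values` are the crop extent and mask values built
# elsewhere in this notebook:
#   red = open_clean_bands('...band4.tif', crop_bound=bound, valid_range=(0, 10000))
#   nir = open_clean_bands('...band5.tif', crop_bound=bound, valid_range=(0, 10000))
#   ndvi = (nir - red) / (nir + red)
#   ndvi_masked = cloud_mask(ndvi, site_folder='...landsat-crop/LC08...',
#                            crop_bound=bound, masked_values=all_masked_values)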
# _This is the bulk of the processing for the HARV site. This code block loops through the bands, opens and cleans them, calculates the NDVI, and then finds the associative qa_path from the tif folder. It uses this to create a cloud mask, and then after the raster has been masked, it calculates the mean NDVI, and builds a dataframe from the site name, the date, and the mean NDVI. Because it doesn't have to do this with more than one site, it's pretty streamlined as is._
# In[7]:
# Create dataframe of mean NDVI in this cell using the functions created above
# Important: to use the ungraded tests below as a sanity check,
# name your columns: mean_ndvi and site
# Call the dataframe at the end of the cell so the tests run on it!
# Be sure that the date column is an index of type date
# HINT: the time series lessons may help you remember how to do this!
# YOUR CODE HERE
#----------------------------------------------------
#loop through each band file to open and clean bands
#----------------------------------------------------
bands = []
for aband in band_files:
#run open_clean_bands function
cleaned_band = open_clean_bands(band_path=aband,
crop_bound=bound,
valid_range=(0, 10000))
bands.append(cleaned_band)
#----------------------------------------------
#calculate NDVI
#----------------------------------------------
# NDVI = (NIR-RED)/(NIR+RED)
ndvi_xr = (bands[1] - bands[0]) / (bands[1] + bands[0])
#ndvi_xr.plot()
#------------------------------------------------
#obtain QA data from landsat files for cloud mask
#------------------------------------------------
qa_path = glob(os.path.normpath(os.path.join(landsat_folder, "*pixel*.tif")))
qa_file = rxr.open_rasterio(qa_path[0], masked=True).rio.clip(bound.geometry,
from_disk=True).squeeze()
#------------------------------------------------
#create cloud mask from ep cloud pixels
#------------------------------------------------
high_cloud_confidence = em.pixel_flags["pixel_qa"]["L8"]["High Cloud Confidence"]
cloud = em.pixel_flags["pixel_qa"]["L8"]["Cloud"]
cloud_shadow = em.pixel_flags["pixel_qa"]["L8"]["Cloud Shadow"]
all_masked_values = cloud_shadow + cloud + high_cloud_confidence
#mask ndvi with cloud mask
ndvi_clean_crop = ndvi_xr.where(~qa_file.isin(all_masked_values))
#ndvi_clean_crop.plot()
#----------------------------------------------
#get mean of xarray
#----------------------------------------------
ndvi_mean = ndvi_clean_crop.mean()
#type(ndvi_mean)
#convert mean to a float instead of xarray
ndvi_mean_value = ndvi_mean.item()
#----------------------------------------------
#create df with site, date, and mean NDVI
#----------------------------------------------
#slice up the path into its components to utilize different names in the path
slice_path = landsat_folder.split(os.sep)
#site is the third slice
site = slice_path[2]
#the file name (with date) is the fifth slice
file_string = slice_path[4]
#the date is in the file name - before 01T1. Year, Month, Day
date = file_string[10:18]
#parse the date string and reformat it as mm/dd/yyyy
date_time = datetime.strptime(date, '%Y%m%d').strftime('%m/%d/%Y')
#create dataframe
ndvi_df = | pd.DataFrame([[site, date_time, ndvi_mean_value]], columns=['site', 'date', 'mean_ndvi']) | pandas.DataFrame |
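# The assignment asks for the final frame to be indexed on the date; a minimal way
# to do that with the column names used above (the mean_ndvi value is a placeholder):
import pandas as pd
ndvi_df = pd.DataFrame([['HARV', '03/17/2017', 0.59]],
                       columns=['site', 'date', 'mean_ndvi'])
ndvi_df['date'] = pd.to_datetime(ndvi_df['date'])
ndvi_df = ndvi_df.set_index('date')
print(ndvi_df)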
#!/usr/bin/env python
# coding: utf-8
# # Python script for automatic quality-control procedures (CEMADEN data)
# # Created on Aug.12.2020
# ### By:
# <NAME>
# <NAME>
# <NAME>
# Importing libraries used in this code
# In[ ]:
import numpy as np
import pandas as pd
from datetime import datetime
import glob
import warnings
import sys
import esda
import libpysal as lps
# Assigning values to main variables and other parameters
# In[ ]:
#Data storage path
path = 'D:/CEMADEN/'
years = [2014 , 2015, 2016, 2017, 2018, 2019,2020] #years used in the analysis
states = ['PE','PB','RN','BA','SE','PI','CE','MA','AL','AC','AM','RO','RR','AP','TO','PA','MT',
'MS','DF','GO','RS','SC','PR','ES','MG','RJ','SP'] #states used in the analysis
#Filters variables
threshold_missing_data_days=60 #days in a year without data
threshold_days_without_rain=200 #days in a row without rain
threshold_constant_days=35 #days in a row with rain = 0,2mm
threshold_max_peak=40 #record of xmm in 10min
#Moran's I variables
properties=['rainfall_events','mean_rainfall_depth','yearly_rainfall'] #properties calculated based on the events durations defined
mits_integer = [60, 360,1439] #mit lenghts used
n_neighbors = 5
p_value = 0.05
# Functions
# ------
# In[ ]:
#This function inserts a beginning and ending date for each code, each year (1st january till 31st december) to compute 365 days
def insert_begin_end(df, year, code):
datatemp=str(year)+'-01-01 00:00:10' #temporary date - beginning
data_b = {'gauge_code': [code],
'city': ['x'],
'state': ['x'],
'datetime': [datatemp], #assigning beginning date to code
'rain_mm': [-1],
}
datatemp=str(year)+'-12-31 23:59:10' #temporary date - end
data_e = {'gauge_code': [code],
'city': ['x'],
'state': ['x'],
'datetime': [datatemp],
'rain_mm': [0],
}
df_b = pd.DataFrame(data_b)
df_e = pd.DataFrame(data_e)
df_b['datetime']=pd.to_datetime(df_b['datetime'])
df_e['datetime']=pd.to_datetime(df_e['datetime'])
df=pd.concat([df, df_e], ignore_index=True)
df=pd.concat([df_b, df], ignore_index=True)
return df
# In[ ]:
#This function goes through all the CEMADEN files from all states, each year, to assemble a dataframe with their gauge codes
def get_df_codes (year, state):
filename = str(state) +'_'+ str(year) + '.h5'
df_cemaden_info = pd.read_hdf(path+'/data/raw data/'+ filename,'table_info')
df_codes = df_cemaden_info['gauge_code']
return df_codes
# In[ ]:
#This function writes the status (HQ or PQ) on each gauge according to it's classification
#from the filters and moran
def write_status (code, year, state, status,filter_flag, df_filtered_gauges):
df_filtered_gauges.at[(code, year),'state']=state
df_filtered_gauges.at[(code, year),'status']=status
df_filtered_gauges.at[(code, year),'filter']=filter_flag
return df_filtered_gauges
# Single gauge tests - Filters
#
# In[ ]:
# 0. Filter All: This function go through all filters in a specific order, and writes the gauge's final status of this step
# the function raises a "flag" if the gauge doesn't fulfill the pre-established condition, otherwise, it goes through the next filter
def all_filters (code,df_cemaden_data,year,state,df_filtered_gauges):
write=True
flag=False
flag = filter_missing_data_days (code,df_cemaden_data,year,flag,state) #Filter 1 - missing days
if not flag:
flag = filter_consecutive_constant_values (code,df_cemaden_data,year,flag,state) #Filter 2 - consecutive 0.2mm rain days
elif flag and write:
status, filter_flag ='POOR QUALITY','missing_data_days'
df_filtered_gauges= write_status (code,year,state,status,filter_flag,df_filtered_gauges)
write=False
if not flag:
flag = filter_max_peak (code,df_cemaden_data,year,flag,state) #Filter 3 - max peak in 10min
elif flag and write:
status, filter_flag ='POOR QUALITY','consecutive_constant_values'
df_filtered_gauges= write_status (code,year,state,status,filter_flag,df_filtered_gauges)
write=False
if not flag:
flag= filter_consecutive_period_without_rain (code,df_cemaden_data,year,flag,state) #Filter 4- consecutive w/o rain days
elif flag and write:
status, filter_flag ='POOR QUALITY','max_peak'
df_filtered_gauges= write_status (code, year,state,status,filter_flag,df_filtered_gauges)
write=False
if not flag:
status, filter_flag ='HIGH QUALITY','unflagged'
df_filtered_gauges= write_status (code, year,state,status,filter_flag,df_filtered_gauges)
write=False
elif flag and write:
status, filter_flag ='POOR QUALITY','consecutive_period_without_rain'
df_filtered_gauges= write_status (code, year,state,status,filter_flag,df_filtered_gauges)
write=False
return df_filtered_gauges
# In[ ]:
# 1. Filter: Flags all gauges missing xx or more days of data
def filter_missing_data_days (code,df_cemaden_data,year,flag,state):
df_gauge=df_cemaden_data[(df_cemaden_data['gauge_code'] == code )]
df_gauge['rain_mm']=-1 #overwriting all rain values since missing values are substituted by "0"
df_gauge=df_gauge.set_index('datetime')
df_gauge_resample=df_gauge['rain_mm'].resample('D').sum() #resampling to obtain the information in "days"
number_days_year=get_days (year)
days_without_data= number_days_year - df_gauge_resample[(df_gauge_resample <0)].count()
if days_without_data >= threshold_missing_data_days:
flag=True #raising the flag if the gauge has more missing days in the records than the highest threshold defined
return flag
#This function is to identify whether the analyzed year is a leap year
def get_days (year):
if (year%4==0 and year%100!=0) or (year%400==0):
nday_year=366
else:
nday_year=365
return nday_year
# In[ ]:
# 2. Filter: Exclusion of gauges with consecutive constant values (0.2 mm) over a window of xx days
def filter_consecutive_constant_values (code,df_cemaden_data, year,flag, state):
df_gauge=df_cemaden_data[(df_cemaden_data['gauge_code'] == code ) & (df_cemaden_data['rain_mm']>0)]
df_gauge=df_gauge.set_index('datetime')
t=str(threshold_constant_days)+'D'
df_rolling_mean=df_gauge['rain_mm'].rolling(t).mean()
df_rolling_count=df_gauge['rain_mm'].rolling(t).count()
df_rolling_std=df_gauge['rain_mm'].rolling(t).std()
df_rolling_mean=pd.DataFrame(df_rolling_mean)
df_rolling_mean=df_rolling_mean.rename(columns={'rain_mm':'mean'})
df_rolling_std=pd.DataFrame(df_rolling_std)
df_rolling_std=df_rolling_std.rename(columns={'rain_mm':'std'})
df_rolling_count=pd.DataFrame(df_rolling_count)
df_rolling_count=df_rolling_count.rename(columns={'rain_mm':'count'})
df_rolling_all=pd.concat([df_rolling_mean, df_rolling_std], axis=1)
df_rolling_all=pd.concat([df_rolling_all, df_rolling_count], axis=1)
    df_temp=df_rolling_all[(df_rolling_all['mean']< 0.201) & (df_rolling_all['mean']> 0.199) & (df_rolling_all['std']< 0.0001) & (df_rolling_all['count']> 50)] #df['count'] counts how many 0.2 mm pulses fall within the rolling window
constant_period=df_temp['count'].count()
if constant_period > 0:
flag=True
return flag
# In[ ]:
# 3. Filter: Flags all gauges with maximum peaks of xx mm or more in 10 minutes
def filter_max_peak (code,df_cemaden_data,year,flag,state):
df_temp=df_cemaden_data[(df_cemaden_data['gauge_code'] == code ) & (df_cemaden_data['rain_mm'] > threshold_max_peak)]
if df_temp['rain_mm'].count()>0:
flag=True #raising the flag if the gauge has a higher max peak in the records than the highest threshold defined
return flag
# In[ ]:
#4. Filter: Flags all gauges with more than xxx consecutive days of null rain records
def filter_consecutive_period_without_rain (code,df_cemaden_data, year,flag, state):
df_gauge=df_cemaden_data[(df_cemaden_data['gauge_code'] == code ) & (df_cemaden_data['rain_mm']>0)]
    df_gauge['rain_mm']=-1 #mark every record that has rain with -1 so rainless periods show up as gaps after resampling
df_gauge=insert_begin_end(df_gauge, year, code)
df_gauge=df_gauge.set_index('datetime')
df_gauge_resample=df_gauge['rain_mm'].resample('10Min').sum()
df_gauge_resample= | pd.DataFrame(df_gauge_resample) | pandas.DataFrame |
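# A small synthetic check of the resampling trick used by the filters above: every
# record is marked with -1, resampled to days, and days whose sum is negative are
# days that contain at least one record.
import pandas as pd
idx = pd.date_range('2020-01-01', periods=6, freq='10min')
marks = pd.Series(-1, index=idx)
daily = marks.resample('D').sum()
days_with_data = int((daily < 0).sum())  # 1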
# Import packages
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import umap.umap_ as umap
from PIL import Image
from matplotlib import offsetbox
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
import datetime
# Import packages for Bokeh visualization demo
from bokeh.models import ColumnDataSource, CustomJS, HoverTool, LassoSelectTool, BoxSelectTool, CrosshairTool
from bokeh.models import HoverTool, CustomJS, Div, Button, TextInput
from bokeh.plotting import curdoc, figure, output_file, show
from bokeh.layouts import column, row
from bokeh import events
import sys, getopt
verbose = False
try:
opts, args = getopt.getopt(sys.argv[1:],"p:v",["path="])
except getopt.GetoptError:
print('bokeh_serve.py -p <file_path>')
sys.exit(2)
for opt, arg in opts:
if opt == ("-v"):
verbose = True
elif opt in ("-p", "--path"):
path = arg
if verbose:
print(path)
print(f'The path is {path}')
if path[-1] == '/':
pass
else:
path += '/'
# Load the weights, activations, and images
name_all = np.load(f'{path}Data_name_all.npy')
images = np.load(f'{path}Data_images.npy')
images = images * 255
im = Image.open(f'{path}test1.png')
im = im.convert("RGBA")
imarray = np.array(im)
imarray = np.flipud(imarray)
X = np.load(f'{path}Data_activations.npy')
X_umap = np.load(f'{path}Data_umap.npy')
x_min1, x_max1 = np.min(X_umap, 0), np.max(X_umap, 0)
X_umap = ((X_umap - x_min1) / (x_max1 - x_min1)) * 15
# Load tooltips for hovering
TOOLTIPS = [
("index", "$index"),
("(x,y)", "($x, $y)"),
("filename", "@filename"),
("umapid", "@umapid"),
]
# Call out data points for the original and recursive projection
s1 = ColumnDataSource(data=dict(x=[], y=[], filename=[], umapid=[]))
s2 = ColumnDataSource(data=dict(x=[], y=[], filename=[], umapid=[]))
div = Div(width=400)
# Original projection set up
p1 = figure(plot_width=600, plot_height=600, title="Select Here")
p1.min_border = 0
p1.x_range.range_padding = p1.y_range.range_padding = 0
p1.image_rgba(image=[imarray], x=0, y=0, dw=15, dh=15)
p1.add_tools(LassoSelectTool())
p1.add_tools(BoxSelectTool())
p1.add_tools(CrosshairTool())
cr1 = p1.square('x', 'y', source=s1, fill_color="white", hover_fill_color="firebrick", fill_alpha=0.05,
hover_alpha=0.3, line_color=None, size=10) # settings for hovering
p1.add_tools(HoverTool(tooltips=TOOLTIPS, renderers=[cr1]))
# Recursive projection set up
p2 = figure(plot_width=600, plot_height=600, title="Watch Here")
p2.min_border = 0
p2.x_range.range_padding = p2.y_range.range_padding = 0
p2.add_tools(LassoSelectTool())
p2.add_tools(BoxSelectTool())
p2.add_tools(CrosshairTool())
ds = ColumnDataSource(data=dict(image=[]))
p2.image_rgba(image='image', source=ds, x=0, y=0, dw=15, dh=15)
cr2 = p2.square('x', 'y', source=s2, fill_color="white", hover_fill_color="firebrick", fill_alpha=0.1,
hover_alpha=0.3, line_color=None,
size=10) # settings for hovering
p2.add_tools(HoverTool(tooltips=TOOLTIPS, renderers=[cr2]))
# Load UMAP activations into the original projection
data1 = dict(
x=[i * 1 for i in X_umap[:, 0]],
y=[i * 1 for i in X_umap[:, 1]],
filename=name_all[:],
umapid=list(range(0, len(X_umap))))
data1 = pd.DataFrame(data1)
data2 = dict(x=[], y=[], filename=[], umapid=[])
data2 = | pd.DataFrame(data2) | pandas.DataFrame |
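# The dict-of-lists built above maps directly onto a Bokeh ColumnDataSource; a
# DataFrame can also be passed as-is, its columns becoming the source fields.
# Values below are made up.
import pandas as pd
from bokeh.models import ColumnDataSource
df = pd.DataFrame({'x': [0.1, 0.5], 'y': [0.2, 0.9],
                   'filename': ['a.png', 'b.png'], 'umapid': [0, 1]})
source = ColumnDataSource(df)  # fields: 'x', 'y', 'filename', 'umapid', 'index'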
# Import required modules
import requests
import pandas as pd
import json
import subprocess
from tqdm import tqdm
import re
# Set pandas to show full rows and columns
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
'''
This is API Query Function
'''
# Main function starts here
def query(cname="cname", apikey="apikey", apiquery="apiquery"):
'''
-- cname --> Cloud Instance Name <cname.infocyte.com> ('cname' is without .infocyte.com)
-- apikey --> APIKEY or the API Token
-- apiquery --> API GET Method
** Set above args as pre-defined variables (Can be used multiple times) or call them on the fly (Single use).
icdata = ic.query(cname, apikey, apiquery)
variable 'icdata' --> PandasDataframe. Can now be used with all options available from pandas. Refer README and Wiki for more details. https://pandas.pydata.org/docs/
    Note: 'query' function loops until it reaches the last page on API explorer. The larger the data, the more time it takes. However each loop will pull 1K entries (rows) and progress details are displayed while querying the data.
'''
tqdm.pandas()
global icpd, icd
icd = requests.get("https://"+cname+".infocyte.com/api/" +
apiquery+"?access_token="+apikey + "&count=True")
if "There is no method to handle GET" in icd.content.decode("utf-8"):
print("API Endpoint not found, suffix \"/explorer/#/\" at the end of URL to find the correct end point")
elif icd.reason == "Not Found":
print("Please check the CNAME used is correct")
elif icd.reason == "Unauthorized":
print("Please check the APIKey / Token has the permission to access the instance")
elif icd.reason != "OK":
print("Something went wrong and unable to find the reason")
else:
iccount = (str(icd.headers.get("X-Total-Count"))[:-3])
if (len(iccount) == 0):
loopic = icd
for x in tqdm(range(1), desc="Loading " + apiquery, ncols=100, unit='Loop(s)', bar_format='{l_bar}{bar} | {n_fmt}/{total_fmt} {unit}', colour='GREEN'):
icdata = json.loads(loopic.text)
icdb = pd.DataFrame(icdata)
icpd = icdb
else:
icdata = json.loads(icd.text)
icdb = pd.DataFrame(icdata)
icpd = icdb
for x in (num+1 for num in tqdm(range(int(iccount)), desc="Loading " + apiquery, ncols=100, unit='Loop(s)', bar_format='{l_bar}{bar} | {n_fmt}/{total_fmt} {unit}', colour='GREEN')):
if x > 9:
loopic = requests.get("https://"+cname+".infocyte.com/api/"+apiquery +
"?access_token=" + apikey+"&filter={\"skip\": "+str(x).ljust(5, '0')+"}")
icdata = json.loads(loopic.text)
icdb = pd.DataFrame(icdata)
icpd = icpd.append(icdb, ignore_index=True)
else:
loopic = requests.get("https://"+cname+".infocyte.com/api/"+apiquery +
"?access_token=" + apikey+"&filter={\"skip\": "+str(x).ljust(4, '0')+"}")
icdata = json.loads(loopic.text)
icdb = | pd.DataFrame(icdata) | pandas.DataFrame |
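# The paging loop above appends each page to one frame; pd.concat is the
# non-deprecated way to stack pages (synthetic pages shown).
import pandas as pd
pages = [{'id': [1, 2], 'name': ['a', 'b']}, {'id': [3], 'name': ['c']}]
combined = pd.concat([pd.DataFrame(p) for p in pages], ignore_index=True)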
"""Created on Fri Apr 3 11:05:15 2020.
Contains the functions needed for data manipulation
@author: MLechner
-*- coding: utf-8 -*-
"""
import copy
import math
from concurrent import futures
import numpy as np
import pandas as pd
import ray
from mcf import general_purpose as gp
from mcf import general_purpose_estimation as gp_est
from mcf import general_purpose_mcf as gp_mcf
def variable_features(var_x_type, var_x_values):
"""
Show variables and their key features.
Parameters
----------
var_x_type : Dict. Name and type of variable.
var_x_values : Dict. Name and values of variables.
Returns
-------
None.
"""
print('\n')
print(80 * '=')
print('Features used to build causal forests')
print(80 * '-')
for name in var_x_type.keys():
print('{:20} '.format(name), end=' ')
if var_x_type[name] == 0:
print('Ordered ', end='')
if var_x_values[name]:
if isinstance(var_x_values[name][0], float):
for val in var_x_values[name]:
print('{:>6.2f}'.format(val), end=' ')
print(' ')
else:
print(var_x_values[name])
else:
print('Continuous')
else:
print('Unordered ', len(var_x_values[name]), ' different values')
print(80 * '-')
def prepare_data_for_forest(indatei, v_dict, v_x_type, v_x_values, c_dict,
no_y_nn=False, regrf=False):
"""Prepare data for Forest and variable importance estimation.
Parameters
----------
indatei : String. CSV file.
v_dict : DICT. Variables.
v_x_type : List. Type of variables.
v_x_values : List. Values of variables (if not continuous).
c_dict : DICT. Parameters.
Returns
-------
x_name : Dict.
x_type :Dict.
x_values : Dict.
c : Dict. Parameters (updated)
pen_mult : INT. Multiplier for penalty.
data_np : Numpy array. Data for estimation.
y_i : INT. Index of position of y in data_np.
y_nn_i : Numpy array of INT.
x_i : Numpy array of INT.
x_ind : Numpy array of INT.
x_ai_ind : Numpy array of INT.
d_i : INT.
w_i : INT.
cl_i : INT.
"""
x_name, x_type = gp.get_key_values_in_list(v_x_type)
x_type = np.array(x_type)
x_name2, x_values = gp.get_key_values_in_list(v_x_values)
pen_mult = 0
if x_name != x_name2:
raise Exception('Wrong order of names', x_name, x_name2)
p_x = len(x_name) # Number of variables
c_dict = m_n_grid(c_dict, p_x) # Grid for # of var's used for split
x_ind = np.array(range(p_x)) # Indices instead of names of variable
x_ai_ind = [] # Indices of variables used for all splits
if not v_dict['x_name_always_in'] == []:
always_in_set = set(v_dict['x_name_always_in'])
x_ai_ind = np.empty(len(always_in_set), dtype=np.uint32)
j = 0
for i in range(p_x):
if x_name[i] in always_in_set:
x_ai_ind[j] = i
j = j + 1
data = pd.read_csv(indatei)
y_dat = data[v_dict['y_tree_name']].to_numpy()
if not regrf:
if (c_dict['mtot'] == 1) or (c_dict['mtot'] == 4):
pen_mult = c_dict['mtot_p_diff_penalty'] * np.var(y_dat)
y_i = [0]
if regrf:
d_dat = None
d_i = None
else:
d_dat = data[v_dict['d_name']].to_numpy()
d_i = [1]
if regrf:
y_nn = None
y_nn_i = None
else:
if no_y_nn:
# y_nn = np.zeros((np.size(d), len(v['y_match_name'])))
y_nn = np.zeros((len(d_dat), len(v_dict['y_match_name'])))
else:
y_nn = data[v_dict['y_match_name']].to_numpy()
y_nn_i = range(2, 2 + len(v_dict['y_match_name']))
y_nn_i = np.array(y_nn_i)
x_dat = data[x_name].to_numpy()
for col_indx in range(np.shape(x_dat)[1]):
if x_type[col_indx] > 0:
x_dat[:, col_indx] = np.around(x_dat[:, col_indx])
if regrf:
x_i = np.array(range(1, 1+len(v_dict['x_name'])))
data_np = np.concatenate((y_dat, x_dat), axis=1) # easier handling
else:
x_i = np.array(
range(2 + len(v_dict['y_match_name']),
2 + len(v_dict['y_match_name']) + len(v_dict['x_name'])))
data_np = np.concatenate((y_dat, d_dat, y_nn, x_dat), axis=1)
if c_dict['w_yes']:
w_dat = data[v_dict['w_name']].to_numpy()
data_np = np.concatenate((data_np, w_dat), axis=1)
w_i = data_np.shape[1]-1
else:
w_i = 0
if c_dict['panel_in_rf']:
cl_dat = data[v_dict['cluster_name']].to_numpy()
data_np = np.concatenate((data_np, cl_dat), axis=1)
cl_i = data_np.shape[1]-1
else:
cl_i = 0
return (x_name, x_type, x_values, c_dict, pen_mult, data_np,
y_i, y_nn_i, x_i, x_ind, x_ai_ind, d_i, w_i, cl_i)
def m_n_grid(c_dict, no_vars):
    """Generate the grid for the # of coefficients (log-scale). Sort n_min grid.
Parameters
----------
c_dict : Dict. Parameters of MCF estimation
no_vars : INT. Number of x-variables used for tree building
Returns
-------
c : Dict. Updated (globally) dictionary with parameters.
"""
m_min = round(c_dict['m_min_share'] * no_vars)
m_min = max(m_min, 1)
m_max = round(c_dict['m_max_share'] * no_vars)
if m_min == m_max:
c_dict['m_grid'] = 1
grid_m = m_min
else:
if c_dict['m_grid'] == 1:
grid_m = round((m_min + m_max)/2)
else:
grid_m = gp.grid_log_scale(m_max, m_min, c_dict['m_grid'])
grid_m = [int(idx) for idx in grid_m]
if np.size(c_dict['grid_n_min']) > 1:
c_dict['grid_n_min'] = sorted(c_dict['grid_n_min'], reverse=True)
c_dict.update({'grid_m': grid_m}) # changes this dictionary globally
return c_dict
def nn_neighbour_mcf(y_dat, x_dat, d_dat, obs, cov_x_inv, treat_values,
i_treat):
"""Find nearest neighbour-y in subsamples by value of d.
Parameters
----------
y_dat : Numpy array: Outcome variable
x_dat : Numpy array: Covariates
d_dat : Numpy array: Treatment
obs : INT64: Sample size
cov_x_inv : Numpy array: inverse of covariance matrix
treat_values : Numpy array: possible values of D
i_treat : Position treat_values investigated
Returns
-------
y_all : Numpy series with matched values.
i_treat: see above (included to ease mulithreading which may confuse
positions).
"""
treat_value = treat_values[i_treat]
cond = d_dat[:, 0] == treat_value
x_t = x_dat[cond, :]
y_t = y_dat[cond, :]
y_all = np.empty([obs, 1])
for i in range(obs):
if treat_value == d_dat[i, 0]:
y_all[i, 0] = y_dat[i, 0]
else:
diff = x_t - x_dat[i, :]
dist = np.sum(np.dot(diff, cov_x_inv) * diff, axis=1)
min_ind = np.argmin(dist)
y_all[i, 0] = np.copy(y_t[min_ind, 0])
return y_all, i_treat # i_treat is returned for multithreading
@ray.remote
def ray_nn_neighbour_mcf2(y_dat, x_dat, d_dat, obs, cov_x_inv, treat_value,
i_treat=None):
"""Make procedure compatible for Ray."""
return nn_neighbour_mcf2(y_dat, x_dat, d_dat, obs, cov_x_inv, treat_value,
i_treat)
def nn_neighbour_mcf2(y_dat, x_dat, d_dat, obs, cov_x_inv, treat_value,
i_treat=None):
"""Find nearest neighbour-y in subsamples by value of d.
Parameters
----------
y_dat : Numpy array: Outcome variable
x_dat : Numpy array: Covariates
d_dat : Numpy array: Treatment
obs : INT64: Sample size
cov_x_inv : Numpy array: inverse of covariance matrix
treat_values : Numpy array: possible values of D
i_treat : Position treat_values investigated
Returns
-------
y_all : Numpy series with matched values.
i_treat: see above (included to ease mulithreading which may confuse
positions).
"""
cond = (d_dat == treat_value).reshape(-1)
x_t = x_dat[cond, :]
y_t = y_dat[cond]
y_all = np.empty(obs)
for i in range(obs):
if treat_value == d_dat[i]:
y_all[i] = y_dat[i]
else:
diff = x_t - x_dat[i, :]
dist = np.sum(np.dot(diff, cov_x_inv) * diff, axis=1)
min_ind = np.nonzero(dist <= (dist.min() + 1e-15))
y_all[i] = np.mean(y_t[min_ind])
if i_treat is None:
return y_all # i_treat is returned for multithreading
return y_all, i_treat # i_treat is returned for multithreading
def nn_matched_outcomes(indatei, v_dict, v_type, c_dict):
"""Nearest neighbor matching for outcome variables.
Parameters
----------
indatei : string.Data input
v_dict : Dict with variables
v_type : Dict with variable types (for dummy creation)
c_dict : Dict with control parameters
Returns
-------
string with name of new data file
v_dict : Updated dictionary with names including
"""
# read as pandas data
data = | pd.read_csv(filepath_or_buffer=indatei) | pandas.read_csv |
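# A toy version of the Mahalanobis nearest-neighbour step in nn_neighbour_mcf2
# above, with two covariates and synthetic data.
import numpy as np
x_t = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]])  # covariates of treated obs
x_i = np.array([0.9, 1.1])                            # one untreated observation
cov_inv = np.linalg.inv(np.cov(x_t, rowvar=False))
diff = x_t - x_i
dist = np.sum(np.dot(diff, cov_inv) * diff, axis=1)
nearest = int(np.argmin(dist))                        # index of the closest treated obs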
import pandas as pd
import json
import os
def run_microsoft_parser(path_save, path_source):
print('Consolidating results of Microsoft classifier')
files = [item for item in os.listdir(path_source) if '.pkl' in item]
print(len(files), 'to consolidate')
df = pd.DataFrame()
for file in files:
try:
tmpdf = pd.read_pickle(path_source+'/'+file)
df = df.append(tmpdf)
except:
print(file, 'error')
def parse_tagging(tagging, unique_photo_id):
results = pd.DataFrame()
tagging = json.loads(tagging)
if 'categories' in tagging.keys():
categories = tagging['categories']
cat_df = []
for category in categories:
res = {}
res['unique_photo_id'] = unique_photo_id
res['microsoft_category_label'] = category['name']
res['microsoft_category_score'] = category['score']
res['classifier'] = 'microsoft_category'
cat_df.append(res)
results = results.append(pd.DataFrame(cat_df))
tags = tagging['tags']
tags_df = []
for tag in tags:
res = {}
res['unique_photo_id'] = unique_photo_id
res['classifier'] = 'microsoft_tags'
res['microsoft_tags_name'] = tag['name']
res['microsoft_tags_score'] = tag['confidence']
tags_df.append(res)
results = results.append(pd.DataFrame(tags_df))
faces_df = []
for face in tagging['faces']:
res = {}
res['classifier'] = 'microsoft_faces'
res['unique_photo_id'] = unique_photo_id
res['microsoft_faces_age'] = face['age']
res['microsoft_faces_gender'] = face['gender']
faces_df.append(res)
results = results.append( | pd.DataFrame(faces_df) | pandas.DataFrame |
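# Each tag/face list above is a list of dicts, which pd.DataFrame turns into rows
# directly; pd.concat is the non-deprecated alternative to DataFrame.append.
# The records below are synthetic.
import pandas as pd
tags = [{'unique_photo_id': 'p1', 'microsoft_tags_name': 'tree',
         'microsoft_tags_score': 0.98}]
faces = [{'unique_photo_id': 'p1', 'microsoft_faces_age': 31,
          'microsoft_faces_gender': 'female'}]
results = pd.concat([pd.DataFrame(tags), pd.DataFrame(faces)], ignore_index=True)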
import numpy as np
import pystan
import pickle
from pystan import StanModel
import pandas as pd
import os
def stanTopkl():
"""
The function complies 'stan' models first and avoids re-complie of the model.
"""
if os.path.isfile('log_normal.pkl'):
os.remove('log_normal.pkl')
sm = StanModel(file = 'log_normal.stan')
with open('log_normal.pkl', 'wb') as f:
pickle.dump(sm, f)
if os.path.isfile('log_t.pkl'):
os.remove('log_t.pkl')
sm = StanModel(file = 'log_t.stan')
with open('log_t.pkl', 'wb') as f:
pickle.dump(sm, f)
def MCMCFit(Y, b, tau, model='log_normal', df=2, beta=0.01, sigma=1, nu=1, iter=10000, chains=1):
"""
The function to compute the MCMC posterior of 'B' and 'G' given the counts.
We use pystan to fit the posterior.
Keyword arguments:
Y -- logarithm of the counts
Each row is observations from a same instrument; each column is observations of a same object.
b -- prior mean for 'B'
tau -- prior standard deviation for 'B'
model -- model used to fit the data (default 'log_normal')
If 'model' is 'log_normal', we fit the log-normal model with unknown variance.
If 'model' is 'log_t', we fit the log-t model.
df -- used when 'model' is 'log_normal', prior parameter for the variance (default 2.0)
beta -- used when 'model' is 'log_normal', prior parameter for the variance (default 0.01)
sigma -- used when 'model' is 'log_t', prior parameter for the variance (default 1.0)
nu -- used when 'model' is 'log_t', prior parameter for the variance (default 1.0)
iter -- number of iterations in MCMC (default 10000)
chains -- number of chains in MCMC (default 1)
"""
N, M = Y.shape
# Record the index for the observations.
I = []
J = []
y = []
for i in np.arange(N):
for j in np.arange(M):
if not np.isinf(Y[i, j]):
I.append(i+1)
J.append(j+1)
y.append(Y[i, j])
I = np.array(I, dtype=int)
J = np.array(J, dtype=int)
y = np.array(y, dtype=float)
n = len(y)
if model == 'log_normal':
dat = {'n': n, 'N': N, 'M': M, 'I': I, 'J': J, 'y': y,
'df': df, 'beta': beta, 'tau': tau, 'b': b}
sm = pickle.load(open('log_normal.pkl', 'rb'))
fit = sm.sampling(data=dat, iter=iter, chains=chains)
elif model == 'log_t':
dat = {'n': n, 'N': N, 'M': M, 'I': I, 'J': J, 'y': y,
'sigma': sigma, 'nu': nu, 'tau': tau, 'b': b}
sm = pickle.load(open('log_t.pkl', 'rb'))
fit = sm.sampling(data=dat, iter=iter, chains=chains)
else:
return None
return summary_result(fit, model, N, M, n, I, J)
def summary_result(fit, model, N, M, n, I, J):
"""
The function summarizes the result from pystan.
Keyword arguments:
fit -- the MCMC result
model -- the model used to fit the data
If 'model' is 'log_normal', we fit the log-normal model with unknown variance.
If 'model' is 'log_t', we fit the log-t model.
N -- number of instruments
M -- number of objects
n -- total number of observations
I -- instrument index for each observation
J -- object index for each observation
"""
# Summary statistics for 'B', 'G' and 'sigma^2' (log-normal) or 'xi' (log-t).
resMCMC = fit.summary()
index = []
if model == 'log_normal':
        # The MCMC chains for 'B', 'G' and 'sigma^2'.
chain = np.hstack((fit['B'], fit['G'], fit['sigma2']))
data = (resMCMC['summary'])[:N+M+N, :]
for i in np.arange(N):
index.append('B['+str(i+1)+']')
for j in np.arange(M):
index.append('G['+str(j+1)+']')
for i in np.arange(N):
index.append('sigma^2['+str(i+1)+']')
else:
# The MCMC chains for 'B', 'G' and 'xi'.
chain = np.hstack((fit['B'], fit['G'], fit['xi']))
data = (resMCMC['summary'])[:N+M+n, :]
for i in np.arange(N):
index.append('B['+str(i+1)+']')
for j in np.arange(M):
index.append('G['+str(j+1)+']')
for k in np.arange(n):
index.append('xi['+str(I[k])+','+str(J[k])+']')
chain = | pd.DataFrame(chain, columns=index) | pandas.DataFrame |
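# Once the draws are in a labelled DataFrame as above, posterior summaries are
# one-liners; sketched here with a synthetic chain.
import numpy as np
import pandas as pd
rng = np.random.default_rng(0)
demo_chain = pd.DataFrame(rng.normal(size=(1000, 2)), columns=['B[1]', 'G[1]'])
print(demo_chain.quantile([0.025, 0.5, 0.975]))  # medians and 95% credible bounds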
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from msticpy.analysis.anomalous_sequence import sessionize
class TestSessionize(unittest.TestCase):
def setUp(self):
self.df1 = pd.DataFrame({"UserId": [], "time": [], "operation": []})
self.df1_with_ses_col = pd.DataFrame(
{"UserId": [], "time": [], "operation": [], "session_ind": []}
)
self.df1_sessionized = pd.DataFrame(
{
"UserId": [],
"time_min": [],
"time_max": [],
"operation_list": [],
"duration": [],
"number_events": [],
}
)
self.df2 = pd.DataFrame(
{
"UserId": [1, 1, 2, 3, 1, 2, 2],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-06 11:06:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
],
"operation": ["A", "B", "C", "A", "A", "B", "C"],
}
)
self.df2_with_ses_col_1 = pd.DataFrame(
{
"UserId": [1, 1, 1, 2, 2, 2, 3],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"operation": ["A", "B", "A", "C", "B", "C", "A"],
"session_ind": [0, 0, 1, 2, 3, 4, 5],
}
)
self.df2_sessionized_1 = pd.DataFrame(
{
"UserId": [1, 1, 2, 2, 2, 3],
"time_min": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"time_max": [
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"operation_list": [["A", "B"], ["A"], ["C"], ["B"], ["C"], ["A"]],
"duration": [
pd.to_timedelta(1, "min"),
pd.to_timedelta(0, "min"),
pd.to_timedelta(0, "min"),
pd.to_timedelta(0, "min"),
pd.to_timedelta(0, "min"),
pd.to_timedelta(0, "min"),
],
"number_events": [2, 1, 1, 1, 1, 1],
}
)
self.df2_with_ses_col_2 = pd.DataFrame(
{
"UserId": [1, 1, 1, 2, 2, 2, 3],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"operation": ["A", "B", "A", "C", "B", "C", "A"],
"session_ind": [0, 0, 1, 2, 3, 3, 4],
}
)
self.df2_sessionized_2 = pd.DataFrame(
{
"UserId": [1, 1, 2, 2, 3],
"time_min": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"time_max": [
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
| pd.to_datetime("2020-01-05 00:25:00") | pandas.to_datetime |
#%% [markdown]
# ## ECA information theory comparison figures and stuff
#%% [markdown]
# ## Load packages and data
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
cana_df = pd.read_csv("../data/eca/canalization_df.csv")
imin_df = pd.read_csv("../data/eca/imin_df.csv", index_col = 0)
ipm_df = pd.read_csv("../data/eca/pm_df.csv", index_col = 0)
unq_rules = | pd.read_csv("../data/eca/eca_equiv_classes.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
from pathlib import Path
from datetime import datetime as dt
def mergeManagers(managers, gameLogs):
#Sum up doubled data
    managers = managers.groupby(['yearID','playerID'], as_index=False)[['Games','Wins','Losses']].sum()
#Get visiting managers
visitingManagers = gameLogs[['row','Date','Visiting team manager ID']]
visitingManagers['yearID'] = pd.DatetimeIndex(pd.to_datetime(visitingManagers['Date'])).year-1
visitingManagers = pd.merge(visitingManagers, managers, left_on=['yearID','Visiting team manager ID'], right_on=['yearID','playerID'], how="left")
#Get home managers
homeManagers = gameLogs[['row','Date','Home team manager ID']]
homeManagers['yearID'] = pd.DatetimeIndex(pd.to_datetime(homeManagers['Date'])).year-1
homeManagers = pd.merge(homeManagers, managers, left_on=['yearID','Home team manager ID'], right_on=['yearID','playerID'], how="left")
#Merge managers
homes = homeManagers[['row','Games','Wins','Losses']]
visitings = visitingManagers[['row','Games','Wins','Losses']]
return pd.merge(homes, visitings, on='row', suffixes=(' home manager',' visiting manager'))
def mergePitchings(pitchers, gameLogs):
#Get aggregators for doubled data
aggregators = {}
for column in pitchers.drop(columns=['yearID','playerID']).columns:
if column.find("average")>-1:
aggregators[column] = 'mean'
else:
aggregators[column] = 'sum'
#Aggregate doubled data
pitchers = pitchers.groupby(['yearID','playerID'], as_index=False).agg(aggregators)
#Get visiting pitchers
visitingPitchers = gameLogs[['row','Date','Visiting starting pitcher ID']]
visitingPitchers['yearID'] = pd.DatetimeIndex(pd.to_datetime(visitingPitchers['Date'])).year-1
visitingPitchers = pd.merge(visitingPitchers, pitchers, left_on=['yearID','Visiting starting pitcher ID'], right_on=['yearID','playerID'], how="left")
#Get home pitchers
homePitchers = gameLogs[['row','Date','Home starting pitcher ID']]
homePitchers['yearID'] = pd.DatetimeIndex(pd.to_datetime(homePitchers['Date'])).year-1
homePitchers = pd.merge(homePitchers, pitchers, left_on=['yearID','Home starting pitcher ID'], right_on=['yearID','playerID'], how="left")
#Merge pitchers
homes = homePitchers.drop(columns=['yearID','Home starting pitcher ID','playerID','Date'])
visitings = visitingPitchers.drop(columns=['yearID','Visiting starting pitcher ID','playerID','Date'])
return pd.merge(homes, visitings, on='row', suffixes=(' home pitcher',' visiting pitcher'))
def mergePeople(people, gameLogs):
#Encode people
people['bats right'] = (people['bats']=="R") | (people['bats']=="B")
people['bats left'] = (people['bats']=="L") | (people['bats']=="B")
people['throws right'] = people['throws']=="R"
people = people.drop(columns=['bats','throws'])
#Merge people
allPeople = []
for IDColumn in gameLogs.columns:
if IDColumn.find("starting")>-1:
merged = pd.merge(gameLogs[['row','Date',IDColumn]], people, how="left", left_on=[IDColumn], right_on=['playerID'])
merged['age'] = (pd.to_datetime(merged['Date']) - pd.to_datetime(merged['birthdate'])) / np.timedelta64(1, 'Y')
newColumns = {"age":IDColumn.replace(" ID"," "+" age")}
for column in people.drop(columns=['playerID','birthdate']).columns:
newColumns[column] = IDColumn.replace(" ID"," "+str(column))
merged = merged.rename(columns=newColumns)
allPeople.append(merged[['row']+list(newColumns.values())])
mergedPeople = gameLogs['row']
for merSal in allPeople:
mergedPeople = pd.merge(mergedPeople, merSal, how="left", on='row')
return mergedPeople
def mergeTeams(teams, gameLogs):
#Encode team data
teams.loc[(teams['Division winner'] == 'N'), 'Division winner'] = 0
teams.loc[(teams['Division winner'] == 'Y'), 'Division winner'] = 1
teams.loc[(teams['League winner'] == 'N'), 'League winner'] = 0
teams.loc[(teams['League winner'] == 'Y'), 'League winner'] = 1
teams.loc[(teams['World series winner'] == 'N'), 'World series winner'] = 0
teams.loc[(teams['World series winner'] == 'Y'), 'World series winner'] = 1
teams.loc[(teams['Division'] == 'W'), 'Division'] = 0
teams.loc[(teams['Division'] == 'E'), 'Division'] = 1
teams.loc[(teams['Division'] == 'C'), 'Division'] = 2
teams['Pythagorean_expectation'] = (teams['Runs scored'] ** 1.83) / (teams['Runs scored'] ** 1.83 + teams['Opponents runs scored'] ** 1.83)
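    #Pythagorean expectation: estimated win fraction RS**1.83 / (RS**1.83 + RA**1.83); 1.83 is the commonly used refinement of the original exponent of 2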
#Merge teams
mergedTeams = gameLogs[['row','Date','Visiting team','Home team']]
mergedTeams['Date'] = pd.to_datetime(mergedTeams['Date']).dt.year-1
mergedTeams = pd.merge(mergedTeams, teams, left_on=['Date', 'Visiting team'], right_on=['yearID', 'teamID'], how='left')
mergedTeams = pd.merge(mergedTeams, teams, left_on=['Date', 'Home team'], right_on=['yearID', 'teamID'], how='left', suffixes=[' visiting', ' home'])
return mergedTeams[['row', 'Division visiting', 'Rank visiting', 'Games visiting', 'Wins visiting', 'Losses visiting', 'Division winner visiting',
'League winner visiting', 'World series winner visiting', 'Runs scored visiting', 'At bats visiting',
'Hits by batters visiting', 'Doubles visiting', 'Triples visiting', 'Homeruns visiting', 'Walks visiting', 'Strikeouts visiting',
'Stolen bases visiting', 'Cought stealing visiting', 'Batters hit by pitch visiting', 'Sacrifice flies visiting',
'Opponents runs scored visiting', 'Earned runs allowed visiting', 'Earned runs average visiting', 'Shutouts visiting',
'Saves visiting', 'Hits allowed visiting', 'Homeruns allowed visiting', 'Walks allowed visiting',
'Strikeouts allowed visiting', 'Errors visiting', 'Double plays visiting', 'Fielding percentage visiting',
'Pythagorean_expectation visiting', 'Division home', 'Rank home', 'Games home', 'Wins home', 'Losses home',
'Division winner home', 'League winner home', 'World series winner home', 'Runs scored home',
'At bats home', 'Hits by batters home', 'Doubles home', 'Triples home', 'Homeruns home',
'Walks home', 'Strikeouts home', 'Stolen bases home', 'Cought stealing home',
'Batters hit by pitch home', 'Sacrifice flies home', 'Opponents runs scored home',
'Earned runs allowed home', 'Earned runs average home', 'Shutouts home', 'Saves home',
'Hits allowed home', 'Homeruns allowed home', 'Walks allowed home', 'Strikeouts allowed home',
'Errors home', 'Double plays home', 'Fielding percentage home', 'Pythagorean_expectation home']]
def createScorings(gameLogs):
scoreLogs = gameLogs[['row','Visiting team','Home team','Visiting score','Home score']]
scoreLogs['Home team win'] = scoreLogs['Home score']>scoreLogs['Visiting score']
scoreLogs['Home team odd'] = (scoreLogs['Home score'].replace(0,1))/(scoreLogs['Visiting score'].replace(0,1))
homeTeams = {}
for team in scoreLogs['Home team'].unique():
homeTeams[team] = scoreLogs[scoreLogs['Home team']==team]
vistTeams = {}
for team in scoreLogs['Visiting team'].unique():
vistTeams[team] = scoreLogs[scoreLogs['Visiting team']==team]
homeTVers = {}
for hTeam in homeTeams:
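        #shift(1) keeps the current game out of its own rolling features, so only the previous ten home games feed these ratios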
homeTeams[hTeam]['Home win ratio'] = homeTeams[hTeam].loc[:,'Home team win'].rolling(10).mean().shift(1)
homeTeams[hTeam]['Home score ratio'] = homeTeams[hTeam].loc[:,'Home score'].rolling(10).mean().shift(1)
homeTeams[hTeam]['Home odd ratio'] = homeTeams[hTeam].loc[:,'Home team odd'].rolling(10).mean().shift(1)
temp = homeTeams[hTeam]
versus = {}
for team in temp['Visiting team'].unique():
versus[team] = temp[temp['Visiting team']==team]
for vTeam in versus:
versus[vTeam]['Home versus win ratio'] = versus[vTeam].loc[:,'Home team win'].rolling(5).mean().shift(1)
versus[vTeam]['Home versus score ratio'] = versus[vTeam].loc[:,'Home score'].rolling(5).mean().shift(1)
versus[vTeam]['Home versus odd ratio'] = versus[vTeam].loc[:,'Home team odd'].rolling(5).mean().shift(1)
homeTVers[hTeam] = pd.concat(versus)
vistTVers = {}
for vTeam in vistTeams:
vistTeams[vTeam]['Visiting win ratio'] = (1-vistTeams[vTeam].loc[:,'Home team win']).rolling(10).mean().shift(1)
vistTeams[vTeam]['Visiting score ratio'] = vistTeams[vTeam].loc[:,'Visiting score'].rolling(10).mean().shift(1)
vistTeams[vTeam]['Visiting odd ratio'] = (1/vistTeams[vTeam].loc[:,'Home team odd']).rolling(10).mean().shift(1)
temp = vistTeams[vTeam]
versus = {}
for team in temp['Home team'].unique():
versus[team] = temp[temp['Home team']==team]
for hTeam in versus:
versus[hTeam]['Visiting versus win ratio'] = (1-versus[hTeam].loc[:,'Home team win']).rolling(5).mean().shift(1)
versus[hTeam]['Visiting versus score ratio'] = versus[hTeam].loc[:,'Visiting score'].rolling(5).mean().shift(1)
versus[hTeam]['Visiting versus odd ratio'] = (1/versus[hTeam].loc[:,'Home team odd']).rolling(5).mean().shift(1)
vistTVers[vTeam] = pd.concat(versus)
merged = pd.merge(pd.concat(vistTeams)[['row'
,'Visiting win ratio'
,'Visiting score ratio'
,'Visiting odd ratio']]
,pd.concat(homeTVers)[['row'
,'Home versus win ratio'
,'Home versus score ratio'
,'Home versus odd ratio']]
, on='row')
merged = pd.merge(pd.concat(vistTVers)[['row'
,'Visiting versus win ratio'
,'Visiting versus score ratio'
,'Visiting versus odd ratio']]
,merged, on='row')
merged = pd.merge(pd.concat(homeTeams)[['row'
,'Home win ratio'
,'Home score ratio'
,'Home odd ratio']]
,merged, on='row')
return pd.merge(scoreLogs[['row','Visiting score','Home score','Home team win','Home team odd']],merged, on='row').fillna(0)
def mergeFieldings(fieldings, gameLogs):
fieldings = fieldings.groupby(['yearID','playerID'], as_index=False).sum()
gameLogs['yearID'] = pd.DatetimeIndex(pd.to_datetime(gameLogs['Date'])).year-1
allPlayers = []
for playerColumn in gameLogs.columns:
if playerColumn.find("starting")>-1:
merged = pd.merge(gameLogs[['row','yearID',playerColumn]], fieldings, how="left", left_on=[playerColumn,'yearID'], right_on=['playerID','yearID'])
newColumns = {}
for column in fieldings.drop(columns=['playerID','yearID']).columns:
newColumns[column] = playerColumn.replace(" ID"," "+str(column))
merged = merged.rename(columns=newColumns)
allPlayers.append(merged[['row']+list(newColumns.values())])
mergedFieldings = gameLogs['row']
for playerData in allPlayers:
mergedFieldings = pd.merge(mergedFieldings, playerData, how="left", on='row')
return mergedFieldings
def mergeBattings(battings, gameLogs):
battings = battings.groupby(['yearID','playerID'], as_index=False).sum()
gameLogs['yearID'] = pd.DatetimeIndex(pd.to_datetime(gameLogs['Date'])).year-1
allPlayers = []
for playerColumn in gameLogs.columns:
if playerColumn.find("starting")>-1:
merged = pd.merge(gameLogs[['row','yearID',playerColumn]], battings, how="left", left_on=[playerColumn,'yearID'], right_on=['playerID','yearID'])
newColumns = {}
for column in battings.drop(columns=['playerID','yearID']).columns:
newColumns[column] = playerColumn.replace(" ID"," "+str(column))
merged = merged.rename(columns=newColumns)
allPlayers.append(merged[['row']+list(newColumns.values())])
mergedBattings = gameLogs['row']
for playerData in allPlayers:
mergedBattings = pd.merge(mergedBattings, playerData, how="left", on='row')
return mergedBattings
#Assumed base directory for the filtered data files - adjust to the actual location
path = str(Path.cwd())
gameLogs = pd.read_csv(path+r'\Filtered\_mlb_filtered_GameLogs.csv', index_col=False)
people = pd.read_csv(path+r'\Filtered\_mlb_filtered_People.csv', index_col=False)
teams = pd.read_csv(path+r'\Filtered\_mlb_filtered_Teams.csv', index_col=False)
managers = pd.read_csv(path+r'\Filtered\_mlb_filtered_Managers.csv', index_col=False)
pitchings = | pd.read_csv(path+r'\Filtered\_mlb_filtered_Pitching.csv', index_col=False) | pandas.read_csv |
from collections.abc import MutableMapping
from datetime import datetime
from numpy import exp
import pandas as pd
CHANNEL_ERROR = 'Channel not supported.'
DIRECTION_ERROR = 'Direction not supported.'
class SPMImage():
data_headers = [
'sample_id',
'rec_index',
'probe',
'channel',
'direction',
'trace',
'setpoint (A)',
'voltage (V)',
'width (m)',
'height (m)',
'scan_time (s)',
'datetime',
'path',
'image'
]
def __init__(self, dataframe, metadata) -> None:
self.metadata = metadata
self.dataframe = dataframe
def summary(self):
sample_id = self.dataframe.at[0, 'sample_id']
rec_index = self.dataframe.at[0, 'rec_index']
channel = self.dataframe.at[0, 'channel']
datetime_obj = datetime.fromisoformat(self.dataframe.at[0, 'datetime'])
date = datetime_obj.strftime('%y%m%d')
# date = self.dataframe.at[0, 'datetime']
bias = self.dataframe.at[0, 'voltage (V)']
size = round(self.dataframe.at[0, 'width (m)'] * pow(10,9))
# return f"{sample_id}_{date}_{bias}V_{size}x{size}_{channel}"
return f"{sample_id}_{date}_{bias}V_{size}x{size}_{rec_index}_{channel}"
class SPMImage_dict(MutableMapping):
def __init__(self, path='', *args, **kwargs):
self.path = path
self.params = dict()
self.headers = dict()
self.data = {'Z':[], 'Current':[]}
self.traces = {'Z':[], 'Current':[]}
self.update(*args, **kwargs)
def reformate(self, channel):
cols = [
'sample_id',
'probe',
'channel',
'path',
'setpoint',
'voltage',
'width',
'height',
'datetime',
'direction',
'trace',
'image'
]
        data = {col: [] for col in cols}
        n = len(self.data[channel])
        for i in range(n):
            # Keys appended below must match the column names declared in `cols` above.
            # sample_id / probe are assumed to live in self.params; .get() keeps this safe if absent.
            data['sample_id'].append(self.params.get('sample_id'))
            data['probe'].append(self.params.get('probe'))
            data['channel'].append(channel)
            data['direction'].append(self.traces[channel][i]['direction'])
            data['trace'].append(self.traces[channel][i]['trace'])
            data['setpoint'].append(self.params['setpoint_value'])
            # NOTE: the original reused 'setpoint_value' for voltage, width and height too;
            # the intended parameter keys are not visible in this snippet, so that is kept as-is.
            data['voltage'].append(self.params['setpoint_value'])
            data['width'].append(self.params['setpoint_value'])
            data['height'].append(self.params['setpoint_value'])
            data['datetime'].append(self.params['date_time'].isoformat())
            data['path'].append(self.path)
            data['image'].append(self.data[channel][i])
        return data
def as_dataframe(self):
Z_data = self.reformate('Z')
Z_dataframe = pd.DataFrame(Z_data)
I_data = self.reformate('Current')
I_dataframe = pd.DataFrame(I_data)
return | pd.concat([Z_dataframe,I_dataframe]) | pandas.concat |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import datetime
import pandas as pd
from dateutil.relativedelta import relativedelta
from collections.abc import Iterable
ALLOWED_TIME_COLUMN_TYPES = [
pd.Timestamp,
pd.DatetimeIndex,
datetime.datetime,
datetime.date,
]
def is_datetime_like(x):
"""Function that checks if a data frame column x is of a datetime type."""
return any(isinstance(x, col_type) for col_type in ALLOWED_TIME_COLUMN_TYPES)
def get_datetime_col(df, datetime_colname):
"""
Helper function for extracting the datetime column as datetime type from
a data frame.
Args:
df: pandas DataFrame containing the column to convert
datetime_colname: name of the column to be converted
Returns:
pandas.Series: converted column
Raises:
        Exception: if datetime_colname does not exist in the dataframe df.
Exception: if datetime_colname cannot be converted to datetime type.
"""
if datetime_colname in df.index.names:
datetime_col = df.index.get_level_values(datetime_colname)
elif datetime_colname in df.columns:
datetime_col = df[datetime_colname]
else:
raise Exception("Column or index {0} does not exist in the data " "frame".format(datetime_colname))
if not is_datetime_like(datetime_col):
datetime_col = pd.to_datetime(df[datetime_colname])
return datetime_col
def get_month_day_range(date):
"""
Returns the first date and last date of the month of the given date.
"""
# Replace the date in the original timestamp with day 1
first_day = date + relativedelta(day=1)
# Replace the date in the original timestamp with day 1
# Add a month to get to the first day of the next month
# Subtract one day to get the last day of the current month
last_day = date + relativedelta(day=1, months=1, days=-1, hours=23)
return first_day, last_day
def split_train_validation(df, fct_horizon, datetime_colname):
"""
Splits the input dataframe into train and validate folds based on the
forecast creation time (fct) and forecast horizon specified by fct_horizon.
Args:
df: The input data frame to split.
fct_horizon: list of tuples in the format of
(fct, (forecast_horizon_start, forecast_horizon_end))
datetime_colname: name of the datetime column
Note: df[datetime_colname] needs to be a datetime type.
"""
i_round = 0
for fct, horizon in fct_horizon:
i_round += 1
train = df.loc[df[datetime_colname] < fct].copy()
validation = df.loc[(df[datetime_colname] >= horizon[0]) & (df[datetime_colname] <= horizon[1]),].copy()
yield i_round, train, validation
def add_datetime(input_datetime, unit, add_count):
"""
Function to add a specified units of time (years, months, weeks, days,
hours, or minutes) to the input datetime.
Args:
input_datetime: datatime to be added to
        unit: unit of time, valid values: 'Y' (year), 'M' (month), 'W' (week),
            'D' (day), 'h' (hour), 'm' (minute).
add_count: number of units to add
Returns:
New datetime after adding the time difference to input datetime.
Raises:
        Exception: if invalid unit is provided. Valid units are:
            'Y', 'M', 'W', 'D', 'h', 'm'.
"""
if unit == "Y":
new_datetime = input_datetime + relativedelta(years=add_count)
elif unit == "M":
new_datetime = input_datetime + relativedelta(months=add_count)
elif unit == "W":
new_datetime = input_datetime + relativedelta(weeks=add_count)
elif unit == "D":
new_datetime = input_datetime + relativedelta(days=add_count)
elif unit == "h":
new_datetime = input_datetime + relativedelta(hours=add_count)
elif unit == "m":
new_datetime = input_datetime + relativedelta(minutes=add_count)
else:
raise Exception(
"Invalid backtest step unit, {}, provided. Valid " "step units are Y, M, W, D, h, " "and m".format(unit)
)
return new_datetime
def convert_to_tsdf(input_df, time_col_name, time_format):
"""
Convert a time column in a data frame to monotonically increasing time
index.
Args:
input_df(pandas.DataFrame): Input data frame to convert.
time_col_name(str): Name of the time column to use as index.
time_format(str): Format of the time column.
Returns:
pandas.DataFrame: A new data frame with the time column of the input
data frame set as monotonically increasing index.
"""
output_df = input_df.copy()
if not is_datetime_like(output_df[time_col_name]):
output_df[time_col_name] = pd.to_datetime(output_df[time_col_name], format=time_format)
output_df.set_index(time_col_name, inplace=True)
    if not output_df.index.is_monotonic_increasing:
output_df.sort_index(inplace=True)
return output_df
def is_iterable_but_not_string(obj):
"""
Determine if an object has iterable, list-like properties.
Importantly, this functions *does not* consider a string
to be list-like, even though Python strings are iterable.
"""
return isinstance(obj, Iterable) and not isinstance(obj, str)
def get_offset_by_frequency(frequency):
frequency_to_offset_map = {
"B": pd.offsets.BDay(),
"C": pd.offsets.CDay(),
"W": pd.offsets.Week(),
"WOM": pd.offsets.WeekOfMonth(),
"LWOM": pd.offsets.LastWeekOfMonth(),
"M": pd.offsets.MonthEnd(),
"MS": pd.offsets.MonthBegin(),
"BM": pd.offsets.BMonthEnd(),
"BMS": pd.offsets.BMonthBegin(),
"CBM": pd.offsets.CBMonthEnd(),
"CBMS": pd.offsets.CBMonthBegin(),
"SM": pd.offsets.SemiMonthEnd(),
"SMS": pd.offsets.SemiMonthBegin(),
"Q": pd.offsets.QuarterEnd(),
"QS": pd.offsets.QuarterBegin(),
"BQ": pd.offsets.BQuarterEnd(),
"BQS": pd.offsets.BQuarterBegin(),
"REQ": pd.offsets.FY5253Quarter(),
"A": pd.offsets.YearEnd(),
"AS": pd.offsets.YearBegin(),
"BYS": pd.offsets.YearBegin(),
"BA": pd.offsets.BYearEnd(),
"BAS": pd.offsets.BYearBegin(),
"RE": pd.offsets.FY5253(),
"BH": pd.offsets.BusinessHour(),
"CBH": pd.offsets.CustomBusinessHour(),
"D": pd.offsets.Day(),
"H": pd.offsets.Hour(),
"T": pd.offsets.Minute(),
"min": pd.offsets.Minute(),
"S": pd.offsets.Second(),
"L": pd.offsets.Milli(),
"ms": pd.offsets.Milli(),
"U": | pd.offsets.Micro() | pandas.offsets.Micro |
import re
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import pytest
from aneris.convenience import harmonise_all
from aneris.errors import (
AmbiguousHarmonisationMethod,
MissingHarmonisationYear,
MissingHistoricalError,
)
pytest.importorskip("pint")
import pint.errors
@pytest.mark.parametrize(
"method,exp_res",
(
(
"constant_ratio",
{
2010: 10 * 1.1,
2030: 5 * 1.1,
2050: 3 * 1.1,
2100: 1 * 1.1,
},
),
(
"reduce_ratio_2050",
{
2010: 11,
2030: 5 * 1.05,
2050: 3,
2100: 1,
},
),
(
"reduce_ratio_2030",
{
2010: 11,
2030: 5,
2050: 3,
2100: 1,
},
),
(
"reduce_ratio_2150",
{
2010: 11,
2030: 5 * (1 + 0.1 * (140 - 20) / 140),
2050: 3 * (1 + 0.1 * (140 - 40) / 140),
2100: 1 * (1 + 0.1 * (140 - 90) / 140),
},
),
(
"constant_offset",
{
2010: 10 + 1,
2030: 5 + 1,
2050: 3 + 1,
2100: 1 + 1,
},
),
(
"reduce_offset_2050",
{
2010: 11,
2030: 5 + 0.5,
2050: 3,
2100: 1,
},
),
(
"reduce_offset_2030",
{
2010: 11,
2030: 5,
2050: 3,
2100: 1,
},
),
(
"reduce_offset_2150",
{
2010: 11,
2030: 5 + 1 * (140 - 20) / 140,
2050: 3 + 1 * (140 - 40) / 140,
2100: 1 + 1 * (140 - 90) / 140,
},
),
(
"model_zero",
{
2010: 10 + 1,
2030: 5 + 1,
2050: 3 + 1,
2100: 1 + 1,
},
),
(
"hist_zero",
{
2010: 10,
2030: 5,
2050: 3,
2100: 1,
},
),
),
)
def test_different_unit_handling(method, exp_res):
idx = ["variable", "unit", "region", "model", "scenario"]
hist = pd.DataFrame(
{
"variable": ["Emissions|CO2"],
"unit": ["MtC / yr"],
"region": ["World"],
"model": ["CEDS"],
"scenario": ["historical"],
2010: [11000],
}
).set_index(idx)
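    # History reports 11000 MtC/yr = 11 GtC/yr in 2010 versus the scenario's 10 GtC/yr,
    # which is where the ratio of 1.1 and the offset of 1 in exp_res above come from.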
scenario = pd.DataFrame(
{
"variable": ["Emissions|CO2"],
"unit": ["GtC / yr"],
"region": ["World"],
"model": ["IAM"],
"scenario": ["abc"],
2010: [10],
2030: [5],
2050: [3],
2100: [1],
}
).set_index(idx)
overrides = [{"variable": "Emissions|CO2", "method": method}]
overrides = pd.DataFrame(overrides)
res = harmonise_all(
scenarios=scenario,
history=hist,
harmonisation_year=2010,
overrides=overrides,
)
for year, val in exp_res.items():
npt.assert_allclose(res[year], val)
@pytest.fixture()
def hist_df():
idx = ["variable", "unit", "region", "model", "scenario"]
hist = pd.DataFrame(
{
"variable": ["Emissions|CO2", "Emissions|CH4"],
"unit": ["MtCO2 / yr", "MtCH4 / yr"],
"region": ["World"] * 2,
"model": ["CEDS"] * 2,
"scenario": ["historical"] * 2,
2010: [11000 * 44 / 12, 200],
2015: [12000 * 44 / 12, 250],
2020: [13000 * 44 / 12, 300],
}
).set_index(idx)
return hist
@pytest.fixture()
def scenarios_df():
idx = ["variable", "unit", "region", "model", "scenario"]
scenario = pd.DataFrame(
{
"variable": ["Emissions|CO2", "Emissions|CH4"],
"unit": ["GtC / yr", "GtCH4 / yr"],
"region": ["World"] * 2,
"model": ["IAM"] * 2,
"scenario": ["abc"] * 2,
2010: [10, 0.1],
2015: [11, 0.15],
2020: [5, 0.25],
2030: [5, 0.1],
2050: [3, 0.05],
2100: [1, 0.03],
}
).set_index(idx)
return scenario
@pytest.mark.parametrize("extra_col", (False, "mip_era"))
@pytest.mark.parametrize(
"harmonisation_year,scales",
(
(2010, [1.1, 2]),
(2015, [12 / 11, 25 / 15]),
),
)
def test_different_unit_handling_multiple_timeseries_constant_ratio(
hist_df,
scenarios_df,
extra_col,
harmonisation_year,
scales,
):
if extra_col:
scenarios_df[extra_col] = "test"
scenarios_df = scenarios_df.set_index(extra_col, append=True)
exp = scenarios_df.multiply(scales, axis=0)
overrides = [{"method": "constant_ratio"}]
overrides = pd.DataFrame(overrides)
res = harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=harmonisation_year,
overrides=overrides,
)
pdt.assert_frame_equal(res, exp)
@pytest.mark.parametrize(
"harmonisation_year,offset",
(
(2010, [1, 0.1]),
(2015, [1, 0.1]),
(2020, [8, 0.05]),
),
)
def test_different_unit_handling_multiple_timeseries_constant_offset(
hist_df,
scenarios_df,
harmonisation_year,
offset,
):
exp = scenarios_df.add(offset, axis=0)
overrides = [{"method": "constant_offset"}]
overrides = pd.DataFrame(overrides)
res = harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=harmonisation_year,
overrides=overrides,
)
pdt.assert_frame_equal(res, exp)
def test_different_unit_handling_multiple_timeseries_overrides(
hist_df,
scenarios_df,
):
harmonisation_year = 2015
exp = scenarios_df.sort_index()
for r in exp.index:
for c in exp:
if "CO2" in r[0]:
harm_year_ratio = 12 / 11
if c >= 2050:
sf = 1
elif c <= 2015:
# this custom pre-harmonisation year logic doesn't apply to
# offsets which seems surprising
sf = harm_year_ratio
else:
sf = 1 + (
(harm_year_ratio - 1) * (2050 - c) / (2050 - harmonisation_year)
)
exp.loc[r, c] *= sf
else:
harm_year_offset = 0.1
if c >= 2030:
of = 0
else:
of = harm_year_offset * (2030 - c) / (2030 - harmonisation_year)
exp.loc[r, c] += of
overrides = [
{"variable": "Emissions|CO2", "method": "reduce_ratio_2050"},
{"variable": "Emissions|CH4", "method": "reduce_offset_2030"},
]
overrides = pd.DataFrame(overrides)
res = harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=harmonisation_year,
overrides=overrides,
)
pdt.assert_frame_equal(res, exp, check_like=True)
def test_raise_if_variable_not_in_hist(hist_df, scenarios_df):
hist_df = hist_df[~hist_df.index.get_level_values("variable").str.endswith("CO2")]
error_msg = re.escape("No historical data for `World` `Emissions|CO2`")
with pytest.raises(MissingHistoricalError, match=error_msg):
harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=2010,
overrides=pd.DataFrame([{"method": "constant_ratio"}]),
)
def test_raise_if_region_not_in_hist(hist_df, scenarios_df):
hist_df = hist_df[~hist_df.index.get_level_values("region").str.startswith("World")]
error_msg = re.escape("No historical data for `World` `Emissions|CH4`")
with pytest.raises(MissingHistoricalError, match=error_msg):
harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=2010,
overrides=pd.DataFrame([{"method": "constant_ratio"}]),
)
def test_raise_if_incompatible_unit(hist_df, scenarios_df):
scenarios_df = scenarios_df.reset_index("unit")
scenarios_df["unit"] = "Mt CO2 / yr"
scenarios_df = scenarios_df.set_index("unit", append=True)
error_msg = re.escape(
"Cannot convert from 'megatCH4 / a' ([mass] * [methane] / [time]) to "
"'CO2 * megametric_ton / a' ([carbon] * [mass] / [time])"
)
with pytest.raises(pint.errors.DimensionalityError, match=error_msg):
harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=2010,
overrides=pd.DataFrame([{"method": "constant_ratio"}]),
)
def test_raise_if_undefined_unit(hist_df, scenarios_df):
scenarios_df = scenarios_df.reset_index("unit")
scenarios_df["unit"] = "Mt CO2eq / yr"
scenarios_df = scenarios_df.set_index("unit", append=True)
with pytest.raises(pint.errors.UndefinedUnitError):
harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=2010,
overrides=pd.DataFrame([{"method": "constant_ratio"}]),
)
def test_raise_if_harmonisation_year_missing(hist_df, scenarios_df):
hist_df = hist_df.drop(2015, axis="columns")
error_msg = re.escape(
"No historical data for year 2015 for `World` `Emissions|CH4`"
)
with pytest.raises(MissingHarmonisationYear, match=error_msg):
harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=2015,
overrides=pd.DataFrame([{"method": "constant_ratio"}]),
)
def test_raise_if_harmonisation_year_nan(hist_df, scenarios_df):
hist_df.loc[
hist_df.index.get_level_values("variable").str.endswith("CO2"), 2015
] = np.nan
error_msg = re.escape(
"Historical data is null for year 2015 for `World` `Emissions|CO2`"
)
with pytest.raises(MissingHarmonisationYear, match=error_msg):
harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=2015,
overrides= | pd.DataFrame([{"method": "constant_ratio"}]) | pandas.DataFrame |
from abc import ABC, abstractmethod
from math import floor
import datetime as dt
from typing import Any, Dict, List
import pandas as pd
from .events import FillEvent, OrderEvent
from .enums import EventTypes, SignalTypes
from .data import DataHandler
from .enums import OrderTypes
from .events import SignalEvent
class Portfolio(ABC):
"""
The Portfolio class handles the positions and market value of all
instruments at a resolution of a "bar", i.e. secondly, minutely, 5-min,
30-min, 60 min or EOD.
"""
@abstractmethod
def update_signal(self, event: EventTypes):
"""
Acts on a SignalEvent to generate new orders based on the portfolio
logic.
"""
raise NotImplementedError("Should implement update_signal()")
@abstractmethod
def update_fill(self, event: EventTypes):
"""
Updates the portfolio current positions and holdings from a FillEvent.
"""
raise NotImplementedError("Should implement update_fill()")
class NaivePortfolio(Portfolio):
"""
The NaivePortfolio object is designed to send orders to a brokerage object
with a constant quantity size blindly, i.e. without any risk management or
position sizing. It is used to test simpler strategies such as
BuyAndHoldStrategy.
"""
def __init__(self, bars: DataHandler, events: EventTypes,
start_date: dt.datetime, initial_capital=100_000):
self.bars = bars
self.events = events
self.symbol_list = bars.symbol_list
self.start_date = start_date
self.initial_capital = initial_capital
self.all_positions = self.construct_all_positions()
self.current_positions = {symbol: 0 for symbol in self.symbol_list}
self.all_holdings = self.construct_all_holdings()
self.current_holdings = self.construct_current_holdings()
    def construct_all_positions(self) -> List[Dict[str, Any]]:
"""
Constructs the positions list using the start_date to determine when
the time index will begin.
"""
d = {symbol: 0 for symbol in self.symbol_list}
d['datetime'] = self.start_date
return [d]
    def construct_all_holdings(self) -> List[Dict[str, Any]]:
"""
Constructs the holdings list using the start_date to determine when the
time index will begin.
"""
d = {symbol: 0 for symbol in self.symbol_list}
d['datetime'] = self.start_date
d['cash'] = self.initial_capital
d['commission'] = 0.0
d['total'] = self.initial_capital
return [d]
    def construct_current_holdings(self) -> Dict[str, Any]:
"""
This constructs the dictionary which will hold the instantaneous
value of the portfolio across all symbols.
"""
d = {symbol: 0 for symbol in self.symbol_list}
d['cash'] = self.initial_capital
d['commission'] = 0.0
d['total'] = self.initial_capital
return d
def update_timeindex(self, event: EventTypes) -> None:
"""
Adds a new record to the positions matrix for the current
market data bar. This reflects the PREVIOUS bar, i.e. all
        current market data at this stage is known (OHLCVI).
Makes use of a MarketEvent from the events queue.
"""
bars = {}
for symbol in self.symbol_list:
bars[symbol] = self.bars.get_latest_bars(symbol, n=1)
# Update positions
positions = {symbol: 0 for symbol in self.symbol_list}
positions['datetime'] = bars[self.symbol_list[0]][0].dt
for symbol in self.symbol_list:
positions[symbol] = self.current_positions[symbol]
# Append the current positions
self.all_positions.append(positions)
# Update holdings
holdings = {symbol: 0 for symbol in self.symbol_list}
holdings['datetime'] = bars[self.symbol_list[0]][0].dt
holdings['cash'] = self.current_holdings['cash']
holdings['commission'] = self.current_holdings['commission']
holdings['total'] = self.current_holdings['cash']
for symbol in self.symbol_list:
# Approximation to the real value
market_value = self.current_positions[symbol] * bars[symbol][0].close
holdings[symbol] = market_value
holdings['total'] += market_value
# Append the current holdings
self.all_holdings.append(holdings)
def update_positions_from_fill(self, fill: FillEvent) -> None:
"""
Takes a FillEvent object and updates the position matrix
to reflect the new position.
Parameters:
fill - The FillEvent object to update the positions with.
"""
# Check whether the fill is a buy or sell
fill_direction = fill.direction.value
# Update positions list with new quantities
self.current_positions[fill.symbol] += fill_direction * fill.quantity
def update_holdings_from_fill(self, fill: FillEvent) -> None:
"""
Takes a FillEvent object and updates the holdings matrix
to reflect the holdings value.
Parameters:
fill - The FillEvent object to update the holdings with.
"""
# Check whether the fill is a buy or sell
fill_dir = fill.direction.value
# Update holdings list with new quantities
fill_cost = self.bars.get_latest_bars(fill.symbol)[0].close
cost = fill_dir * fill_cost * fill.quantity
self.current_holdings[fill.symbol] += cost
self.current_holdings['commission'] += fill.commission
self.current_holdings['cash'] -= (cost + fill.commission)
self.current_holdings['total'] -= (cost + fill.commission)
def generate_naive_order(self, signal: SignalEvent) -> OrderEvent:
"""
Simply transacts an OrderEvent object as a constant quantity
sizing of the signal object, without risk management or
position sizing considerations.
Parameters:
signal - The SignalEvent signal information.
"""
order = None
symbol = signal.symbol
direction = signal.signal_type
strength = signal.strength
mkt_quantity = floor(100 * strength)
cur_quantity = self.current_positions[symbol]
order_type = OrderTypes.MARKET
if direction == SignalTypes.BUY and cur_quantity == 0:
order = OrderEvent(
symbol, order_type, mkt_quantity, SignalTypes.BUY)
elif direction == SignalTypes.SELL and cur_quantity == 0:
order = OrderEvent(
symbol, order_type, mkt_quantity, SignalTypes.SELL)
elif direction == SignalTypes.EXIT and cur_quantity > 0:
order = OrderEvent(
symbol, order_type, abs(cur_quantity), SignalTypes.SELL)
        elif direction == SignalTypes.EXIT and cur_quantity < 0:
order = OrderEvent(
symbol, order_type, abs(cur_quantity), SignalTypes.BUY)
return order
def create_equity_curve_dataframe(self) -> None:
"""
Creates a pandas DataFrame from the all_holdings
list of dictionaries.
"""
curve = | pd.DataFrame(self.all_holdings) | pandas.DataFrame |
from __future__ import division
from builtins import str
from builtins import object
__copyright__ = "Copyright 2015 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import numpy as np
import pandas as pd
from .Error import NetworkInputError, NotImplementedError, UnexpectedError
from .Logger import FastTripsLogger
from .Util import Util
class Route(object):
"""
Route class.
One instance represents all of the Routes.
Stores route information in :py:attr:`Route.routes_df` and agency information in
:py:attr:`Route.agencies_df`. Each are instances of :py:class:`pandas.DataFrame`.
Fare information is in :py:attr:`Route.fare_attrs_df`, :py:attr:`Route.fare_rules_df` and
:py:attr:`Route.fare_transfer_rules_df`.
"""
#: File with fasttrips routes information (this extends the
#: `gtfs routes <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/routes.md>`_ file).
#: See `routes_ft specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/routes_ft.md>`_.
INPUT_ROUTES_FILE = "routes_ft.txt"
#: gtfs Routes column name: Unique identifier
ROUTES_COLUMN_ROUTE_ID = "route_id"
#: gtfs Routes column name: Short name
ROUTES_COLUMN_ROUTE_SHORT_NAME = "route_short_name"
#: gtfs Routes column name: Long name
ROUTES_COLUMN_ROUTE_LONG_NAME = "route_long_name"
#: gtfs Routes column name: Route type
ROUTES_COLUMN_ROUTE_TYPE = "route_type"
#: gtfs Routes column name: Agency ID
ROUTES_COLUMN_AGENCY_ID = "agency_id"
#: fasttrips Routes column name: Mode
ROUTES_COLUMN_MODE = "mode"
#: fasttrips Routes column name: Proof of Payment
ROUTES_COLUMN_PROOF_OF_PAYMENT = "proof_of_payment"
# ========== Added by fasttrips =======================================================
#: fasttrips Routes column name: Mode number
ROUTES_COLUMN_ROUTE_ID_NUM = "route_id_num"
#: fasttrips Routes column name: Mode number
ROUTES_COLUMN_MODE_NUM = "mode_num"
#: fasttrips Routes column name: Mode type
ROUTES_COLUMN_MODE_TYPE = "mode_type"
#: Value for :py:attr:`Route.ROUTES_COLUMN_MODE_TYPE` column: access
MODE_TYPE_ACCESS = "access"
#: Value for :py:attr:`Route.ROUTES_COLUMN_MODE_TYPE` column: egress
MODE_TYPE_EGRESS = "egress"
#: Value for :py:attr:`Route.ROUTES_COLUMN_MODE_TYPE` column: transit
MODE_TYPE_TRANSIT = "transit"
#: Value for :py:attr:`Route.ROUTES_COLUMN_MODE_TYPE` column: transfer
MODE_TYPE_TRANSFER = "transfer"
#: Access mode numbers start from here
MODE_NUM_START_ACCESS = 101
#: Egress mode numbers start from here
MODE_NUM_START_EGRESS = 201
#: Route mode numbers start from here
MODE_NUM_START_ROUTE = 301
    #: File with fasttrips fare attributes information (this *substitutes rather than extends* the
#: `gtfs fare_attributes <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_attributes_ft.md>`_ file).
#: See `fare_attributes_ft specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_attributes_ft.md>`_.
INPUT_FARE_ATTRIBUTES_FILE = "fare_attributes_ft.txt"
# fasttrips Fare attributes column name: Fare Period
FARE_ATTR_COLUMN_FARE_PERIOD = "fare_period"
# fasttrips Fare attributes column name: Price
FARE_ATTR_COLUMN_PRICE = "price"
# fasttrips Fare attributes column name: Currency Type
FARE_ATTR_COLUMN_CURRENCY_TYPE = "currency_type"
# fasttrips Fare attributes column name: Payment Method
FARE_ATTR_COLUMN_PAYMENT_METHOD = "payment_method"
# fasttrips Fare attributes column name: Transfers (number permitted on this fare)
FARE_ATTR_COLUMN_TRANSFERS = "transfers"
# fasttrips Fare attributes column name: Transfer duration (Integer length of time in seconds before transfer expires. Omit or leave empty if they do not.)
FARE_ATTR_COLUMN_TRANSFER_DURATION = "transfer_duration"
#: File with fasttrips fare periods information
#: See `fare_rules_ft specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_rules_ft.md>`_.
INPUT_FARE_PERIODS_FILE = "fare_periods_ft.txt"
#: fasttrips Fare rules column name: Fare ID
FARE_RULES_COLUMN_FARE_ID = "fare_id"
#: GTFS fare rules column name: Route ID
FARE_RULES_COLUMN_ROUTE_ID = ROUTES_COLUMN_ROUTE_ID
#: GTFS fare rules column name: Origin Zone ID
FARE_RULES_COLUMN_ORIGIN_ID = "origin_id"
#: GTFS fare rules column name: Destination Zone ID
FARE_RULES_COLUMN_DESTINATION_ID = "destination_id"
#: GTFS fare rules column name: Contains ID
FARE_RULES_COLUMN_CONTAINS_ID = "contains_id"
#: fasttrips Fare rules column name: Fare class
FARE_RULES_COLUMN_FARE_PERIOD = FARE_ATTR_COLUMN_FARE_PERIOD
#: fasttrips Fare rules column name: Start time for the fare. A DateTime
FARE_RULES_COLUMN_START_TIME = "start_time"
#: fasttrips Fare rules column name: End time for the fare rule. A DateTime.
FARE_RULES_COLUMN_END_TIME = "end_time"
# ========== Added by fasttrips =======================================================
#: fasttrips Fare rules column name: Fare ID num
FARE_RULES_COLUMN_FARE_ID_NUM = "fare_id_num"
#: fasttrips Fare rules column name: Route ID num
FARE_RULES_COLUMN_ROUTE_ID_NUM = ROUTES_COLUMN_ROUTE_ID_NUM
#: fasttrips fare rules column name: Origin Zone ID number
FARE_RULES_COLUMN_ORIGIN_ID_NUM = "origin_id_num"
#: fasttrips fare rules column name: Destination ID number
FARE_RULES_COLUMN_DESTINATION_ID_NUM = "destination_id_num"
#: File with fasttrips fare transfer rules information.
#: See `fare_transfer_rules specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_transfer_rules_ft.md>`_.
INPUT_FARE_TRANSFER_RULES_FILE = "fare_transfer_rules_ft.txt"
#: fasttrips Fare transfer rules column name: From Fare Class
FARE_TRANSFER_RULES_COLUMN_FROM_FARE_PERIOD = "from_fare_period"
#: fasttrips Fare transfer rules column name: To Fare Class
FARE_TRANSFER_RULES_COLUMN_TO_FARE_PERIOD = "to_fare_period"
#: fasttrips Fare transfer rules column name: Transfer type?
FARE_TRANSFER_RULES_COLUMN_TYPE = "transfer_fare_type"
#: fasttrips Fare transfer rules column name: Transfer amount (discount or fare)
FARE_TRANSFER_RULES_COLUMN_AMOUNT = "transfer_fare"
#: Value for :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_TYPE`: transfer discount
TRANSFER_TYPE_TRANSFER_DISCOUNT = "transfer_discount"
#: Value for :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_TYPE`: free transfer
TRANSFER_TYPE_TRANSFER_FREE = "transfer_free"
#: Value for :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_TYPE`: transfer fare cost
TRANSFER_TYPE_TRANSFER_COST = "transfer_cost"
#: Valid options for :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_TYPE`
TRANSFER_TYPE_OPTIONS = [TRANSFER_TYPE_TRANSFER_DISCOUNT,
TRANSFER_TYPE_TRANSFER_FREE,
TRANSFER_TYPE_TRANSFER_COST]
#: File with route ID, route ID number correspondence (and fare id num)
OUTPUT_ROUTE_ID_NUM_FILE = "ft_intermediate_route_id.txt"
#: File with fare id num, fare id, fare class, price, xfers
OUTPUT_FARE_ID_FILE = "ft_intermediate_fare.txt"
#: File with fare transfer rules
OUTPUT_FARE_TRANSFER_FILE = "ft_intermediate_fare_transfers.txt"
#: File with mode, mode number correspondence
OUTPUT_MODE_NUM_FILE = "ft_intermediate_supply_mode_id.txt"
def __init__(self, input_archive, output_dir, gtfs, today, stops):
"""
Constructor. Reads the gtfs data from the transitfeed schedule, and the additional
fast-trips routes data from the input file in *input_archive*.
"""
self.output_dir = output_dir
self.routes_df = gtfs.routes
FastTripsLogger.info("Read %7d %15s from %25d %25s" %
(len(self.routes_df), 'date valid route', len(gtfs.routes), 'total routes'))
# Read the fast-trips supplemental routes data file
routes_ft_df = gtfs.get(Route.INPUT_ROUTES_FILE)
# verify required columns are present
routes_ft_cols = list(routes_ft_df.columns.values)
assert(Route.ROUTES_COLUMN_ROUTE_ID in routes_ft_cols)
assert(Route.ROUTES_COLUMN_MODE in routes_ft_cols)
# verify no routes_ids are duplicated
if routes_ft_df.duplicated(subset=[Route.ROUTES_COLUMN_ROUTE_ID]).sum()>0:
error_msg = "Found %d duplicate %s in %s" % (routes_ft_df.duplicated(subset=[Route.ROUTES_COLUMN_ROUTE_ID]).sum(),
Route.ROUTES_COLUMN_ROUTE_ID, Route.INPUT_ROUTES_FILE)
FastTripsLogger.fatal(error_msg)
FastTripsLogger.fatal("\nDuplicates:\n%s" % \
str(routes_ft_df.loc[routes_ft_df.duplicated(subset=[Route.ROUTES_COLUMN_ROUTE_ID])]))
raise NetworkInputError(Route.INPUT_ROUTES_FILE, error_msg)
# Join to the routes dataframe
self.routes_df = pd.merge(left=self.routes_df, right=routes_ft_df,
how='left',
on=Route.ROUTES_COLUMN_ROUTE_ID)
# Get the mode list
self.modes_df = self.routes_df[[Route.ROUTES_COLUMN_MODE]].drop_duplicates().reset_index(drop=True)
self.modes_df[Route.ROUTES_COLUMN_MODE_NUM] = self.modes_df.index + Route.MODE_NUM_START_ROUTE
self.modes_df[Route.ROUTES_COLUMN_MODE_TYPE] = Route.MODE_TYPE_TRANSIT
# Join to mode numbering
self.routes_df = Util.add_new_id(self.routes_df, Route.ROUTES_COLUMN_MODE, Route.ROUTES_COLUMN_MODE_NUM,
self.modes_df, Route.ROUTES_COLUMN_MODE, Route.ROUTES_COLUMN_MODE_NUM)
# Route IDs are strings. Create a unique numeric route ID.
self.route_id_df = Util.add_numeric_column(self.routes_df[[Route.ROUTES_COLUMN_ROUTE_ID]],
id_colname=Route.ROUTES_COLUMN_ROUTE_ID,
numeric_newcolname=Route.ROUTES_COLUMN_ROUTE_ID_NUM)
FastTripsLogger.debug("Route ID to number correspondence\n" + str(self.route_id_df.head()))
FastTripsLogger.debug(str(self.route_id_df.dtypes))
self.routes_df = self.add_numeric_route_id(self.routes_df,
id_colname=Route.ROUTES_COLUMN_ROUTE_ID,
numeric_newcolname=Route.ROUTES_COLUMN_ROUTE_ID_NUM)
FastTripsLogger.debug("=========== ROUTES ===========\n" + str(self.routes_df.head()))
FastTripsLogger.debug("\n"+str(self.routes_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s, %25s" %
(len(self.routes_df), "routes", "routes.txt", Route.INPUT_ROUTES_FILE))
self.agencies_df = gtfs.agency
FastTripsLogger.debug("=========== AGENCIES ===========\n" + str(self.agencies_df.head()))
FastTripsLogger.debug("\n"+str(self.agencies_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.agencies_df), "agencies", "agency.txt"))
self.fare_attrs_df = gtfs.fare_attributes
FastTripsLogger.debug("=========== FARE ATTRIBUTES ===========\n" + str(self.fare_attrs_df.head()))
FastTripsLogger.debug("\n"+str(self.fare_attrs_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.fare_attrs_df), "fare attributes", "fare_attributes.txt"))
# subsitute fasttrips fare attributes
self.fare_attrs_df = gtfs.get(Route.INPUT_FARE_ATTRIBUTES_FILE)
if not self.fare_attrs_df.empty:
# verify required columns are present
fare_attrs_cols = list(self.fare_attrs_df.columns.values)
assert(Route.FARE_ATTR_COLUMN_FARE_PERIOD in fare_attrs_cols)
assert(Route.FARE_ATTR_COLUMN_PRICE in fare_attrs_cols)
assert(Route.FARE_ATTR_COLUMN_CURRENCY_TYPE in fare_attrs_cols)
assert(Route.FARE_ATTR_COLUMN_PAYMENT_METHOD in fare_attrs_cols)
assert(Route.FARE_ATTR_COLUMN_TRANSFERS in fare_attrs_cols)
if Route.FARE_ATTR_COLUMN_TRANSFER_DURATION not in fare_attrs_cols:
self.fare_attrs_df[Route.FARE_ATTR_COLUMN_TRANSFER_DURATION] = np.nan
FastTripsLogger.debug("===> REPLACED BY FARE ATTRIBUTES FT\n" + str(self.fare_attrs_df.head()))
FastTripsLogger.debug("\n"+str(self.fare_attrs_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.fare_attrs_df), "fare attributes", Route.INPUT_FARE_ATTRIBUTES_FILE))
#: fares are by fare_period rather than by fare_id
self.fare_by_class = True
else:
self.fare_by_class = False
# Fare rules (map routes to fare_id)
self.fare_rules_df = gtfs.fare_rules
if len(self.fare_rules_df) > 0:
self.fare_ids_df = Util.add_numeric_column(self.fare_rules_df[[Route.FARE_RULES_COLUMN_FARE_ID]],
id_colname=Route.FARE_RULES_COLUMN_FARE_ID,
numeric_newcolname=Route.FARE_RULES_COLUMN_FARE_ID_NUM)
self.fare_rules_df = pd.merge(left =self.fare_rules_df,
right =self.fare_ids_df,
how ="left")
else:
self.fare_ids_df = pd.DataFrame()
# optionally reverse those with origin/destinations if configured
from .Assignment import Assignment
if Assignment.FARE_ZONE_SYMMETRY:
FastTripsLogger.debug("applying FARE_ZONE_SYMMETRY to %d fare rules" % len(self.fare_rules_df))
# select only those with an origin and destination
reverse_fare_rules = self.fare_rules_df.loc[ pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_ORIGIN_ID])&
pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_DESTINATION_ID]) ].copy()
# FastTripsLogger.debug("reverse_fare_rules 1 head()=\n%s" % str(reverse_fare_rules.head()))
# reverse them
reverse_fare_rules.rename(columns={Route.FARE_RULES_COLUMN_ORIGIN_ID : Route.FARE_RULES_COLUMN_DESTINATION_ID,
Route.FARE_RULES_COLUMN_DESTINATION_ID : Route.FARE_RULES_COLUMN_ORIGIN_ID},
inplace=True)
# FastTripsLogger.debug("reverse_fare_rules 2 head()=\n%s" % str(reverse_fare_rules.head()))
# join them to eliminate dupes
reverse_fare_rules = pd.merge(left =reverse_fare_rules,
right =self.fare_rules_df,
how ="left",
on =[Route.FARE_RULES_COLUMN_FARE_ID,
Route.FARE_RULES_COLUMN_FARE_ID_NUM,
Route.FARE_RULES_COLUMN_ROUTE_ID,
Route.FARE_RULES_COLUMN_ORIGIN_ID,
Route.FARE_RULES_COLUMN_DESTINATION_ID,
Route.FARE_RULES_COLUMN_CONTAINS_ID],
indicator=True)
# dupes exist in both -- drop those
reverse_fare_rules = reverse_fare_rules.loc[ reverse_fare_rules["_merge"]=="left_only"]
reverse_fare_rules.drop(["_merge"], axis=1, inplace=True)
# add them to fare rules
self.fare_rules_df = pd.concat([self.fare_rules_df, reverse_fare_rules])
FastTripsLogger.debug("fare rules with symmetry %d head()=\n%s" % (len(self.fare_rules_df), str(self.fare_rules_df.head())))
# sort by fare ID num so zone-to-zone and their reverse are together
if len(self.fare_rules_df) > 0:
self.fare_rules_df.sort_values(by=[Route.FARE_RULES_COLUMN_FARE_ID_NUM], inplace=True)
fare_rules_ft_df = gtfs.get(Route.INPUT_FARE_PERIODS_FILE)
if not fare_rules_ft_df.empty:
# verify required columns are present
fare_rules_ft_cols = list(fare_rules_ft_df.columns.values)
assert(Route.FARE_RULES_COLUMN_FARE_ID in fare_rules_ft_cols)
assert(Route.FARE_RULES_COLUMN_FARE_PERIOD in fare_rules_ft_cols)
assert(Route.FARE_RULES_COLUMN_START_TIME in fare_rules_ft_cols)
assert(Route.FARE_RULES_COLUMN_END_TIME in fare_rules_ft_cols)
# Split fare classes so they don't overlap
fare_rules_ft_df = self.remove_fare_period_overlap(fare_rules_ft_df)
# join to fare rules dataframe
self.fare_rules_df = pd.merge(left=self.fare_rules_df, right=fare_rules_ft_df,
how='left',
on=Route.FARE_RULES_COLUMN_FARE_ID)
# add route id numbering if applicable
if Route.FARE_RULES_COLUMN_ROUTE_ID in list(self.fare_rules_df.columns.values):
self.fare_rules_df = self.add_numeric_route_id(self.fare_rules_df,
Route.FARE_RULES_COLUMN_ROUTE_ID,
Route.FARE_RULES_COLUMN_ROUTE_ID_NUM)
# add origin zone numbering if applicable
if (Route.FARE_RULES_COLUMN_ORIGIN_ID in list(self.fare_rules_df.columns.values)) and \
(pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_ORIGIN_ID]).sum() > 0):
self.fare_rules_df = stops.add_numeric_stop_zone_id(self.fare_rules_df,
Route.FARE_RULES_COLUMN_ORIGIN_ID,
Route.FARE_RULES_COLUMN_ORIGIN_ID_NUM)
# add destination zone numbering if applicable
if (Route.FARE_RULES_COLUMN_DESTINATION_ID in list(self.fare_rules_df.columns.values)) and \
( | pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_DESTINATION_ID]) | pandas.notnull |
# General packages
import numpy as np
import seaborn as sns
import pandas as pd
# Bunch of scikit-learn stuff
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, cross_val_score
from sklearn.feature_selection import f_classif
import joblib as jl
from sklearn.metrics import f1_score
from sklearn.preprocessing import OneHotEncoder
# Specific statistics-functions
from scipy.stats import ttest_ind, pearsonr
# Misc.
from tqdm import tqdm
from copy import deepcopy
# Plotting
import matplotlib.pyplot as plt
sns.set_style("ticks")
# Custom code! (install skbold by `pip install skbold`; counterbalance.py is in cwd)
from confounds import ConfoundRegressor
from counterbalance import CounterbalancedStratifiedSplit
from utils import get_r2
import time
class DataGenerator:
def __init__(self, N, K, corr_cy, signal_r2, confound_r2=None,
c_type='continuous', y_type='binary', tolerance=0.01,
verbose=False):
"""
Parameters
----------
N : int
Number of samples (N) in the data (X, y, and c)
K : int
Number of features (K) in the data (X)
c_type : str
Type of confound; either "continuous" or "binary". If binary,
the data a balanced vector with ones and zeros
y_type : str
Type of target; either "continuous" or "binary".
corr_cy : float
Number between -1 and 1, specifying the correlation
between the confound (c) and the target (y)
signal_r2 : float
Number between 0 and 1, specifying the explained variance
of y using X, independent of the confound contained in X;
(technically, the semipartial correlation rho(xy.c)
confound_r2 : float or None
Number between 0 and 1 (or None), specifying the shared variance
explained of y of x and c (i.e. the explained variance
of the confound-related information in x). If None,
no confound R2 will be left unspecified (which can be used
to specify a baseline).
tolerance : float
How much an observed statistic (corr_cy, signal_r2, confound_r2) may
deviate from the desired value.
verbose : bool
Whether to print (extra) relevant information
"""
self.N = N
self.K = K
self.corr_cy = corr_cy
self.signal_r2 = signal_r2
self.confound_r2 = confound_r2
self.c_type = c_type
self.y_type = y_type
self.tolerance = tolerance
self.verbose = verbose
def generate(self):
""" Generates X, y, and (optionally) c. """
self._check_settings()
self._init_y_and_c()
# Define X as a matrix of N-samples by K-features
X = np.zeros((self.N, self.K))
# Pre-allocate arrays for average signal_r2 values and confound_r2 values
signal_r2_values = np.zeros(self.K)
confound_r2_values = np.zeros(self.K)
icept = np.ones((self.N, 1))
        iterator = tqdm(np.arange(self.K)) if self.verbose else np.arange(self.K)
for i in iterator:
should_continue = False
# Define generative parameters (gen_beta_y = beta-parameter for y in model of X)
gen_beta_y, gen_beta_c = 1, 1
noise_factor = 1
this_c = 0 if self.confound_r2 is None else self.c
tmp_confound_r2 = 0 if self.confound_r2 is None else self.confound_r2
c_iter = 0
start_time = time.time()
while True:
this_time = time.time()
if c_iter > 100000:
gen_beta_y, gen_beta_c, noise_factor = 1, 1, 1
c_iter = 0
                # Warn if a single feature is taking suspiciously long to converge
                if (this_time - start_time) > 10:
                    print("Something's wrong")
                    print("y: %.3f, C: %.3f, noise: %.3f" % (gen_beta_y, gen_beta_c, noise_factor))
# Generate X as a linear combination of y, c, and random noise
this_X = (gen_beta_y * self.y + gen_beta_c * this_c + np.random.randn(self.N) * noise_factor)
r2_X = pearsonr(this_X, self.y)[0] ** 2
difference_obs_vs_desired = r2_X - (self.signal_r2 + tmp_confound_r2)
if np.abs(difference_obs_vs_desired) > self.tolerance: # should be even more strict
# If correlation too small/big, adjust noise factor and CONTINUE
if difference_obs_vs_desired < 0:
noise_factor -= 0.01
else:
noise_factor += 0.01
c_iter += 1
continue
if self.confound_r2 is None and not should_continue:
signal_r2_values[i] = r2_X
X[:, i] = this_X
break
c_tmp = np.hstack((icept, this_c[:, np.newaxis]))
X_not_c = this_X - c_tmp.dot(np.linalg.lstsq(c_tmp, this_X, rcond=None)[0])
this_signal_r2 = pearsonr(X_not_c, self.y)[0] ** 2
this_confound_r2 = r2_X - this_signal_r2
difference_obs_vs_desired = this_confound_r2 - self.confound_r2
if np.abs(difference_obs_vs_desired) > self.tolerance:
if difference_obs_vs_desired < 0:
gen_beta_c += 0.01
else:
gen_beta_c -= 0.01
should_continue = True
else:
should_continue = False
difference_obs_vs_desired = this_signal_r2 - self.signal_r2
if np.abs(difference_obs_vs_desired) > self.tolerance:
if difference_obs_vs_desired < 0:
gen_beta_y += 0.01
else:
gen_beta_y -= 0.01
should_continue = True
else:
should_continue = False
if should_continue:
c_iter += 1
continue
else: # We found it!
X[:, i] = this_X
signal_r2_values[i] = this_signal_r2
confound_r2_values[i] = this_confound_r2
break
self.X = X
self.signal_r2_values = signal_r2_values
self.confound_r2_values = confound_r2_values
if self.verbose:
self._generate_report()
return self
def return_vals(self):
""" Returns X, y, and (optionally) c. """
if self.confound_r2 is not None:
return self.X, self.y, self.c
else:
return self.X, self.y
def _generate_report(self):
""" If verbose, prints some stuff to check. """
print("Signal r2: %.3f" % self.signal_r2_values.mean())
if self.confound_r2 is not None:
print("Confound r2: %.3f" % self.confound_r2_values.mean())
if self.confound_r2 is not None:
plt.figure(figsize=(15, 5))
plt.subplot(1, 3, 1)
plt.imshow(np.corrcoef(self.X.T), aspect='auto', cmap='RdBu')
plt.title("Correlations between features")
plt.colorbar()
plt.grid('off')
plt.subplot(1, 3, 2)
plt.title("Signal R2 values")
plt.hist(self.signal_r2_values, bins='auto')
plt.subplot(1, 3, 3)
plt.title("Confound R2 values")
plt.hist(self.confound_r2_values, bins='auto')
plt.tight_layout()
plt.show()
def _check_settings(self):
""" Some checks of sensible parameters. """
if self.N % 2 != 0:
raise ValueError("Please select an even number of samples "
"(Makes things easier.)")
if self.confound_r2 is not None:
if np.abs(self.corr_cy) < np.sqrt(self.confound_r2):
raise ValueError("The desired corr_cy value is less than the square "
"root of the desired confound R-squared ... This is "
"impossible to generate.")
VAR_TYPES = ['binary', 'continuous']
if self.y_type not in VAR_TYPES:
raise ValueError("y_type must be one of %r" % VAR_TYPES)
if self.c_type not in VAR_TYPES:
raise ValueError("c_type must be one of %r" % VAR_TYPES)
def _init_y_and_c(self):
""" Initializes y and c. """
if self.y_type == 'binary':
y = np.repeat([0, 1], repeats=self.N / 2)
else: # assume continuous
y = np.random.normal(0, 1, self.N)
if self.c_type == 'binary':
if self.y_type == 'binary':
# Simply shift ("roll") y to create correlation using the "formula":
# to-shift = N / 4 * (1 - corr_cy)
to_roll = int((self.N / 4) * (1 - self.corr_cy))
c = np.roll(y, to_roll)
else: # y is continuous
c = y.copy()
this_corr_cy = pearsonr(c, y)[0]
i = 0
while np.abs(this_corr_cy - self.corr_cy) > self.tolerance:
                    np.random.shuffle(c)
                    this_corr_cy = pearsonr(c, y)[0]
i += 1
if i > 10000:
raise ValueError("Probably unable to find good corr_cy value")
else:
# If c is continuous, just sample y + random noise
noise_factor = 10
c = y + np.random.randn(self.N) * noise_factor
this_corr_cy = pearsonr(c, y)[0]
i = 0
while np.abs(this_corr_cy - self.corr_cy) > self.tolerance:
# Decrease noise if the difference is too big
noise_factor -= 0.01
c = y + np.random.randn(self.N) * noise_factor
this_corr_cy = pearsonr(c, y)[0]
i += 1
if i > 10000:
# Reset noise factor
noise_factor = 10
i = 0
self.y = y
self.c = c
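# Example usage (sketch; parameter values are illustrative):
#     dgen = DataGenerator(N=200, K=5, corr_cy=0.65, signal_r2=0.004,
#                          confound_r2=0.1, verbose=True).generate()
#     X, y, c = dgen.return_vals()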
from utils import vectorized_semipartial_corr, vectorized_corr
def run_without_confound_control(X, y, c, pipeline, cv, arg_dict, sim_nr=None):
""" Run a classification analysis using without controlling for confounds.
Parameters
----------
X : numpy array
Array of shape N (samples) x K (features) with floating point numbers
y : numpy array
Array of shape N (samples) x 1 with binary numbers {0, 1}
c : numpy array
Array of shape N (samples) x 1 with either binary {0, 1}
or continuous (from normal dist, 0 mean, 1 variance) values
pipeline : Pipeline-object
A scikit-learn Pipeline-object
n_splits : int
Number of splits to generate in the K-fold routine
arg_dict : dict
Dictionary with arguments used in data generation
(i.e. args fed to generate_data function)
Returns
-------
results : pandas DataFrame
DataFrame with data parameters (from arg-dict) and fold-wise scores.
"""
results = pd.concat([pd.DataFrame(arg_dict, index=[i]) for i in range(n_splits)])
results['method'] = ['None'] * n_splits
results['sim_nr'] = [sim_nr] * n_splits
results['score'] = cross_val_score(estimator=pipeline, X=X, y=y, cv=cv, scoring=scoring)
return results
def run_with_ipw(X, y, c, pipeline, cv, arg_dict, sim_nr=None):
""" Run a classification analysis using without controlling for confounds.
Parameters
----------
X : numpy array
Array of shape N (samples) x K (features) with floating point numbers
y : numpy array
Array of shape N (samples) x 1 with binary numbers {0, 1}
c : numpy array
Array of shape N (samples) x 1 with either binary {0, 1}
or continuous (from normal dist, 0 mean, 1 variance) values
pipeline : Pipeline-object
A scikit-learn Pipeline-object
n_splits : int
Number of splits to generate in the K-fold routine
arg_dict : dict
Dictionary with arguments used in data generation
(i.e. args fed to generate_data function)
Returns
-------
results : pandas DataFrame
DataFrame with data parameters (from arg-dict) and fold-wise scores.
"""
results = pd.concat([pd.DataFrame(arg_dict, index=[i]) for i in range(n_splits)])
results['method'] = ['IPW'] * n_splits
results['sim_nr'] = [sim_nr] * n_splits
y_ohe = OneHotEncoder(sparse=False).fit_transform(y[:, np.newaxis])
skf = StratifiedKFold(n_splits=n_splits)
lr = LogisticRegression(class_weight='balanced')
if c.ndim == 1:
c = c[:, np.newaxis]
tmp_scores = np.zeros(n_splits)
for i, (train_idx, test_idx) in enumerate(skf.split(X, y)):
lr.fit(c[train_idx], y[train_idx])
probas = lr.predict_proba(c[train_idx])
weights = 1 / (probas * y_ohe[train_idx]).sum(axis=1)
pipeline.fit(X[train_idx], y[train_idx], clf__sample_weight=weights)
preds = pipeline.predict(X[test_idx])
tmp_scores[i] = f1_score(y[test_idx], preds, average='macro')
results['score'] = tmp_scores
return results
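# --- Illustrative sketch (not part of run_with_ipw): how the inverse
# probability weights above are formed. Toy numbers only; in the function the
# probabilities come from a logistic regression of y on the confound c.
import numpy as np
_probas_demo = np.array([[0.8, 0.2],
                         [0.3, 0.7]])  # P(y=0 | c), P(y=1 | c) per sample
_y_demo = np.array([0, 1])
_y_ohe_demo = np.eye(2)[_y_demo]  # one-hot labels
_weights_demo = 1 / (_probas_demo * _y_ohe_demo).sum(axis=1)  # 1 / P(observed label | c)
# -> array([1.25, 1.428...]): samples whose label the confound predicts poorly
# are up-weighted, which weakens the confound-label association in training.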
def run_with_counterbalancing_random(X, y, c, pipeline, cv, arg_dict, verbose=False,
c_type='categorical', metric='corr', threshold=0.05,
use_pval=True, sim_nr=None):
""" Run a classification analysis using without controlling for confounds.
Parameters
----------
X : numpy array
Array of shape N (samples) x K (features) with floating point numbers
y : numpy array
Array of shape N (samples) x 1 with binary numbers {0, 1}
c : numpy array
Array of shape N (samples) x 1 with either binary {0, 1}
or continuous (from normal dist, 0 mean, 1 variance) values
pipeline : Pipeline-object
A scikit-learn Pipeline-object
n_splits : int
Number of splits to generate in the K-fold routine
arg_dict : dict
Dictionary with arguments used in data generation
(i.e. args fed to generate_data function)
Returns
-------
results : pandas DataFrame
DataFrame with data parameters (from arg-dict) and fold-wise scores.
"""
results = pd.concat([pd.DataFrame(arg_dict, index=[i]) for i in range(n_splits)])
# this file contains all components needed to collect, format and save the data from dwd
import os
import re
import requests
from zipfile import ZipFile
from io import TextIOWrapper, BytesIO
import csv
import pandas as pd
import numpy as np
import datetime
# constants
DWD_URL_HISTORICAL = "https://opendata.dwd.de/climate_environment/CDC/observations_germany/climate/hourly/air_temperature/historical/"
DWD_URL_RECENT = "https://opendata.dwd.de/climate_environment/CDC/observations_germany/climate/hourly/air_temperature/recent/"
DWD_FOLDER = os.path.join(os.path.dirname(__file__), "data", "dwd")
os.makedirs(DWD_FOLDER, exist_ok = True)
def get_unpacked_zips(*urls):
"""
this function is a generator which downloads and unzips all .zip files from an url
"""
for url in urls:
html = str(requests.get(url).content)
for zip_link in [f"{url}{link}" for link in re.findall(r'href="(\w*\.zip)"', html)]:
yield ZipFile(BytesIO(requests.get(zip_link).content))
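# --- Illustrative sketch (not part of the module): the href extraction used in
# get_unpacked_zips, applied to a minimal fake directory listing (made-up name).
import re
_fake_listing = '<a href="stundenwerte_TU_00044_akt.zip">a</a> <a href="notes.txt">b</a>'
assert re.findall(r'href="(\w*\.zip)"', _fake_listing) == ["stundenwerte_TU_00044_akt.zip"]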
def verify_dwd_urls(*urls):
"""
this function checks that the given URLs point to DWD (defaults are the historical & recent URLs)
"""
if len(urls) == 0:
urls = [
DWD_URL_HISTORICAL,
DWD_URL_RECENT
]
for url in urls:
if not "https://opendata.dwd.de/" in url:
raise Exception(f"The url '{url}' is not supported, only urls from 'https://opendata.dwd.de/' are supported.")
return urls
def download_dwd(*urls):
"""
this function downloads data from DWD and saves it as a parquet file (default URLs are historical & recent)
"""
urls = verify_dwd_urls(*urls)
for unpacked in get_unpacked_zips(*urls):
data_files = [f for f in unpacked.namelist() if ".txt" in f]
meta_data_file = [f for f in data_files if "Metadaten_Geographie" in f][0]
main_data_file = [f for f in data_files if "produkt_tu_stunde" in f][0]
station_id = int(main_data_file.split("_")[-1].split(".")[0])
# reading main data
with unpacked.open(main_data_file, "r") as main_data:
station_df = pd.DataFrame(
csv.DictReader(TextIOWrapper(main_data, 'utf-8'), delimiter=';')
).drop(["STATIONS_ID", "QN_9", "eor"], axis="columns")
station_df.columns = ["TIME", "TEMPERATURE", "HUMIDITY"]
station_df.TIME = pd.to_datetime(station_df.TIME, format="%Y%m%d%H", utc=True)
# adding missing rows
station_df = pd.merge(
pd.DataFrame({
"TIME": pd.date_range(
station_df.TIME.min(),
station_df.TIME.max(),
freq = "1H",
tz = "utc"
)
}),
station_df,
how = "outer"
).fillna(-999)
# clean up
station_df.TEMPERATURE = pd.to_numeric(station_df.TEMPERATURE, downcast="float")
station_df.HUMIDITY = pd.to_numeric(station_df.HUMIDITY, downcast="integer")
station_df.sort_values(by="TIME", inplace=True)
# add coordinates from meta data
with unpacked.open(meta_data_file, "r") as meta_data:
meta_df = pd.DataFrame(
csv.DictReader(TextIOWrapper(meta_data, 'latin-1'), delimiter=';')
).drop(["Stations_id", "Stationsname"], axis="columns")
meta_df.columns = ["ASL", "LAT", "LON", "START", "END"]
meta_df.loc[meta_df.index[-1], "END"] = datetime.datetime.now().strftime("%Y%m%d")  # avoid chained assignment on a copy
meta_df.START = pd.to_datetime(meta_df.START, format="%Y%m%d", utc=True)
import pandas as pd
import itertools
def get_init_df(re_list, no_re_list, re_col, no_re_col):
# Unpacking re_list and no_re_list directly takes care of the related columns
# For each item in no_re_col, we need to look up its range of possible values
product_list = list(itertools.product(*re_list, *no_re_list))
# print(product_list)
processed_product_list = unzip_tool(product_list)
# print(processed_product_list)
dataframe = | pd.DataFrame(processed_product_list, columns=re_col + no_re_col) | pandas.DataFrame |
import pandas as pd
import numpy as np
df1 = pd.DataFrame(np.ones((3, 4)) * 0, columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.ones((3, 4)) * 1, columns=['a', 'b', 'c', 'd'])
df3 = pd.DataFrame(np.ones((3, 4)) * 2, columns=['a', 'b', 'c', 'd'])
print(df1)
print(df2)
print(df3)
# 纵向合并
print(pd.concat([df1, df2, df3], axis=0, ignore_index=True))
df4 = pd.DataFrame(np.ones((3, 4)) * 0, columns=['a', 'b', 'c', 'd'])
df5 = pd.DataFrame(np.ones((3, 4)) * 1, columns=['b', 'c', 'd', 'e'])
print(pd.concat([df4, df5]))
print(pd.concat([df4, df5], ignore_index=True, join="inner"))
import pandas as pd
from exams.models import TimeCode
from exams.models import AcademicYear
from exams.models import Period
def dates(start_date, end=254):
num_weeks = end // 100
num_days = (num_weeks - 1) * 5 + (end - 100 * num_weeks) // 10
print(num_days)
lst = []
start_date = pd.to_datetime(start_date)
for day in range(num_days):
week = day//5
for session in range(4):
exam_date = start_date + \
pd.to_timedelta('{} days {} hour'.format(
day+2*week, session*2))
time_code = (week+1)*100 + (day-5*week+1)*10 + session + 1
lst.append((time_code, exam_date))
df = pd.DataFrame(lst, columns=['time_code', 'exam_date'])
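# --- Illustrative sketch (not part of the module): what the default end=254
# encodes. num_weeks = 254 // 100 = 2 and
# num_days = (2 - 1) * 5 + (254 - 200) // 10 = 10 working days, each with four
# two-hour sessions; a time code of 112 then reads week 1, day 1, session 2.
assert (254 // 100, (254 // 100 - 1) * 5 + (254 - 100 * (254 // 100)) // 10) == (2, 10)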
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 20 09:38:41 2021
@author: daniele.proverbio
Code to monitor the COVID-19 epidemic in Luxembourg and estimate useful
indicators for the Ministry of Health and the Taskforce WP6.
Path to input file at line 186
Path to output file at line 242; to output plot at line 260
"""
# -----
#
# Preliminary settings
#
# -----
# ----- import packages
import pandas as pd
import numpy as np
import datetime as DT
from scipy import stats as sps
from matplotlib import pyplot as plt
from matplotlib import dates as mdates
from matplotlib.dates import date2num
import plot_reff_estimate
# ----- global variables for data analysis
FILTERED_REGION_CODES = ['LU']
state_name = 'LU'
today = DT.datetime.now().strftime("%Y-%m-%d")
idx_start = 22 # Initial condition, over the first wave in March
# ----- some preparation to make sure data are ok
def prepare_cases(cases, cutoff=25): # prepare data, to get daily cases and smoothing
new_cases = cases.diff()
smoothed = new_cases.rolling(7,
min_periods=1,
center=False).mean().round()
smoothed = smoothed.iloc[idx_start:]
original = new_cases.loc[smoothed.index]
return original, smoothed
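# --- Illustrative sketch (not part of the script): prepare_cases on a made-up
# cumulative case series. The differences give daily cases and the 7-day
# rolling mean gives the smoothed curve fed into the R_eff estimate below.
_toy_cumulative = pd.Series(np.arange(60) ** 2, dtype=float)
_toy_new, _toy_smoothed = prepare_cases(_toy_cumulative)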
# ----- getting highest density intervals for the Bayesian inference
def highest_density_interval(pmf, p=.9, debug=False):
# If we pass a DataFrame, just call this recursively on the columns
if(isinstance(pmf, pd.DataFrame)):
return pd.DataFrame([highest_density_interval(pmf[col], p=p) for col in pmf],
index=pmf.columns)
cumsum = np.cumsum(pmf.values)
total_p = cumsum - cumsum[:, None] # N x N matrix of total probability mass for each low, high
lows, highs = (total_p > p).nonzero() # Return all indices with total_p > p
best = (highs - lows).argmin() # Find the smallest range (highest density)
low = pmf.index[lows[best]]
high = pmf.index[highs[best]]
return pd.Series([low, high],index=[f'Low_{p*100:.0f}',f'High_{p*100:.0f}'])
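# --- Illustrative sketch (not part of the script): the 90% HDI of a
# discretised normal distribution, using the function above. Grid and scale
# are arbitrary toy values; the interval comes out near 5 +/- 1.6.
_toy_grid = np.linspace(0, 10, 1001)
_toy_pmf = pd.Series(sps.norm(loc=5, scale=1).pdf(_toy_grid), index=_toy_grid)
_toy_pmf /= _toy_pmf.sum()  # normalise to a proper pmf
_toy_hdi = highest_density_interval(_toy_pmf, p=.9)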
# ----- getting posteriors for R_t evaluation
def get_posteriors(sr, date, sigma=0.15):
# (1) Calculate Lambda (average arrival rate from Poisson process)
gamma=1/np.random.normal(4, 0.2, len(r_t_range)) # COVID-19 serial interval, with uncertainty
lam = sr[:-1] * np.exp(gamma[:, None] * (r_t_range[:, None] - 1))
# (2) Calculate each day's likelihood
likelihoods = pd.DataFrame(
data = sps.poisson.pmf(sr[1:], lam),
index = r_t_range,
columns = date[1:])
# (3) Create the Gaussian Matrix
process_matrix = sps.norm(loc=r_t_range,scale=sigma).pdf(r_t_range[:, None])
# (3a) Normalize all rows to sum to 1
process_matrix /= process_matrix.sum(axis=0)
# (4) Calculate the initial prior
prior0 = np.ones_like(r_t_range)/len(r_t_range)
prior0 /= prior0.sum()
# Create a DataFrame that will hold our posteriors for each day
# Insert our prior as the first posterior.
posteriors = pd.DataFrame(index=r_t_range,columns=date,data={date[0]: prior0})
# Keep track of the sum of the log of the probability of the data for maximum likelihood calculation.
log_likelihood = 0.0
# (5) Iteratively apply Bayes' rule
for previous_day, current_day in zip(date[:-1], date[1:]):
#(5a) Calculate the new prior
current_prior = process_matrix @ posteriors[previous_day]
#(5b) Calculate the numerator of Bayes' Rule: P(k|R_t)P(R_t)
numerator = likelihoods[current_day] * current_prior
#(5c) Calculate the denominator of Bayes' Rule P(k)
denominator = np.sum(numerator)
# Execute full Bayes' Rule
posteriors[current_day] = numerator/denominator
# Add to the running sum of log likelihoods
log_likelihood += np.log(denominator)
return posteriors, log_likelihood
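# --- Illustrative sketch (not part of the script): one Bayesian update as in
# the numbered steps above, on a tiny grid of candidate R values. All numbers
# are toy values, the serial-interval rate is fixed rather than sampled, and
# the Gaussian drift matrix of step (3) is omitted for a single update.
_toy_r_grid = np.array([0.8, 1.0, 1.2])
_toy_counts = [100, 110]  # yesterday's / today's smoothed case counts
_toy_gamma = 1 / 4.0
_toy_lam = _toy_counts[0] * np.exp(_toy_gamma * (_toy_r_grid - 1))  # step (1)
_toy_like = sps.poisson.pmf(_toy_counts[1], _toy_lam)  # step (2)
_toy_prior = np.ones_like(_toy_r_grid) / len(_toy_r_grid)  # step (4)
_toy_post = _toy_like * _toy_prior
_toy_post /= _toy_post.sum()  # step (5): posterior over R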
# -----
#
# Input data
#
# -----
path = "input/input-data.xlsx" # specify path to file
full_data = pd.read_excel(path, engine='openpyxl').iloc[::-1].reset_index()
data_df = pd.DataFrame(full_data, columns =['report_date','new_cases','positive_patients_intensive_care','positive_patients_normal_care', 'covid_patients_dead', 'new_cases_resident','tests_done_resident'])
population_LU = 600000
dates = data_df.iloc[idx_start:].index
dates_detection = date2num(dates.tolist())
# -----
#
# Analysis
#
# -----
#estimate R_eff for detection
# ----- Prepare data for analysis
cases = data_df.new_cases_resident.cumsum()
original, smoothed = prepare_cases(cases)
#convert into array for easier handling
original_array = original.values
smoothed_array = smoothed.values
# ----- R_eff estimation
R_T_MAX = 10
r_t_range = np.linspace(0, R_T_MAX, R_T_MAX*100+1)
posteriors, log_likelihood = get_posteriors(smoothed_array, dates, sigma=.15) #optimal sigma already chosen in original Notebook
# Note that this is not the most efficient algorithm, but works fine
hdis = highest_density_interval(posteriors, p=.5) # confidence bounds, p=50%
most_likely = posteriors.idxmax().rename('Reff-estimate') # mean R_eff value
result = pd.concat([most_likely, hdis], axis=1)
#!/usr/bin/env python3
import warnings
from typing import Generator, Optional, Tuple, Union
import findiff.diff
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as nptest
import pandas as pd
import pandas.testing as pdtest
from scipy.stats import multivariate_normal
from datafold.pcfold.timeseries.collection import TSCDataFrame, TSCException
from datafold.utils.general import is_integer
@pd.api.extensions.register_dataframe_accessor("tsc")
class TSCAccessor(object):
"""Extension functions for TSCDataFrame.
See `documentation <https://pandas.pydata.org/pandas-docs/stable/development/extending.html?highlight=accessor>`_
for regular pandas accessors.
The functions are available through the accessor `tsc`, for example,
.. code::
tsc_object.tsc.normalize_time()
Parameters
----------
tsc_df
time series collection data to carry out accessor functions on
"""
def __init__(self, tsc_df: TSCDataFrame):
# NOTE: cannot call TSCDataFrame(tsc_df) here to transform in case it is a normal
# DataFrame. This is because the accessor has to know when updating this object.
if not isinstance(tsc_df, TSCDataFrame):
raise TypeError(
"The 'tsc' extension only works for type TSCDataFrame (convert before)."
)
self._tsc_df = tsc_df
def check_tsc(
self,
*,
ensure_all_finite: bool = True,
ensure_min_samples: Optional[int] = None,
ensure_same_length: bool = False,
ensure_const_delta_time: bool = True,
ensure_delta_time: Optional[float] = None,
ensure_same_time_values: bool = False,
ensure_normalized_time: bool = False,
ensure_n_timeseries: Optional[int] = None,
ensure_min_timesteps: Optional[int] = None,
ensure_n_timesteps: Optional[int] = None,
ensure_no_degenerate_ts: bool = True,
) -> TSCDataFrame:
"""Validate time series properties.
This summarises the single check functions also contained in `TSCAccessor`.
Parameters
----------
ensure_all_finite
If True, check if all values are finite (no 'nan' or 'inf' values).
ensure_min_samples
            If provided, check that the frame has at least the required number of samples.
ensure_same_length
If True, check if all time series have the same length.
ensure_const_delta_time
If True, check that all time series have the same time-delta.
ensure_delta_time
If provided, check that time series have required time-delta.
ensure_same_time_values
If True, check that all time series share the same time values.
ensure_normalized_time
If True, check if the time values are normalized.
ensure_n_timeseries
            If provided, check if the required number of time series is present.
        ensure_n_timesteps
            If provided, check that all time series have exactly the specified
            number of timesteps.
ensure_min_timesteps
If provided, check if every time series has the required minimum of time
steps.
ensure_no_degenerate_ts
If True, make sure that no degenerate (single sampled) time series are
present.
Returns
-------
TSCDataFrame
validated time series collection (without changes)
"""
# TODO: allow handle_fail="raise | warn | return"?
if ensure_all_finite:
self.check_finite()
if ensure_min_samples is not None:
self.check_min_samples(min_samples=ensure_min_samples)
if ensure_same_length:
self.check_timeseries_same_length()
if ensure_const_delta_time:
self.check_const_time_delta()
if ensure_delta_time is not None:
self.check_required_time_delta(required_time_delta=ensure_delta_time)
if ensure_same_time_values:
self.check_equal_timevalues()
if ensure_normalized_time:
self.check_normalized_time()
if ensure_n_timeseries is not None:
self.check_required_n_timeseries(required_n_timeseries=ensure_n_timeseries)
if ensure_n_timesteps is not None:
self.check_required_n_timesteps(ensure_n_timesteps)
if ensure_min_timesteps is not None:
self.check_required_min_timesteps(ensure_min_timesteps)
if ensure_no_degenerate_ts:
self.check_no_degenerate_ts()
return self._tsc_df
def check_finite(self) -> None:
"""Check if all values are finite (i.e. does not contain `nan` or `inf`)."""
if not self._tsc_df.is_finite():
raise TSCException.not_finite()
def check_min_samples(self, min_samples) -> None:
"""Check if there is a minimum number of samples included."""
if self._tsc_df.shape[0] < min_samples:
raise TSCException.not_min_samples(min_samples=min_samples)
def check_timeseries_same_length(self) -> None:
"""Check if time series in the collection have the same length."""
if not self._tsc_df.is_equal_length():
raise TSCException.not_same_length(
actual_lengths=self._tsc_df.is_equal_length()
)
def check_const_time_delta(self) -> Union[pd.Series, float]:
"""Check if all time series have the same time-delta."""
delta_time = self._tsc_df.delta_time
if not self._tsc_df.is_const_delta_time():
raise TSCException.not_const_delta_time(self._tsc_df.delta_time)
return delta_time
def check_equal_timevalues(self) -> None:
"""Check if all time series in the collection share the same time values."""
if not self._tsc_df.is_same_time_values():
raise TSCException.not_same_time_values()
def check_normalized_time(self) -> None:
"""Check if time series collection has normalized time.
See Also
--------
:py:meth:`TSCAccessor.normalize_time`
"""
if not self._tsc_df.is_normalized_time():
raise TSCException.not_normalized_time()
def check_required_time_delta(
self, required_time_delta: Union[pd.Series, float, int]
) -> None:
"""Check if time series collection has required time-delta.
Parameters
----------
required_time_delta
single value or per time series
"""
try:
delta_times = np.asarray(self._tsc_df.delta_time)
if self._tsc_df.is_datetime_index():
if (delta_times != required_time_delta).any():
raise AttributeError
else:
# this is a better variant than
# np.asarray(self._tsc_df.delta_time) == np.asarray(required_time_delta)
# because the shapes can also mismatch
nptest.assert_allclose(
delta_times,
np.asarray(required_time_delta),
rtol=1e-12,
atol=1e-15,
)
except AssertionError:
raise TSCException.not_required_delta_time(
required_delta_time=required_time_delta,
actual_delta_time=self._tsc_df.delta_time,
)
def check_required_n_timeseries(self, required_n_timeseries: int) -> None:
"""Check if in the collection are exactly the required number of time series.
Parameters
----------
required_n_timeseries
value
"""
if self._tsc_df.n_timeseries != required_n_timeseries:
raise TSCException.not_required_n_timeseries(
required_n_timeseries=required_n_timeseries,
actual_n_timeseries=self._tsc_df.n_timeseries,
)
def check_required_n_timesteps(self, required_n_timesteps: int) -> None:
n_timesteps = self._tsc_df.n_timesteps
if isinstance(n_timesteps, pd.Series):
raise TSCException.not_n_timesteps(required=required_n_timesteps)
else:
assert isinstance(n_timesteps, int)
if n_timesteps != required_n_timesteps:
raise TSCException.not_n_timesteps(required=required_n_timesteps)
def check_required_min_timesteps(self, required_min_timesteps: int) -> None:
"""Check if all time series in the collection have a minimum number of time steps.
Parameters
----------
required_min_timesteps
value
"""
_n_timesteps = self._tsc_df.n_timesteps
if (np.asarray(_n_timesteps) < required_min_timesteps).any():
raise TSCException.not_min_timesteps(
required_n_timesteps=required_min_timesteps,
actual_n_timesteps=_n_timesteps,
)
def check_no_degenerate_ts(self):
if self._tsc_df.has_degenerate():
raise TSCException.has_degenerate_ts()
def check_non_overlapping_timeseries(self) -> None:
"""Check if all time series have disjoint time values (do not overlap).
Returns
-------
"""
_, counts = np.unique(
self._tsc_df.index.get_level_values(TSCDataFrame.tsc_time_idx_name),
return_counts=True,
)
if (counts > 1).any():
raise TSCException("time series are required to be non-overlapping")
@classmethod
def check_equal_delta_time(
cls, X: TSCDataFrame, Y: TSCDataFrame, atol=1e-15, require_const=False
) -> Tuple[Union[float, pd.Series], Union[float, pd.Series]]:
"""Check if two time series collections have the same delta times.
Parameters
----------
X
First time series collection.
Y
Second time series collection.
atol
Tolerance passed to :py:meth:`.equal_const_delta_time`
require_const
If True, both `X` and `Y` must have constant delta times.
Raises
------
:py:class:`TSCException` - if time_delta not equal or if either `X` or `Y` is not
constant with ``require_const=True``.
Returns
-------
"""
X_dt = X.delta_time
Y_dt = Y.delta_time
equal = True
if isinstance(X_dt, pd.Series) and not require_const:
if not isinstance(Y_dt, pd.Series):
equal = False
else:
try:
                    pdtest.assert_series_equal(X_dt, Y_dt, atol=atol)
# -*- coding: utf-8 -*-
'''
Site
A site import and analysis class built
with the pandas library
'''
import anemoi as an
import pandas as pd
import numpy as np
import itertools
class Site(object):
    '''Site data structure built with the pandas library to import and quickly
    analyze met mast data.'''
def __init__(self, masts=None, meta_data=None, primary_mast=None):
'''Data structure with an array of anemoi.MetMasts and a DataFrame of
results:
Parameters
----------
masts: array of anemoi.MetMasts
meta_data: DataFrame of analysis results
primary_mast: string or int, default None
Longest-term mast installed on site
'''
if masts is not None:
mast_names = []
mast_lats = []
mast_lons = []
mast_heights = []
mast_primary_anos = []
mast_primary_vanes = []
for mast in masts:
if isinstance(mast, an.MetMast):
mast_names.append(mast.name)
mast_lats.append(mast.lat)
mast_lons.append(mast.lon)
mast_heights.append(mast.height)
mast_primary_anos.append(mast.primary_ano)
mast_primary_vanes.append(mast.primary_vane)
if meta_data is None:
meta_data = pd.DataFrame(columns=mast_names,
index=['Lat', 'Lon', 'Height', 'PrimaryAno', 'PrimaryVane'])
meta_data.loc['Lat', :] = mast_lats
meta_data.loc['Lon', :] = mast_lons
meta_data.loc['Height', :] = mast_heights
meta_data.loc['PrimaryAno', :] = mast_primary_anos
meta_data.loc['PrimaryVane', :] = mast_primary_vanes
meta_data.columns.name = 'Masts'
self.masts = masts
self.meta_data = meta_data
def __repr__(self):
mast_names = 'Site masts: '
for mast in self.masts:
if mast_names == 'Site masts: ':
mast_names = mast_names + ' ' + str(mast.name)
else:
mast_names = mast_names + ', ' + str(mast.name)
return mast_names
def check_has_masts(self):
if len(self.masts) < 1:
raise ValueError("This site doesn't seem to have any masts associated...")
return True
def get_mast_names(self):
if not self.masts:
raise ValueError("This site doesn't seem to have any masts associated...")
else:
return self.meta_data.columns
def return_ws_corr_results_binned_by_direction(self):
if self.check_has_masts():
site_correlation_results = []
for mast_pair in itertools.permutations(self.masts, 2):
ref_mast = mast_pair[0]
site_mast = mast_pair[1]
results = an.correlate.correlate_masts_10_minute_by_direction(ref_mast=ref_mast, site_mast=site_mast)
site_correlation_results.append(results)
site_correlation_results = pd.concat(site_correlation_results, axis=0)
return site_correlation_results
def return_cross_corr_results_dataframe(self):
if self.check_has_masts():
cross_corr_results_index = pd.MultiIndex.from_product([self.meta_data.columns.tolist()]*2, names=['Ref', 'Site'])
results_cols = ['Slope', 'Offset', 'DirOffset', 'R2', 'Uncert']
cross_corr_results_dataframe = pd.DataFrame(index=cross_corr_results_index, columns=results_cols)
refs = cross_corr_results_dataframe.index.get_level_values(level='Ref')
sites = cross_corr_results_dataframe.index.get_level_values(level='Site')
cross_corr_results_dataframe = cross_corr_results_dataframe.loc[refs != sites, :]
return cross_corr_results_dataframe
def calculate_measured_momm(self):
'''Calculates measured mean of monthly mean wind speed for each mast in anemoi.Site'''
if self.check_has_masts():
            for mast in self.masts:
self.meta_data.loc['Meas MoMM', mast.name] = mast.return_momm(sensors=mast.primary_ano).iloc[0,0]
def calculate_self_corr_results(self):
if self.check_has_masts():
cross_corr_results = self.return_cross_corr_results_dataframe()
for mast_pair in cross_corr_results.index:
ref = mast_pair[0]
site = mast_pair[1]
ref_mast = self.masts.loc[ref,'Masts']
site_mast = self.masts.loc[site,'Masts']
slope, offset, uncert, R2 = site_mast.correlate_to_reference(reference_mast=ref_mast, method='ODR')
results_cols = ['Slope', 'Offset', 'R2', 'Uncert']
cross_corr_results.loc[pd.IndexSlice[ref, site], results_cols] = [slope, offset, R2, uncert]
return cross_corr_results
def calculate_annual_shear_results(self):
if self.check_has_masts():
shear_results = an.shear.shear_analysis_site(self.masts)
return shear_results
def calculate_long_term_alpha(self):
'''Calculates measured annual alpha for each mast in anemoi.Site'''
if self.check_has_masts():
for mast in self.masts:
self.meta_data.loc['Alpha', mast.name] = mast.calculate_long_term_alpha()
def plot_monthly_valid_recovery(self):
'''Plots monthly valid recovery for each mast in anemoi.Site'''
if self.check_has_masts():
for mast in self.masts:
mast.plot_monthly_valid_recovery()
def plot_freq_dists(self):
'''Plots wind speed frequency distributions for each mast in anemoi.Site'''
if self.check_has_masts():
for mast in self.masts:
mast.plot_freq_dist()
def plot_wind_roses(self):
'''Plots wind speed frequency distributions for each mast in anemoi.Site'''
if self.check_has_masts():
for mast in self.masts:
mast.plot_wind_rose()
def plot_site_masts_summary(self):
for mast in self.masts:
print(mast.mast_data_summary(), mast, '\n')
mast.plot_monthly_valid_recovery();
mast.plot_wind_energy_roses(dir_sectors=12);
mast.plot_freq_dist();
# plt.show()
def plot_ws_corr_results_binned_by_direction(self):
site_correlation_results = self.return_ws_corr_results_binned_by_direction()
dir_bins = site_correlation_results.index.get_level_values('DirBin').unique()
for mast_pair in itertools.permutations(self.masts, 2):
ref_mast = mast_pair[0]
ref_mast_name = ref_mast.name
site_mast = mast_pair[1]
site_mast_name = site_mast.name
ref_data = ref_mast.return_sensor_data([ref_mast.primary_ano, ref_mast.primary_vane])
site_data = site_mast.return_sensor_data(site_mast.primary_ano)
            df = pd.concat([ref_data, site_data], axis=1, join='inner', keys=['Ref', 'Site'])
# -*- coding: utf-8 -*-
import unittest
import pandas as pd
import numpy as np
# from ThymeBoost.trend_models import (linear_trend, mean_trend, median_trend,
# loess_trend, ransac_trend, ewm_trend,
# ets_trend, arima_trend, moving_average_trend,
# zero_trend, svr_trend, naive_trend)
from ThymeBoost.trend_models import *
def testing_data():
seasonality = ((np.cos(np.arange(1, 101))*10 + 50))
np.random.seed(100)
true = np.linspace(-1, 1, 100)
noise = np.random.normal(0, 1, 100)
y = true + seasonality# + noise
return y
class BaseModelTest():
"""Allows self without overriding unitTest __init__"""
def setUp(self):
self.model_obj = None
def set_model_obj(self, child_model_obj):
self.model_obj = child_model_obj
self._params = {'arima_order': 'auto',
'model': 'ses',
'bias': 0,
'arima_trend': None,
'alpha': None,
'poly': 1,
'fit_constant': True,
'l2': 0,
'trend_weights': None,
'ewm_alpha': .5,
'window_size': 13,
'ransac_trials': 20,
'ransac_min_samples': 5}
def test_fitted_series(self):
y = testing_data()
fitted_values = self.model_obj.fit(y, **self._params)
self.assertTrue(isinstance(fitted_values, np.ndarray))
def test_predicted_series(self):
y = testing_data()
self.model_obj.fit(y, **self._params)
predictions = self.model_obj.predict(24, self.model_obj.model_params)
self.assertTrue(isinstance(predictions, np.ndarray))
def test_fitted_null(self):
y = testing_data()
fitted_values = self.model_obj.fit(y, **self._params)
self.assertFalse(pd.Series(fitted_values).isnull().values.any())
def test_prediction_null(self):
y = testing_data()
self.model_obj.fit(y, **self._params)
predictions = self.model_obj.predict(24, self.model_obj.model_params)
        self.assertFalse(pd.Series(predictions).isnull().values.any())
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_table
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import urllib
import requests
import zstandard as zstd
import orjson
import flask
# from util import app_ts_summ, sel_ts_summ, ecan_ts_data
pd.options.display.max_columns = 10
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
server = flask.Flask(__name__)
app = dash.Dash(__name__, external_stylesheets=external_stylesheets, server=server, url_base_pathname = '/')
# app = dash.Dash(__name__, external_stylesheets=external_stylesheets, server=server)
# server = app.server
##########################################
### Parameters
base_url = 'http://tethys-ts.xyz/tethys/data/'
def select_dataset(features, parameters, methods, processing_codes, owners, aggregation_statistics, frequency_intervals, utc_offsets, datasets):
"""
"""
dataset = [d for d in datasets if (d['feature'] == features) and (d['parameter'] == parameters) and (d['method'] == methods) and (d['owner'] == owners) and (d['aggregation_statistic'] == aggregation_statistics) and (d['frequency_interval'] == frequency_intervals) and (d['utc_offset'] == utc_offsets) and (d['processing_code'] == processing_codes)][0]
return dataset
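# --- Illustrative sketch (not part of the app): select_dataset on a made-up
# two-entry dataset listing; only the first entry matches every filter value.
_toy_datasets = [
    {'feature': 'waterway', 'parameter': 'streamflow', 'method': 'sensor_recording',
     'processing_code': 'quality_controlled_data', 'owner': 'ECan',
     'aggregation_statistic': 'mean', 'frequency_interval': '1H', 'utc_offset': '0H',
     'dataset_id': 'toy-1'},
    {'feature': 'atmosphere', 'parameter': 'precipitation', 'method': 'sensor_recording',
     'processing_code': 'quality_controlled_data', 'owner': 'ECan',
     'aggregation_statistic': 'mean', 'frequency_interval': '1H', 'utc_offset': '0H',
     'dataset_id': 'toy-2'},
]
assert select_dataset('waterway', 'streamflow', 'sensor_recording',
                      'quality_controlled_data', 'ECan', 'mean', '1H', '0H',
                      _toy_datasets)['dataset_id'] == 'toy-1'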
ts_plot_height = 600
map_height = 700
lat1 = -43.45
lon1 = 171.9
zoom1 = 7
mapbox_access_token = "<KEY>"
###############################################
### App layout
map_layout = dict(mapbox = dict(layers = [], accesstoken = mapbox_access_token, style = 'outdoors', center=dict(lat=lat1, lon=lon1), zoom=zoom1), margin = dict(r=0, l=0, t=0, b=0), autosize=True, hovermode='closest', height=map_height)
# @server.route('/wai-vis')
# def main():
def serve_layout():
dc = zstd.ZstdDecompressor()
datasets = requests.get(base_url + 'datasets').json()
requested_datasets = datasets.copy()
features = list(set([f['feature'] for f in requested_datasets]))
features.sort()
parameters = list(set([f['parameter'] for f in requested_datasets]))
parameters.sort()
methods = list(set([f['method'] for f in requested_datasets]))
methods.sort()
processing_codes = list(set([f['processing_code'] for f in requested_datasets]))
processing_codes.sort()
owners = list(set([f['owner'] for f in requested_datasets]))
owners.sort()
aggregation_statistics = list(set([f['aggregation_statistic'] for f in requested_datasets]))
aggregation_statistics.sort()
frequency_intervals = list(set([f['frequency_interval'] for f in requested_datasets]))
frequency_intervals.sort()
utc_offsets = list(set([f['utc_offset'] for f in requested_datasets]))
utc_offsets.sort()
init_dataset = [d for d in requested_datasets if (d['feature'] == 'waterway') and (d['parameter'] == 'streamflow') and (d['processing_code'] == 'quality_controlled_data')][0]
init_dataset_id = init_dataset['dataset_id']
dataset_table_cols = {'license': 'Data License', 'precision': 'Data Precision', 'units': 'Units'}
### prepare summaries and initial states
max_date = pd.Timestamp.now()
start_date = max_date - pd.DateOffset(years=1)
# init_summ = sel_ts_summ(ts_summ, 'River', 'Flow', 'Recorder', 'Primary', 'ECan', str(start_date.date()), str(max_date.date()))
#
# new_sites = init_summ.drop_duplicates('ExtSiteID')
init_summ_r = requests.post(base_url + 'sampling_sites', params={'dataset_id': init_dataset_id, 'compression': 'zstd'})
init_summ = orjson.loads(dc.decompress(init_summ_r.content))
init_summ = [s for s in init_summ if (pd.Timestamp(s['stats']['to_date']) > start_date) and (pd.Timestamp(s['stats']['from_date']) < max_date)]
init_sites = [{'label': s['ref'], 'value': s['site_id']} for s in init_summ]
init_site_id = [s['value'] for s in init_sites if s['label'] == '70105'][0]
init_lon = [l['geometry']['coordinates'][0] for l in init_summ]
init_lat = [l['geometry']['coordinates'][1] for l in init_summ]
init_names = [l['ref'] + '<br>' + l['name'] for l in init_summ]
init_table = [{'Site ID': s['ref'], 'Site Name': s['name'], 'Min Value': s['stats']['min'], 'Mean Value': s['stats']['mean'], 'Max Value': s['stats']['max'], 'Start Date': s['stats']['from_date'], 'End Date': s['stats']['to_date'], 'Last Modified Date': s['modified_date']} for s in init_summ]
# init_ts_r = requests.get(base_url + 'time_series_results', params={'dataset_id': init_dataset_id, 'site_id': init_site_id, 'compression': 'zstd', 'from_date': start_date.round('s').isoformat(), 'to_date': max_date.round('s').isoformat()})
# dc = zstd.ZstdDecompressor()
# df1 = pd.DataFrame(orjson.loads(dc.decompress(init_ts_r.content)))
layout = html.Div(children=[
html.Div([
html.P(children='Filter datasets (select from top to bottom):'),
html.Label('Feature'),
dcc.Dropdown(options=[{'label': d, 'value': d} for d in features], value='waterway', id='features'),
html.Label('Parameter'),
dcc.Dropdown(options=[{'label': d, 'value': d} for d in parameters], value='streamflow', id='parameters'),
html.Label('Method'),
dcc.Dropdown(options=[{'label': d, 'value': d} for d in methods], value='sensor_recording', id='methods'),
html.Label('Processing Code'),
dcc.Dropdown(options=[{'label': d, 'value': d} for d in processing_codes], value='quality_controlled_data', id='processing_codes'),
html.Label('Data Owner'),
dcc.Dropdown(options=[{'label': d, 'value': d} for d in owners], value='ECan', id='owners'),
html.Label('Aggregation Statistic'),
dcc.Dropdown(options=[{'label': d, 'value': d} for d in aggregation_statistics], value='mean', id='aggregation_statistics'),
html.Label('Frequency Interval'),
dcc.Dropdown(options=[{'label': d, 'value': d} for d in frequency_intervals], value='1H', id='frequency_intervals'),
html.Label('UTC Offset'),
dcc.Dropdown(options=[{'label': d, 'value': d} for d in utc_offsets], value='0H', id='utc_offsets'),
html.Label('Date Range'),
dcc.DatePickerRange(
end_date=str(max_date.date()),
display_format='DD/MM/YYYY',
start_date=str(start_date.date()),
id='date_sel'
# start_date_placeholder_text='DD/MM/YYYY'
),
html.Label('Site IDs'),
dcc.Dropdown(options=init_sites, id='sites')
# html.Label('Water quality below detection limit method'),
# dcc.RadioItems(
# options=[
# {'label': 'Half dtl', 'value': 'half'},
# {'label': 'Trend analysis method', 'value': 'trend'}
# ],
# value='half',
# id='dtl')
],
className='two columns', style={'margin': 20}),
html.Div([
html.P('Click on a site or "box select" multiple sites:', style={'display': 'inline-block'}),
dcc.Graph(
id = 'site-map',
style={'height': map_height},
figure=dict(
data = [dict(lat = init_lat,
lon = init_lon,
text = init_names,
type = 'scattermapbox',
hoverinfo = 'text',
marker = dict(
size=8,
color='black',
opacity=1
)
)
],
layout=map_layout),
config={"displaylogo": False}),
# html.A(
# 'Download Dataset Summary Data',
# id='download-summ',
# download="dataset_summary.csv",
# href="",
# target="_blank",
# style={'margin': 50}),
#
dash_table.DataTable(
id='dataset_table',
columns=[{"name": v, "id": v, 'deletable': True} for k, v in dataset_table_cols.items()],
data=[],
sort_action="native",
sort_mode="multi",
style_cell={
'minWidth': '80px', 'maxWidth': '200px',
'whiteSpace': 'normal'}
)
], className='four columns', style={'margin': 20}),
#
html.Div([
# html.P('Select Dataset for time series plot:', style={'display': 'inline-block'}),
# dcc.Dropdown(options=[{'value:': 5, 'label': init_dataset}], value=5, id='sel_dataset'),
dcc.Graph(
id = 'selected-data',
figure = dict(
data = [dict(x=0, y=0)],
layout = dict(
paper_bgcolor = '#F4F4F8',
plot_bgcolor = '#F4F4F8',
height = ts_plot_height
)
),
config={"displaylogo": False}
),
html.A(
'Download Time Series Data',
id='download-tsdata',
download="tsdata.csv",
href="",
target="_blank",
style={'margin': 50}),
dash_table.DataTable(
id='summ_table',
columns=[{"name": i, "id": i, 'deletable': True} for i in init_table[0].keys()],
data=init_table,
sort_action="native",
sort_mode="multi",
style_cell={
'minWidth': '80px', 'maxWidth': '200px',
'whiteSpace': 'normal'
}
)
], className='six columns', style={'margin': 10, 'height': 900}),
html.Div(id='ts_data', style={'display': 'none'}),
html.Div(id='datasets', style={'display': 'none'}, children=orjson.dumps(datasets).decode()),
html.Div(id='dataset_id', style={'display': 'none'}, children=init_dataset_id),
html.Div(id='sites_summ', style={'display': 'none'}, children=orjson.dumps(init_summ).decode())
# dcc.Graph(id='map-layout', style={'display': 'none'}, figure=dict(data=[], layout=map_layout))
], style={'margin':0})
return layout
app.layout = serve_layout
########################################
### Callbacks
@app.callback(
[Output('parameters', 'options'), Output('methods', 'options'), Output('processing_codes', 'options'), Output('owners', 'options'), Output('aggregation_statistics', 'options'), Output('frequency_intervals', 'options'), Output('utc_offsets', 'options')],
[Input('features', 'value')],
[State('datasets', 'children')])
def update_parameters(features, datasets):
def make_options(val):
l1 = [{'label': v, 'value': v} for v in val]
return l1
datasets1 = orjson.loads(datasets)
datasets2 = [d for d in datasets1 if d['feature'] == features]
parameters = list(set([d['parameter'] for d in datasets2]))
parameters.sort()
methods = list(set([d['method'] for d in datasets2]))
methods.sort()
processing_codes = list(set([d['processing_code'] for d in datasets2]))
processing_codes.sort()
owners = list(set([d['owner'] for d in datasets2]))
owners.sort()
aggregation_statistics = list(set([d['aggregation_statistic'] for d in datasets2]))
aggregation_statistics.sort()
frequency_intervals = list(set([d['frequency_interval'] for d in datasets2]))
frequency_intervals.sort()
utc_offsets = list(set([d['utc_offset'] for d in datasets2]))
utc_offsets.sort()
return make_options(parameters), make_options(methods), make_options(processing_codes), make_options(owners), make_options(aggregation_statistics), make_options(frequency_intervals), make_options(utc_offsets)
@app.callback(
Output('dataset_id', 'children'), [Input('features', 'value'), Input('parameters', 'value'), Input('methods', 'value'), Input('processing_codes', 'value'), Input('owners', 'value'), Input('aggregation_statistics', 'value'), Input('frequency_intervals', 'value'), Input('utc_offsets', 'value')], [State('datasets', 'children')])
def update_dataset_id(features, parameters, methods, processing_codes, owners, aggregation_statistics, frequency_intervals, utc_offsets, datasets):
try:
dataset = select_dataset(features, parameters, methods, processing_codes, owners, aggregation_statistics, frequency_intervals, utc_offsets, orjson.loads(datasets))
dataset_id = dataset['dataset_id']
print(features, parameters, methods, processing_codes, owners, aggregation_statistics, frequency_intervals, utc_offsets)
return dataset_id
except:
print('No available dataset_id')
@app.callback(
Output('sites_summ', 'children'),
[Input('dataset_id', 'children'), Input('date_sel', 'start_date'), Input('date_sel', 'end_date')])
def update_summ_data(dataset_id, start_date, end_date):
if dataset_id is None:
print('No new sites_summ')
else:
summ_r = requests.post(base_url + 'sampling_sites', params={'dataset_id': dataset_id, 'compression': 'zstd'})
dc = zstd.ZstdDecompressor()
summ_data1 = orjson.loads(dc.decompress(summ_r.content).decode())
        summ_data2 = [s for s in summ_data1 if (pd.Timestamp(s['stats']['to_date']) > pd.Timestamp(start_date)) and (pd.Timestamp(s['stats']['from_date']) < pd.Timestamp(end_date))]
import pandas as pd
import geopandas as gpd
import glob
import os
from shapely import wkt
# from optimization_parameters import *
from _variable_definitions import *
import contextily as ctx
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from _utils import pd2gpd
from matplotlib import rc
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
from _file_import_optimization import *
from matplotlib import rc
from matplotlib.ticker import MaxNLocator
from pandas.core.common import SettingWithCopyWarning
import warnings
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
# ---------------------------------------------------------------------------------------------------------------------
# input data for visualizations
# ---------------------------------------------------------------------------------------------------------------------
val = pd.read_csv(
"validation_results/20220208-202154_validation 1_optimization_result_charging_stations.csv"
)
scenario = pd.read_csv(
"scenarios/results/20220209-144610_Directed Transition_optimization_result_charging_stations.csv"
)
path_SA_driving_range = "sensitivity_analyses/driving_range/"
list_of_paths_SA_driving_range = [
"20220215-085558_TF200_22_optimization_result_charging_stations.csv",
"20220215-090500_TF300_22_optimization_result_charging_stations.csv",
"20220215-091550_TF400_22_optimization_result_charging_stations.csv",
"20220215-093041_TF500_22_optimization_result_charging_stations.csv",
"20220215-095047_TF600_22_optimization_result_charging_stations.csv",
"20220215-101421_TF700_22_optimization_result_charging_stations.csv",
"20220215-104036_TF800_22_optimization_result_charging_stations.csv",
"20220215-111415_TF900_22_optimization_result_charging_stations.csv",
"20220215-114133_TF1000_22_optimization_result_charging_stations.csv",
"20220215-123025_TF1100_22_optimization_result_charging_stations.csv",
"20220215-125510_TF1200_22_optimization_result_charging_stations.csv",
"20220215-132158_TF1300_22_optimization_result_charging_stations.csv",
"20220215-135720_TF1400_22_optimization_result_charging_stations.csv",
]
path_SA_share_BEV = "sensitivity_analyses/epsilon_increase/"
list_of_paths_SA_share_BEV = [
"20220215-094034_ev share - epsilon SC10_3_optimization_result_charging_stations.csv",
"20220215-095419_ev share - epsilon SC20_3_optimization_result_charging_stations.csv",
"20220215-100808_ev share - epsilon SC30_3_optimization_result_charging_stations.csv",
"20220215-102142_ev share - epsilon SC40_3_optimization_result_charging_stations.csv",
"20220215-103522_ev share - epsilon SC50_3_optimization_result_charging_stations.csv",
"20220215-105154_ev share - epsilon SC60_3_optimization_result_charging_stations.csv",
"20220215-111343_ev share - epsilon SC70_3_optimization_result_charging_stations.csv",
"20220215-134430_ev share - epsilon SC80_3_optimization_result_charging_stations.csv",
"20220215-195040_ev share - epsilon SC90_3_optimization_result_charging_stations.csv",
"20220215-214848_ev share - epsilon SC100_3_optimization_result_charging_stations.csv"
]
_filename = 'sensitivity_analyses\cost_reduction_potentials_1202.csv'
scenario_file = pd.read_csv("scenarios/optimization_results_1002.csv")
# ---------------------------------------------------------------------------------------------------------------------
# VALIDATION visualization
# ---------------------------------------------------------------------------------------------------------------------
# colors
colors = ["#5f0f40", "#9a031e", "#E9C46A", "#e36414", "#0f4c5c"]
colors.reverse()
# reference coordinate system for all visualisation
reference_coord_sys = "EPSG:31287"
# highway geometries
highway_geometries = pd.read_csv(r"geometries/highway_geometries_v6.csv")
highway_geometries["geometry"] = highway_geometries.geometry.apply(wkt.loads)
highway_geometries = gpd.GeoDataFrame(highway_geometries)
highway_geometries = highway_geometries.set_crs(reference_coord_sys)
highway_geometries["length"] = highway_geometries.geometry.length
segments_gdf = pd2gpd(pd.read_csv("data/highway_segments.csv"))
copy_highway_geometries = highway_geometries.drop_duplicates(subset=["highway"])
# austrian borders
austrian_border = gpd.read_file("geometries/austrian_border.shp")
# get latest result file
list_of_files = glob.glob("scenarios/*")
# latest_file = max(list_of_files, key=os.path.getctime)
charging_capacity = 150 # (kW)
# energies = scenario_file.p_max_bev.to_list()
def merge_with_geom(results, energy):
filtered_results = results[results[col_type] == "ra"]
# osm geometries
rest_areas = pd2gpd(
pd.read_csv("data/projected_ras.csv"), geom_col_name="centroid"
).sort_values(by=["on_segment", "dist_along_highway"])
rest_areas["segment_id"] = rest_areas["on_segment"]
rest_areas[col_type_ID] = rest_areas["nb"]
rest_areas[col_directions] = rest_areas["evaluated_dir"]
# merge here
results_and_geom_df = pd.merge(
filtered_results, rest_areas, on=[col_segment_id, col_type_ID, col_directions]
)
# turn into GeoDataframe
results_and_geom_df["geometry"] = results_and_geom_df.centroid
results_and_geom_df["total_charging_pole_number"] = np.where(
np.array(results_and_geom_df.pYi_dir) == 0,
np.nan,
np.array(results_and_geom_df.pYi_dir),
)
results_and_geom_df = gpd.GeoDataFrame(
results_and_geom_df, crs=reference_coord_sys, geometry="geometry"
)
results_and_geom_df["charging_capacity"] = (
results_and_geom_df["total_charging_pole_number"] * energy
)
# plot
plot_results_and_geom_df = results_and_geom_df.to_crs("EPSG:3857")
# plot_results_and_geom_df = plot_results_and_geom_df[
# plot_results_and_geom_df.total_charging_pole_number > 0
# ]
plot_results_and_geom_df["x"] = plot_results_and_geom_df.geometry.x
plot_results_and_geom_df["y"] = plot_results_and_geom_df.geometry.y
return plot_results_and_geom_df
plot_sc_1 = merge_with_geom(val, charging_capacity)
merged = pd.merge(
plot_sc_1, existing_infr, how="left", on=["segment_id", "name", "dir"]
)
sub_1 = merged[merged.has_charging_station == True]
sub_2 = merged[merged.total_charging_pole_number > 0]
sub_1["installed_cap"] = (
sub_1["44kW"] * 44
+ sub_1["50kW"] * 50
+ sub_1["75kW"] * 75
+ sub_1["150kW"] * 150
+ sub_1["350kW"] * 350
)
sub_2["installed_cap"] = sub_2["total_charging_pole_number"] * charging_capacity
plot_highway_geometries = highway_geometries.to_crs("EPSG:3857")
plot_austrian_border = austrian_border.to_crs("EPSG:3857")
plot_highway_geometries["null"] = [0] * len(plot_highway_geometries)
min_size = 30
max_size = 150
max_val = max([sub_1["installed_cap"].max(), sub_1["installed_cap"].max()])
fact = max_size / max_val
sizes = list(np.linspace(55, 150, 5))
bounds = np.linspace(0, max_val, 6)
cm = 1 / 2.54
bounds[0] = 44
fig = plt.figure(figsize=(15, 10))
plt.rcParams["font.family"] = "Franklin Gothic Book"
plt.rcParams["font.size"] = 10
gs = fig.add_gridspec(2, hspace=0)
axs = gs.subplots(sharex=True, sharey=True)
plot_highway_geometries.plot(
ax=axs[0], label="Austrian highway network", color="black", zorder=0, linewidth=1
)
plot_highway_geometries.plot(
ax=axs[1], label="Austrian highway network", color="black", zorder=0, linewidth=1
)
plot_austrian_border.plot(ax=axs[0], color="grey", linewidth=1)
plot_austrian_border.plot(ax=axs[1], color="grey", linewidth=1)
for ij in range(0, len(bounds) - 1):
cat = sub_1[sub_1["installed_cap"].isin(np.arange(bounds[ij], bounds[ij + 1]))]
axs[0].scatter(
cat["x"].to_list(),
cat["y"].to_list(),
s=sizes[ij],
color=colors[ij],
label=str(int(bounds[ij])) + " - " + str(int(bounds[ij + 1])) + " kW",
# edgecolors='black',
zorder=10,
)
for ij in range(0, len(bounds) - 1):
cat = sub_2[sub_2["installed_cap"].isin(np.arange(bounds[ij], bounds[ij + 1]))]
axs[1].scatter(
cat["x"].to_list(),
cat["y"].to_list(),
s=sizes[ij],
color=colors[ij],
label=str(int(bounds[ij])) + " - " + str(int(bounds[ij + 1])) + " kW",
# edgecolors='black',
zorder=10,
)
axs[0].axis("off")
axs[1].axis("off")
axs[0].text(
1.07e6,
6.2e6,
"Existing infrastructure",
bbox=dict(facecolor="none", edgecolor="grey", boxstyle="round,pad=0.4"),
fontsize=14,
)
axs[1].text(
1.07e6,
6.2e6,
"Model output",
bbox=dict(facecolor="none", edgecolor="grey", boxstyle="round,pad=0.4"),
fontsize=14,
)
# plotting NUTS 2
p = gpd.read_file("geometries\output_BL.shp")
bd = p.to_crs("EPSG:3857")
geoms = bd.geometry.to_list()
names = bd["NAME"].to_list()
for ij in range(0, len(bd)):
if geoms[ij].type == "MultiPolygon":
for g in geoms[ij]:
axs[0].plot(*g.exterior.xy, color="grey", linewidth=1)
axs[1].plot(*g.exterior.xy, color="grey", linewidth=1)
else:
axs[0].plot(*geoms[ij].exterior.xy, color="grey", linewidth=1)
axs[1].plot(*geoms[ij].exterior.xy, color="grey", linewidth=1)
c = geoms[ij].centroid
# axs[0].text(c.x, c.y + 0.03e6, names[ij], color="grey")
# axs[1].text(c.x, c.y + 0.03e6, names[ij], color="grey")
axs[0].legend(loc="lower left", bbox_to_anchor=(0.15, 1, 1, 0), ncol=3, fancybox=True)
# plt.show()
plt.savefig("figures/comparison_image.pdf", bbox_inches="tight")
# ---------------------------------------------------------------------------------------------------------------------
# EXPANSION visualization
# ---------------------------------------------------------------------------------------------------------------------
# colors
colors = ["#6b705c", "#2a9d8f", "#264653", "#f4a261", "#e76f51"]
# colors.reverse()
# reference coordinate system for all visualisation
reference_coord_sys = "EPSG:31287"
# highway geometries
highway_geometries = pd.read_csv(r"geometries/highway_geometries_v6.csv")
highway_geometries["geometry"] = highway_geometries.geometry.apply(wkt.loads)
highway_geometries = gpd.GeoDataFrame(highway_geometries)
highway_geometries = highway_geometries.set_crs(reference_coord_sys)
highway_geometries["length"] = highway_geometries.geometry.length
segments_gdf = pd2gpd(pd.read_csv("data/highway_segments.csv"))
copy_highway_geometries = highway_geometries.drop_duplicates(subset=["highway"])
# austrian borders
austrian_border = gpd.read_file("geometries/austrian_border.shp")
# get latest result file
list_of_files = glob.glob("scenarios/*")
# latest_file = max(list_of_files, key=os.path.getctime)
charging_capacity = 350 # (kW)
# energies = scenario_file.p_max_bev.to_list()
def merge_with_geom(results, energy):
filtered_results = results[results[col_type] == "ra"]
# osm geometries
rest_areas = pd2gpd(
pd.read_csv("data/projected_ras.csv"), geom_col_name="centroid"
).sort_values(by=["on_segment", "dist_along_highway"])
rest_areas["segment_id"] = rest_areas["on_segment"]
rest_areas[col_type_ID] = rest_areas["nb"]
rest_areas[col_directions] = rest_areas["evaluated_dir"]
# merge here
results_and_geom_df = pd.merge(
filtered_results, rest_areas, on=[col_segment_id, col_type_ID, col_directions]
)
# turn into GeoDataframe
results_and_geom_df["geometry"] = results_and_geom_df.centroid
results_and_geom_df["total_charging_pole_number"] = np.where(
np.array(results_and_geom_df.pYi_dir) == 0,
np.nan,
np.array(results_and_geom_df.pYi_dir),
)
results_and_geom_df = gpd.GeoDataFrame(
results_and_geom_df, crs=reference_coord_sys, geometry="geometry"
)
results_and_geom_df["charging_capacity"] = (
results_and_geom_df["total_charging_pole_number"] * energy
)
# plot
plot_results_and_geom_df = results_and_geom_df.to_crs("EPSG:3857")
# plot_results_and_geom_df = plot_results_and_geom_df[
# plot_results_and_geom_df.total_charging_pole_number > 0
# ]
plot_results_and_geom_df["x"] = plot_results_and_geom_df.geometry.x
plot_results_and_geom_df["y"] = plot_results_and_geom_df.geometry.y
return plot_results_and_geom_df
plot_sc_1 = merge_with_geom(scenario, charging_capacity)
merged = pd.merge(
plot_sc_1, existing_infr, how="left", on=["segment_id", "name", "dir"]
)
# sub_1 = merged[merged.has_charging_station == True]
# sub_2 = merged[merged.total_charging_pole_number > 0]
merged["existing_cap"] = +merged["350kW"] * 350
merged["model_cap"] = merged["total_charging_pole_number"] * charging_capacity
merged["existing_cap"] = merged["existing_cap"].replace(np.NaN, 0)
merged["model_cap"] = merged["model_cap"].replace(np.NaN, 0)
# make classification here
merged["diff"] = merged["model_cap"] - merged["existing_cap"]
merged["difference"] = np.where(merged["diff"] < 0, 0, merged["diff"])
comp_df = merged[merged.total_charging_pole_number > 0]
max_val = comp_df["difference"].max()
bounds = [charging_capacity] + [int(round(max_val / 2, -2)), int(max_val)]
size_1 = 150 # small
size_2 = 300 # big
# plot grey , difference == 0
# plot the two classes, for where has_charging_infrastructure == True (blue)
# plot the two classes, for where !(has_charging_infrastructure == True) (red)
plot_highway_geometries = highway_geometries.to_crs("EPSG:3857")
plot_austrian_border = austrian_border.to_crs("EPSG:3857")
plot_highway_geometries["null"] = [0] * len(plot_highway_geometries)
bd = p.to_crs("EPSG:3857")
fig, ax = plt.subplots(
figsize=(13, 8)
)
plt.rcParams["font.family"] = "Franklin Gothic Book"
plt.rcParams["font.size"] = 13
# plotting NUTS 2
geoms = bd.geometry.to_list()
names = bd["NAME"].to_list()
for ij in range(0, len(bd)):
if geoms[ij].type == "MultiPolygon":
for g in geoms[ij]:
ax.plot(*g.exterior.xy, color="grey", linewidth=1)
else:
ax.plot(*geoms[ij].exterior.xy, color="grey", linewidth=1)
c = geoms[ij].centroid
# ax.text(c.x, c.y + 0.03e6, names[ij], color="grey")
# plotting highway network and Austrian boarder
plot_highway_geometries.plot(
ax=ax, label="Austrian highway network", color="black", zorder=0, linewidth=1
)
# count together for the four categories all capacity
expansion_values = []
# plot_austrian_border.plot(ax=ax, color="grey", linewidth=1)
# plot the ones with no change
# plot the two classes, for where has_charging_infrastructure == True (blue);
cat = comp_df[comp_df.has_charging_station == True]
cat0 = cat[cat["difference"].isin(list(range(bounds[0], bounds[1] + 1)))]
cat1 = cat[cat["difference"].isin(list(range(bounds[1] + 1, bounds[2] + 1)))]
expansion_values.append(cat0["difference"].sum())
expansion_values.append(cat1["difference"].sum())
ax.scatter(
cat0["x"].to_list(),
cat0["y"].to_list(),
s=size_1,
color=colors[1],
label="Expansion of existing CS by "
+ str(bounds[0])
+ " - "
+ str(bounds[1])
+ " kW",
zorder=10,
)
ax.scatter(
cat1["x"].to_list(),
cat1["y"].to_list(),
s=size_2,
color=colors[2],
label="Expansion of existing CS by "
+ str(bounds[1])
+ " - "
+ str(bounds[2])
+ " kW",
zorder=10,
)
# plot the two classes, for where !(has_charging_infrastructure == True) (red)
cat = comp_df[~(comp_df.has_charging_station == True)]
cat0 = cat[cat["difference"].isin(list(range(bounds[0], bounds[1] + 1)))]
cat1 = cat[cat["difference"].isin(list(range(bounds[1] + 1, bounds[2] + 1)))]
expansion_values.append(cat0["difference"].sum())
expansion_values.append(cat1["difference"].sum())
ax.scatter(
cat0["x"].to_list(),
cat0["y"].to_list(),
s=size_1,
color=colors[3],
label="Newly installed CS with " + str(bounds[0]) + " - " + str(bounds[1]) + " kW",
# edgecolors='black',
zorder=10,
)
ax.scatter(
cat1["x"].to_list(),
cat1["y"].to_list(),
s=size_2,
color=colors[4],
label="Newly installed CS with "
+ str(bounds[1])
+ " - "
+ str(bounds[2])
+ " kW",
# edgecolors='black',
zorder=10,
)
cat = comp_df[comp_df["difference"] == 0]
if len(cat) > 0:
ax.scatter(
cat["x"].to_list(),
cat["y"].to_list(),
s=size_1,
color=colors[0],
label="No expansion of existing CS",
# edgecolors='black',
zorder=10,
)
ax.axis("off")
ax.set_title("Required charging infrastructure expansion until 2030 under the DT scenario")
ax.legend(loc="lower left", bbox_to_anchor=(0, 0.6, 1, 0), ncol=1, fancybox=True)
tot = sum(expansion_values)
expansion_values = [e / tot * 100 for e in expansion_values]
plt.savefig("figures/expansion_image.pdf", bbox_inches="tight")
# ---------------------------------------------------------------------------------------------------------------------
# Cost reduction potentials
# ---------------------------------------------------------------------------------------------------------------------
_cost_decrease_analysis = pd.read_csv(_filename)
costs = [scenario_file.loc[1].costs] + _cost_decrease_analysis.costs.to_list()[0:4]
# spec_BEV_costs = [scenario_file.loc[3]['€/BEV']] + _cost_decrease_analysis['€/BEV'].to_list()[0:4]
# spec_kW_costs = [scenario_file.loc[3]['€/kW']] + _cost_decrease_analysis['€/kW'].to_list()[0:4]
labels = ['GD scenario'] + _cost_decrease_analysis['scenario_name'].to_list()[0:4]
labels = ['GD scenario\n2030', 'Medium decrease\nin road traffic', 'Major decrease\nin road traffic', 'Increase in\ndriving range', 'Increase in\ncharging power']
c = '#f4f1de'
fig, ax = plt.subplots(figsize=(9, 4))
plt.rcParams['xtick.bottom'] = plt.rcParams['xtick.labelbottom'] = False
plt.rcParams['xtick.top'] = plt.rcParams['xtick.labeltop'] = True
plt.rcParams["font.family"] = "Franklin Gothic Book"
plt.rcParams["font.size"] = 12
# gs = gridspec.GridSpec(2, 1, height_ratios=[1, 1])
# ax1 = plt.subplot(gs[0])
# ax2 = plt.subplot(gs[1], sharex=ax1)
ax.tick_params(axis='both', which='major', labelsize=10)
l2 = ax.bar(labels[0], costs[0], width=0.7, color=['#f6bd60'], zorder=10, label='infrastructure costs in GD scenario 2030')
l3 = ax.bar(labels[1:], costs[1:], width=0.7, color=['#3d405b'] * 4, zorder=10,
label='reduced infrastructure costs of GD scenario 2030')
l4 = ax.bar(labels, costs[0], color=c, width=0.7, zorder=5, label='cost difference relative to GD scenario 2030')
ax.axhline(y=costs[0], linewidth=3, color='#f6bd60', linestyle='--', zorder=30)
ax.grid(axis="y")
ax.set_ylabel('Total infrastructure expansion costs (€)', fontsize=14, fontname="Franklin Gothic Book")
ax.text(labels[0], costs[0]/2, '€ ' + str(int(round((costs[0])/1e6, 0))) + ' Mio.', zorder=20, ha='center', va='center', fontsize=12, fontname="Franklin Gothic Book")
ax.set_yticklabels([str(e) + ' Mio.' for e in range(0, 120, 20)], fontsize=12, fontname="Franklin Gothic Book")
for ij in [1,2,4]:
ax.text(labels[ij], costs[ij] + (costs[0] - costs[ij])/2, u"\u2212" + ' € ' +
str(int(round((costs[0] - costs[ij])/1e6, 0))) + ' Mio.', zorder=20, ha='center', va='center', fontsize=11, fontname="Franklin Gothic Book")
plt.subplots_adjust(hspace=.0)
# ax.set_ylim([0, 70e6])
# ax2.grid()
# l0 = ax2.plot(labels, spec_kW_costs, marker='o', linestyle='dotted', color='#004733', linewidth=2, label="€/kW")
# ax3 = ax2.twinx()
# l1 = ax3.plot(labels, spec_BEV_costs, marker='o', linestyle='dotted', color='#0096c7', linewidth=2, label="€/BEV")
# ax2.set_ylim([120, 480])
# ax3.set_ylim([0, 100])
# insert text descriptions
# for ij in range(0, 5):
# ax2.text(labels[ij], spec_kW_costs[ij] + 40, "{:.2f}".format(spec_kW_costs[ij]), va='top', color='#004733', ha='left')
# ax3.text(labels[ij], spec_BEV_costs[ij] - 10, "{:.2f}".format(spec_BEV_costs[ij]), va='bottom', color='#0096c7',
# ha='right')
# ax3.spines["left"].set_color("#004733") # setting up Y-axis tick color to red
# ax3.spines["right"].set_color("#0096c7") # setting up Y-axis tick color to red
# ax2.tick_params(axis="y", colors="#004733")
# ax3.tick_params(axis="y", colors="#0096c7")
#
# ax2.set_ylabel("€/kW", color="#004733", fontsize=14)
# ax3.set_ylabel("€/BEV", rotation=-90, color="#0096c7", fontsize=14)
# adding labels to graph
y_size = 490
b_box = dict(facecolor="white", edgecolor="white", boxstyle="round,pad=0.5")
#
# for ij in range(0, len(labels)):
# ax2.text(labels[ij], y_size, labels[ij], ha='center', va='top', bbox=b_box, fontweight='extra bold')
ax.xaxis.set_ticks_position('top')
# lns = l0 + l1
lns2 = [l2] + [l3] + [l4]
labs = [l.get_label() for l in lns2]
ax.legend(lns2, labs, bbox_to_anchor=(1.01, -0.05), ncol=2)
# ax2.get_xaxis().set_ticks([])
# ax2.set_xticklabels(['' for e in range(0, len(labels))])
# ax1.xaxis.set_ticks_position('top')
# ax1.xaxis.set_label_position('top')
# ax1.xaxis.tick_top()
# ax1.xaxis.set_ticks_position('both')
# plt.setp(ax3.get_xticklabels(), visible=False)
# plt.setp(ax2.get_xticklabels(), visible=False)
ax.set_title('Cost-reduction potentials in the GD scenario 2030\n', fontsize=15, fontname="Franklin Gothic Book")
# plt.show()
plt.savefig('figures/cost_red.pdf', bbox_inches="tight")
# ---------------------------------------------------------------------------------------------------------------------
# SENSITIVITY ANALYSIS I : DRIVING RANGE
# ---------------------------------------------------------------------------------------------------------------------
results = []
range_max = 1400
range_min = 200
step_size = 100
ranges = np.arange(range_min, range_max + step_size, step_size)
# create a dataframe with all Y values with columns of "100", "200", ...
base_df = pd.read_csv(path_SA_driving_range + list_of_paths_SA_driving_range[0])
df = pd.DataFrame()
# df['locations'] = base_df.POI_ID
nb_x = []
for ij in range(0, len(ranges)):
temp_df = pd.read_csv(path_SA_driving_range + list_of_paths_SA_driving_range[ij])
df[ranges[ij]] = temp_df.pYi_dir
nb_x.append(temp_df.pXi.sum())
# nb_x.append(nb_x[-1])
df_to_plot = df.replace(0, np.nan)
# plot
fig, ax = plt.subplots(figsize=(8, 3.5))
plt.rcParams["font.family"] = "Franklin Gothic Book"
plt.rcParams["font.size"] = 12
font = {'family': "Franklin Gothic Book", 'fontsize': 12}
ax2 = ax.twinx()
plt.xlim([range_min - step_size, range_max + step_size])
ax.tick_params(
axis="x", # changes apply to the x-axis
which="both", # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
) # labels along the bottom edge are off
c = "#6096ba"
plt.rcParams["font.family"] = "Franklin Gothic Book"
plt.rcParams["font.size"] = 12
df_to_plot.boxplot(
ax=ax,
widths=(50),
# notch=True,
patch_artist=True,
boxprops=dict(facecolor=c, color=c),
capprops={"color": c, "linewidth": 2},
whiskerprops={"color": c, "linewidth": 2},
flierprops={"color": c, "markeredgewidth": 2, "markeredgecolor": c},
medianprops=dict(color='red', linewidth=1.5 ),
positions=ranges,
labels=ranges,
)
ax.set_xlabel("driving range (km)", fontname="Franklin Gothic Book")
ax.set_yticklabels(labels= list(range(0, 55,5)),fontname="Franklin Gothic Book")
ax2.set_xticklabels(labels= list(range(200, 1500, 100)), fontname="Franklin Gothic Book")
ax.set_xticklabels(labels= list(range(200, 1500, 100)), fontname="Franklin Gothic Book")
plt.grid("off")
ax.set_ylabel("Nb. charging points per charging station", color="#1d3557", fontdict=font)
ax2.set_ylabel(
"Nb. charging stations", color="#723d46", rotation=-90, labelpad=12, fontsize=12
)
ax2.grid(False)
l0 = ax2.plot(
list(ranges),
nb_x,
marker="o",
color="#723d46",
linewidth=2,
label="Nb. of CS",
)
ax2.set_ylim([36, 66])
ax.set_ylim([0, 50])
l1 = ax.plot(
[range_min - step_size, range_max + step_size],
[int(12000 / 350)] * len([range_min - step_size, range_max + step_size]),
linestyle="dotted",
color="grey",
linewidth=2,
label="Max. nb. of CP at a CS",
)
ax.tick_params(axis="y", colors="#1d3557", labelsize=10)
ax2.tick_params(axis="y", colors="#723d46", labelsize=10)
ax2.spines["left"].set_color("#1d3557")
ax.spines["left"].set_color("#1d3557") # setting up Y-axis tick color to red
ax2.spines["right"].set_color("#723d46")
ax2.spines["top"].set_visible(False)
ax.spines["top"].set_visible(False)
ax2.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
blue_patch = mpatches.Patch(color=c, label='Distribution of nb. of CP at CS')
median_patch = Line2D([0], [0], color='red', lw=1.5)
lns = l0 + l1
labs = [l.get_label() for l in lns]
ax.legend(lns + [blue_patch] + [median_patch], labs + [blue_patch._label, 'Median of CP at CS'], loc=2, fontsize=11, ncol=2)
plt.savefig("figures/driving_range_SA.pdf", bbox_inches="tight")
# ---------------------------------------------------------------------------------------------------------------------
# highway geometries
highway_geometries = pd.read_csv(r"geometries/highway_geometries_v6.csv")
highway_geometries["geometry"] = highway_geometries.geometry.apply(wkt.loads)
highway_geometries = gpd.GeoDataFrame(highway_geometries)
highway_geometries = highway_geometries.set_crs(reference_coord_sys)
highway_geometries["length"] = highway_geometries.geometry.length
segments_gdf = pd2gpd(pd.read_csv("data/highway_segments.csv"))
plot_highway_geometries = highway_geometries.to_crs("EPSG:3857")
# plot_austrian_border = austrian_border.to_crs("EPSG:3857")
plot_highway_geometries["null"] = [0] * len(plot_highway_geometries)
def merge_with_geom(results, energy):
filtered_results = results[results[col_type] == "ra"]
# osm geometries
rest_areas = pd2gpd(
pd.read_csv("data/projected_ras.csv"), geom_col_name="centroid"
).sort_values(by=["on_segment", "dist_along_highway"])
rest_areas["segment_id"] = rest_areas["on_segment"]
rest_areas[col_type_ID] = rest_areas["nb"]
rest_areas[col_directions] = rest_areas["evaluated_dir"]
# merge here
results_and_geom_df = pd.merge(
filtered_results, rest_areas, on=[col_segment_id, col_type_ID, col_directions]
)
# turn into GeoDataframe
results_and_geom_df["geometry"] = results_and_geom_df.centroid
results_and_geom_df["total_charging_pole_number"] = np.where(
np.array(results_and_geom_df.pYi_dir) == 0,
np.nan,
np.array(results_and_geom_df.pYi_dir),
)
results_and_geom_df = gpd.GeoDataFrame(
results_and_geom_df, crs=reference_coord_sys, geometry="geometry"
)
results_and_geom_df["charging_capacity"] = (
results_and_geom_df["total_charging_pole_number"] * energy
)
# plot
plot_results_and_geom_df = results_and_geom_df.to_crs("EPSG:3857")
# plot_results_and_geom_df = plot_results_and_geom_df[
# plot_results_and_geom_df.total_charging_pole_number > 0
# ]
plot_results_and_geom_df["x"] = plot_results_and_geom_df.geometry.x
plot_results_and_geom_df["y"] = plot_results_and_geom_df.geometry.y
return plot_results_and_geom_df
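# The returned GeoDataFrame is projected to EPSG:3857 and carries point coordinates ("x", "y")
# plus "total_charging_pole_number" and "charging_capacity" per rest area, which is what the
# scatter plots above rely on.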
plot_sc_1 = merge_with_geom(val, charging_capacity)
merged = pd.merge(
plot_sc_1, existing_infr, how="left", on=["segment_id", "name", "dir"]
)
results = []
# range_max = 1400
# range_min = 200
# step_size = 100
range_max = 1400
range_min = 200
step_size = 100
ranges = np.arange(range_min, range_max + step_size, step_size)
# create a dataframe with all Y values with columns of "100", "200", ...
base_df = pd.read_csv(path_SA_driving_range + list_of_paths_SA_driving_range[0])
df = pd.DataFrame()
# df['locations'] = base_df.POI_ID
nb_x = []
df["POI_ID"] = base_df["POI_ID"]
for ij in range(0, len(ranges)):
temp_df = pd.read_csv(path_SA_driving_range + list_of_paths_SA_driving_range[ij])
df[ranges[ij]] = temp_df.pXi
nb_x.append(temp_df.pXi.sum())
# nb_x.append(nb_x[-1])
df_to_plot = df.replace(0, np.nan)
merge_2 = | pd.merge(df_to_plot, merged, how="left", on=["POI_ID"]) | pandas.merge |
import sys, os
import unittest
import pandas as pd
import numpy
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, Imputer, LabelEncoder, LabelBinarizer, MinMaxScaler, MaxAbsScaler, RobustScaler,\
Binarizer, PolynomialFeatures, OneHotEncoder, KBinsDiscretizer
from sklearn_pandas import CategoricalImputer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR, OneClassSVM
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.naive_bayes import GaussianNB
from sklearn_pandas import DataFrameMapper
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor, RandomForestClassifier,\
RandomForestRegressor, IsolationForest
from sklearn.linear_model import LinearRegression, LogisticRegression, RidgeClassifier, SGDClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.gaussian_process import GaussianProcessClassifier
from nyoka.preprocessing import Lag
from nyoka import skl_to_pmml
from nyoka import PMML44 as pml
class TestMethods(unittest.TestCase):
def test_sklearn_01(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "svc_pmml.pmml"
model = SVC()
pipeline_obj = Pipeline([
('svm',model)
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,f_name)
pmml_obj = pml.parse(f_name,True)
## 1
svms = pmml_obj.SupportVectorMachineModel[0].SupportVectorMachine
for mod_val, recon_val in zip(model.intercept_, svms):
self.assertEqual("{:.16f}".format(mod_val), "{:.16f}".format(recon_val.Coefficients.absoluteValue))
## 2
svm = pmml_obj.SupportVectorMachineModel[0]
self.assertEqual(svm.RadialBasisKernelType.gamma,model._gamma)
def test_sklearn_02(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "knn_pmml.pmml"
pipeline_obj = Pipeline([
('scaling',StandardScaler()),
('knn',KNeighborsClassifier(n_neighbors = 5))
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,f_name)
pmml_obj = pml.parse(f_name,True)
##1
self.assertIsNotNone(pmml_obj.NearestNeighborModel[0].ComparisonMeasure.euclidean)
##2
self.assertEqual(pmml_obj.NearestNeighborModel[0].ComparisonMeasure.kind, "distance")
##3
self.assertEqual(pipeline_obj.steps[-1][-1].n_neighbors, pmml_obj.NearestNeighborModel[0].numberOfNeighbors)
def test_sklearn_03(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "rf_pmml.pmml"
model = RandomForestClassifier(n_estimators = 100)
pipeline_obj = Pipeline([
("mapping", DataFrameMapper([
(['sepal length (cm)', 'sepal width (cm)'], StandardScaler()) ,
(['petal length (cm)', 'petal width (cm)'], Imputer())
])),
("rfc", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, f_name)
pmml_obj = pml.parse(f_name,True)
## 1
self.assertEqual(model.n_estimators,pmml_obj.MiningModel[0].Segmentation.Segment.__len__())
##2
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.multipleModelMethod, "majorityVote")
def test_sklearn_04(self):
titanic = pd.read_csv("nyoka/tests/titanic_train.csv")
features = titanic.columns
target = 'Survived'
f_name = "gb_pmml.pmml"
pipeline_obj = Pipeline([
("imp", Imputer(strategy="median")),
("gbc", GradientBoostingClassifier(n_estimators = 10))
])
pipeline_obj.fit(titanic[features],titanic[target])
skl_to_pmml(pipeline_obj, features, target, f_name)
pmml_obj = pml.parse(f_name,True)
##1
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.multipleModelMethod, "modelChain")
##2
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.Segment.__len__(), 2)
##3
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.Segment[1].RegressionModel.normalizationMethod, "logit")
def test_sklearn_05(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg'],axis=1)
y = df['mpg']
features = [name for name in df.columns if name not in ('mpg')]
target = 'mpg'
pipeline_obj = Pipeline([
('mapper', DataFrameMapper([
('car name', TfidfVectorizer())
])),
('model',DecisionTreeRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"dtr_pmml.pmml")
self.assertEqual(os.path.isfile("dtr_pmml.pmml"),True)
def test_sklearn_06(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
f_name = "linearregression_pmml.pmml"
model = LinearRegression()
pipeline_obj = Pipeline([
('model',model)
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,f_name)
pmml_obj = pml.parse(f_name, True)
## 1
reg_tab = pmml_obj.RegressionModel[0].RegressionTable[0]
self.assertEqual(reg_tab.intercept,model.intercept_)
## 2
for model_val, pmml_val in zip(model.coef_, reg_tab.NumericPredictor):
self.assertEqual("{:.16f}".format(model_val),"{:.16f}".format(pmml_val.coefficient))
def test_sklearn_07(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "logisticregression_pmml.pmml"
model = LogisticRegression()
pipeline_obj = Pipeline([
("mapping", DataFrameMapper([
(['sepal length (cm)', 'sepal width (cm)'], StandardScaler()) ,
(['petal length (cm)', 'petal width (cm)'], Imputer())
])),
("lr", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, f_name)
pmml_obj = pml.parse(f_name,True)
## 1
segmentation = pmml_obj.MiningModel[0].Segmentation
self.assertEqual(segmentation.Segment.__len__(), model.classes_.__len__()+1)
## 2
self.assertEqual(segmentation.multipleModelMethod, "modelChain")
##3
self.assertEqual(segmentation.Segment[-1].RegressionModel.normalizationMethod, "simplemax")
##4
for i in range(model.classes_.__len__()):
self.assertEqual(segmentation.Segment[i].RegressionModel.normalizationMethod, "logit")
self.assertEqual("{:.16f}".format(model.intercept_[i]),\
"{:.16f}".format(segmentation.Segment[i].RegressionModel.RegressionTable[0].intercept))
def test_sklearn_08(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = [i%2 for i in range(iris.data.shape[0])]
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
('pca',PCA(2)),
('mod',LogisticRegression())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "logisticregression_pca_pmml.pmml")
self.assertEqual(os.path.isfile("logisticregression_pca_pmml.pmml"),True)
def test_sklearn_09(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("SGD", SGDClassifier())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "sgdclassifier_pmml.pmml")
self.assertEqual(os.path.isfile("sgdclassifier_pmml.pmml"),True)
def test_sklearn_10(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("lsvc", LinearSVC())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "linearsvc_pmml.pmml")
self.assertEqual(os.path.isfile("linearsvc_pmml.pmml"),True)
def test_sklearn_11(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',LinearSVR())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"linearsvr_pmml.pmml")
self.assertEqual(os.path.isfile("linearsvr_pmml.pmml"),True)
def test_sklearn_12(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',GradientBoostingRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"gbr.pmml")
self.assertEqual(os.path.isfile("gbr.pmml"),True)
def test_sklearn_13(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("SGD", DecisionTreeClassifier())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "dtr_clf.pmml")
self.assertEqual(os.path.isfile("dtr_clf.pmml"),True)
def test_sklearn_14(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',RandomForestRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"rfr.pmml")
self.assertEqual(os.path.isfile("rfr.pmml"),True)
def test_sklearn_15(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',KNeighborsRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"knnr.pmml")
self.assertEqual(os.path.isfile("knnr.pmml"),True)
def test_sklearn_16(self):
df = | pd.read_csv('nyoka/tests/auto-mpg.csv') | pandas.read_csv |
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from numpy.polynomial.polynomial import polyfit
from scipy.stats import shapiro
from scipy.stats import ttest_ind as tt
from scipy.stats import spearmanr as corrp
import numpy as np
from statsmodels.graphics.gofplots import qqplot
font = {'family' : 'sans-serif',
'weight' : 'light',
'size' : 16}
matplotlib.rc('font', **font)
bad_indices=[]
sr_data=pd.read_csv('self_report_study2.csv') #load self-report data
mb_agnostic=pd.read_csv('mb_scores_rares_empirical_best.csv')
mb_scores=mb_agnostic['MB_behav']
state_t1=pd.read_csv('Gillan_TL_full_lrT.csv',header=None) #load state transition lrs
state_t=pd.read_csv('Gillan_Or_full_lrT_decay.csv',header=None) #load state transition lrs
print(len(state_t1))
r,p=corrp(state_t1[0],state_t[0])
print('CORREL ST TL both models : {}, p {}'.format(r,p))
it_mb=pd.read_csv('Gillan_Or_full_MB_decay.csv',header=None) #load MB beta
# it_mb=np.log(it_mb)
mf1= | pd.read_csv('Gillan_Or_full_MF1_decay.csv',header=None) | pandas.read_csv |
import argparse
import utils
import pandas as pd
import time
import os
parser = argparse.ArgumentParser(prog='cleaner',
description="Parser of cleaning script")
parser.add_argument(
'--data_path', help='Provide Full path of data.', type=str)
parser.add_argument(
'--filename', help="file name of cleaned data. Don't include .csv. default= extracted", type=str, default='extracted')
parser.add_argument(
'--handle_emojies', help='How to handle emojies. [remove] to remove emojies. [emoticon] to keep emoticon. [keep] to keep emojies، default=[emoticon]', type=str, default='emoticon')
args = parser.parse_args()
data_path = args.data_path
handle_emojies = args.handle_emojies
filename = args.filename
if not os.path.isfile(data_path):
raise ValueError(f'file {data_path} not found')
def clean_data():
start_time = time.time()
data = | pd.read_csv(data_path, header=0) | pandas.read_csv |
# ------------------
# this module, grid.py, deals with calculations of all microbe-related activites on a spatial grid with a class, Grid().
# by <NAME>
# ------------------
import numpy as np
import pandas as pd
from microbe import microbe_osmo_psi
from microbe import microbe_mortality_prob as MMP
from enzyme import Arrhenius, Allison
from monomer import monomer_leaching
from utility import expand
class Grid():
"""
This class holds all variables related to microbe, substrate, monomer, and enzyme over the spatial grid.
Accepts returns from the module 'initialization.py' and includes methods as follows:
1) degradation(): explicit substrate degradation
2) uptake(): explicit monomers uptake
3) metabolism(): cellular processes and emergent CUE and respiration
4) mortality(): determine mortality of microbial cells based on mass thresholds
5) reproduction(): compute cell division and dispersal
6) repopulation(): resample taxa from the microbial pool and place them on the grid
Coding philosophy:
Each method begins by copying the relevant global variables to locals and building indices
that ease dataframe index/column processing, and ends by updating the state variables and
passing them back to the globals; all computation happens in between.
Reminder:
Keep a CLOSE EYE on the indexing throughout the matrix/dataframe operations
"""
def __init__(self,runtime,data_init):
"""
The constructor of Grid class.
Parameters:
runtime: user-specified parameters
data_init: dictionary;initialized data from the module 'initialization.py'
"""
self.cycle = int(runtime.loc['end_time',1])
self.gridsize = int(runtime.loc['gridsize',1])
self.n_taxa = int(runtime.loc["n_taxa",1])
self.n_substrates = int(runtime.loc["n_substrates",1])
self.n_enzymes = int(runtime.loc["n_enzymes",1])
self.n_monomers = self.n_substrates + 2
#Degradation
#self.Substrates_init = data_init['Substrates'] # Substrates initialized
self.Substrates = data_init['Substrates'].copy(deep=True) # Substrates;df; w/ .copy() avoiding mutation
self.SubInput = data_init['SubInput'] # Substrate inputs
#self.Enzymes_init = data_init['Enzymes'] # Initial pool of Enzymes
self.Enzymes = data_init['Enzymes'].copy(deep=True) # Enzymes
self.ReqEnz = data_init['ReqEnz'] # Enzymes required by each substrate
self.Ea = data_init['Ea'] # Enzyme activatin energy
self.Vmax0 = data_init['Vmax0'] # Max. reaction speed
self.Km0 = data_init['Km0'] # Half-saturation constant
self.SubstrateRatios = np.float32('nan') # Substrate stoichiometry
self.DecayRates = np.float32('nan') # Substrate decay rate
#Uptake
#self.Microbes_init = data_init['Microbes_pp'] # microbial community before placement
self.Microbes = data_init['Microbes'].copy(deep=True) # microbial community after placement
#self.Monomers_init = data_init['Monomers'] # Monomers initialized
self.Monomers = data_init['Monomers'].copy(deep=True) # Monomers
self.MonInput = data_init['MonInput'] # Inputs of monomers
self.Uptake_Ea = data_init['Uptake_Ea'] # transporter enzyme Ea
self.Uptake_Vmax0 = data_init['Uptake_Vmax0'] # transporter Vmax
self.Uptake_Km0 = data_init['Uptake_Km0'] # transporter Km
self.Monomer_ratios = data_init['Monomer_ratio'].copy(deep=True) # monomer stoichiometry
self.Uptake_ReqEnz = data_init['Uptake_ReqEnz'] # Enzymes required by monomers
self.Uptake_Enz_Cost = data_init['UptakeGenesCost'] # Cost of encoding each uptake gene
self.Taxon_Uptake_C = np.float32('nan') # taxon uptake of C
self.Taxon_Uptake_N = np.float32('nan') # taxon uptake of N
self.Taxon_Uptake_P = np.float32('nan') # taxon uptake of P
#Metabolism
self.Consti_Enzyme_C = data_init["EnzProdConstit"] # C cost of encoding constitutive enzyme
self.Induci_Enzyme_C = data_init["EnzProdInduce"] # C Cost of encoding inducible enzyme
self.Consti_Osmo_C = data_init['OsmoProdConsti'] # C Cost of encoding constitutive osmolyte
self.Induci_Osmo_C = data_init['OsmoProdInduci'] # C Cost of encoding inducible osmolyte
self.Uptake_Maint_Cost = data_init['Uptake_Maint_cost'] # Respiration cost of uptake transporters: 0.01 mg C transporter-1 day-1
self.Enz_Attrib = data_init['EnzAttrib'] # Enzyme attributes; dataframe
self.AE_ref = data_init['AE_ref'] # Reference AE:constant of 0.5;scalar
self.AE_temp = data_init['AE_temp'] # AE sensitivity to temperature;scalar
self.Respiration = np.float32('nan') # Respiration
self.CUE_system = np.float32('nan') # emergent CUE
#self.Transporters = float('nan')
#self.Osmolyte_Con = float('nan')
#self.Osmolyte_Ind = float('nan')
#self.Enzyme_Con = float('nan')
#self.Enzyme_Ind = float('nan')
#self.CUE_Taxon = float('nan')
#self.Growth_Yield = float('nan')
#Mortality
self.MinRatios = data_init['MinRatios'] # minimal cell quotas
self.C_min = data_init['C_min'] # C threshold value of living cell
self.N_min = data_init['N_min'] # N threshold value of living cell
self.P_min = data_init['P_min'] # P threshold value of living cell
self.basal_death_prob = data_init['basal_death_prob'] # basal death probability of microbes
self.death_rate = data_init['death_rate'] # change rate of mortality with water potential
self.tolerance = data_init['TaxDroughtTol'] # taxon drought tolerance
self.wp_fc = data_init['wp_fc'] # scalar; max threshold value of water potential
self.wp_th = data_init['wp_th'] # scalar; min threshold value of water potential
self.alpha = data_init['alpha'] # scalar; moisture sensitivity; 1
self.Kill = np.float32('nan') # total number of cells stochastically killed
# Reproduction
self.fb = data_init['fb'] # index of fungal taxa (=1)
self.max_size_b = data_init['max_size_b'] # threshold of cell division
self.max_size_f = data_init['max_size_f'] # threshold of cell division
self.x = int(runtime.loc['x',1]) # x dimension of grid
self.y = int(runtime.loc['y',1]) # y dimension of grid
self.dist = int(runtime.loc['dist',1]) # maximum dispersal distance: 1 cell
self.direct = int(runtime.loc['direct',1]) # dispersal direction: 0.95
# Climate data
self.temp = data_init['Temp'] # series; temperature
self.psi = data_init['Psi'] # series; water potential
# Global constants
self.Km_Ea = np.float32(20) # kj mol-1;activation energy for both enzyme and transporter
self.Tref = np.float32(293) # reference temperature of 20 celcius
# tradeoff
self.Taxon_Enzyme_Cost_C = np.float32('nan')
self.Taxon_Osmo_Cost_C = np.float32('nan')
self.Microbe_C_Gain = np.float32('nan')
def degradation(self,day):
"""
Explicit degradation of different substrates.
Calculation procedure:
1. Determine the substrates pool: incl. inputs
2. Compute Vmax & Km and make them follow the index of Substrates
3. Follow the Michaelis-Menten equation to compute full degradation rate
4. Impose the substrate-required enzymes upon the full degradation rate
5. Adjust cellulose rate with LCI(lignocellulose index)
"""
# constant of lignocellulose index--LCI
LCI_slope = np.float32(-0.8)
# Substrates index by subtrate names
Sub_index = self.Substrates.index
# Calculate total mass of each substrate (C+N+P) and derive substrate stoichiometry
rss = self.Substrates.sum(axis=1)
SubstrateRatios = self.Substrates.divide(rss,axis=0)
SubstrateRatios = SubstrateRatios.fillna(0) # NOTE:ensure NA(b/c of 0/0 in df) = 0
# Arrhenius equation for Vmax and Km multiplied by exponential decay for Psi sensitivity
Vmax = Arrhenius(self.Vmax0, self.Ea, self.temp[day]) * Allison(0.05, self.wp_fc, self.psi[day]) # Vmax: (enz*gridsize) * sub
Km = Arrhenius(self.Km0, self.Km_Ea, self.temp[day]) # Km: (sub*gridsize) * enz
# Multiply Vmax by enzyme concentration
tev_transition = Vmax.mul(self.Enzymes,axis=0) # (enz*gridsize) * sub
tev_transition.index = [np.arange(self.gridsize).repeat(self.n_enzymes),tev_transition.index] # create a MultiIndex
tev = tev_transition.stack().unstack(1).reset_index(level=0,drop=True) # (sub*gridsize) * enz
tev = tev[Km.columns] # ensure to re-order the columns b/c of python's default alphabetical ordering
# Michaelis-Menten equation
Decay = tev.mul(rss,axis=0)/Km.add(rss,axis=0)
# Pull out each batch of required enzymes and sum across redundant enzymes
batch1 = (self.ReqEnz.loc['set1'].values * Decay).sum(axis=1)
#batch2 = (self.ReqEnz.loc['set2'].values * Decay).sum(axis=1)
# Assess the rate-limiting enzyme and set decay to that rate
#DecaySums = pd.concat([batch1, batch2],axis=1)
#DecayRates0 = DecaySums.min(axis=1, skipna=True)
# Compare to substrate available and take the min, allowing for a tolerance of 1e-9
DecayRates = pd.concat([batch1,rss],axis=1,sort=False).min(axis=1,skipna=True)
# Adjust cellulose rate by linking cellulose degradation to lignin concentration (LCI)
ss7 = self.Substrates.loc[Sub_index=='Lignin'].sum(axis=1).values
DecayRates.loc[Sub_index=='Cellulose'] *= np.float32(1) + (ss7/(ss7 + self.Substrates.loc[Sub_index=='Cellulose','C'])) * LCI_slope
# Update Substrates Pool by removing decayed C, N, & P. Depending on specific needs, adding inputs of substrates can be done here
self.Substrates -= SubstrateRatios.mul(DecayRates,axis=0) #+ self.SubInput
# Pass these two back to the global variables to be used in the next method
self.SubstrateRatios = SubstrateRatios
self.DecayRates = DecayRates
def uptake(self,day):
"""
Explicit uptake of different monomers by transporters following the Michaelis-Menten equation.
Calculation procedure:
1. Average monomers across the grid:
2. Determine pool of monomers: add degradation and input, update stoichiometry
3. Maximum uptake:
4. Uptake by Monomer:
5. Uptake by Taxon:
"""
# Every monomer averaged over the grid in each time step
self.Monomers = expand(self.Monomers.groupby(level=0,sort=False).sum()/self.gridsize,self.gridsize)
# Indices
is_org = (self.Monomers.index != "NH4") & (self.Monomers.index != "PO4") # organic monomers
#is_mineral = (Monomers.index == "NH4") | (Monomers.index == "PO4")
# Update monomer ratios in each time step with organic monomers following the substrates
self.Monomer_ratios[is_org] = self.SubstrateRatios.values
# Determine monomer pool from decay and input
# Organic monomers derived from substrate-decomposition
Decay_Org = self.Monomer_ratios[is_org].mul(self.DecayRates.values,axis=0)
# inputs of organic and mineral monomers
#Input_Org = MR_transition[is_org].mul(self.MonInput[is_org].tolist(),axis=0)
#Input_Mineral = MR_transition[is_mineral].mul((self.MonInput[is_mineral]).tolist(),axis=0)
# Monomer pool determined
self.Monomers.loc[is_org] += Decay_Org #+ Input_Org
#self.Monomers.loc[is_mineral] += Input_Mineral
# Get the total mass of each monomer: C+N+P
rsm = self.Monomers.sum(axis=1)
# Recalculate monomer ratios after updating monomer pool and before uptake calculation
self.Monomer_ratios.loc[is_org] = self.Monomers.loc[is_org].divide(rsm[is_org],axis=0)
self.Monomer_ratios = self.Monomer_ratios.fillna(0)
# Start calculating monomer uptake
# Calculate uptake enzyme kinetic parameters, multiplied by a moisture multiplier accounting for diffusivity implications
Uptake_Vmax = Arrhenius(self.Uptake_Vmax0, self.Uptake_Ea, self.temp[day]) * Allison(0.1, self.wp_fc, self.psi[day])
Uptake_Km = Arrhenius(self.Uptake_Km0, self.Km_Ea, self.temp[day])
# Equation for hypothetical potential uptake (per unit of compatible uptake protein)
Potential_Uptake = (self.Uptake_ReqEnz * Uptake_Vmax).mul(rsm.values,axis=0)/Uptake_Km.add(rsm.values,axis=0)
# Derive the mass of each transporter of each taxon NOTE: transpose the df to Upt*(Taxa*grid)
MicCXGenes = (self.Uptake_Enz_Cost.mul(self.Microbes.sum(axis=1),axis=0)).T
# Define Max_Uptake: (Monomer*gridsize) * Taxon
Max_Uptake_array = np.zeros((self.n_monomers*self.gridsize,self.n_taxa), dtype='float32')
Max_Uptake = pd.DataFrame(data=Max_Uptake_array, index=self.Monomers.index, columns=self.Microbes.index[0:self.n_taxa])
# Matrix multiplication to get max possible uptake by monomer(extract each grid point separately for operation)
for i in range(self.gridsize):
i_monomer = np.arange(i * self.n_monomers, (i+1) * self.n_monomers)
i_taxa = np.arange(i * self.n_taxa, (i+1) * self.n_taxa)
Max_Uptake.iloc[i_monomer,:] = Potential_Uptake.iloc[i_monomer,:].values @ MicCXGenes.iloc[:,i_taxa].values
# Take the min of the monomer available and the max potential uptake, and scale the uptake to what's available
csmu = Max_Uptake.sum(axis=1) # total potential uptake of each monomer
Uptake = Max_Uptake.mul(pd.concat([csmu,rsm],axis=1).min(axis=1,skipna=True)/csmu,axis=0) #(Monomer*gridsize) * Taxon
Uptake.loc[csmu==0] = np.float32(0)
# End computing monomer uptake
# Update Monomers
# By monomer: total uptake (monomer*gridsize) * 3(C-N-P)
self.Monomers -= self.Monomer_ratios.mul(Uptake.sum(axis=1),axis=0)
# Derive Taxon-specific total uptake of C, N, & P
# By taxon: total uptake; (monomer*gridsize) * taxon
C_uptake_df = Uptake.mul(self.Monomer_ratios["C"],axis=0)
N_uptake_df = Uptake.mul(self.Monomer_ratios["N"],axis=0)
P_uptake_df = Uptake.mul(self.Monomer_ratios["P"],axis=0)
# generic multi-index
C_uptake_df.index = N_uptake_df.index = P_uptake_df.index = [np.arange(self.gridsize).repeat(self.n_monomers),C_uptake_df.index]
TUC_df = C_uptake_df.groupby(level=[0]).sum()
TUN_df = N_uptake_df.groupby(level=[0]).sum()
TUP_df = P_uptake_df.groupby(level=[0]).sum()
# Update these 3 global variables
self.Taxon_Uptake_C = TUC_df.stack().values # spatial C uptake: array
self.Taxon_Uptake_N = TUN_df.stack().values # spatial N uptake: array
self.Taxon_Uptake_P = TUP_df.stack().values # spatial P uptake: array
def metabolism(self,day):
"""
Explicitly calculate intra-cellular production of metabolites.
Handles both constitutive (standing biomass) and inducible (immediate monomer uptake) pathways, following:
1. constitutive enzyme and osmolyte production
2. inducible enzyme and osmolyte production
3. emergent CUE & Respiration
4. update both Enzymes (with production & loss) and Substrates (with dead enzymes)
"""
# Constants
Osmo_N_cost = np.float32(0.3) # N cost per unit of osmo-C production
Osmo_Maint_cost = np.float32(5.0) # C loss per unit of osmo-C production
Enzyme_Loss_Rate = np.float32(0.04) # enzyme turnover rate(=0.04; Allison 2006)
# index of dead enzyme in Substrates
is_deadEnz = self.Substrates.index == "DeadEnz"
#---------------------------------------------------------------------#
#......................constitutive processes.........................#
#---------------------------------------------------------------------#
# Variable Acronyms:
# OECCN : Osmo_Enzyme_Consti_Cost_N
# ARROEC: Avail_Req_ratio_osmo_enzyme_consti
# MNAOEC: Min_N_Avail_Osmo_Enzyme_Consti
#...............................................
# Taxon-specific respiration cost of producing transporters: self.uptake_maint_cost = 0.01
# NOTE Microbes['C'],as opposed to Microbes.sum(axis=1) in DEMENT
Taxon_Transporter_Maint = self.Uptake_Enz_Cost.mul(self.Microbes['C'],axis=0).sum(axis=1) * self.Uptake_Maint_Cost
# Osmolyte before adjustment
Taxon_Osmo_Consti = self.Consti_Osmo_C.mul(self.Microbes['C'],axis=0)
Taxon_Osmo_Consti_Cost_N = (Taxon_Osmo_Consti * Osmo_N_cost).sum(axis=1)
# Enzyme before adjustment
Taxon_Enzyme_Consti = self.Consti_Enzyme_C.mul(self.Microbes['C'],axis=0)
Taxon_Enzyme_Consti_Cost_N = (Taxon_Enzyme_Consti.mul(self.Enz_Attrib['N_cost'],axis=1)).sum(axis=1)
# Adjust osmolyte & enzyme production based on available N in microbial biomass
OECCN = Taxon_Osmo_Consti_Cost_N + Taxon_Enzyme_Consti_Cost_N # Total N cost
MNAOEC = (pd.concat([OECCN[OECCN>0],self.Microbes['N'][OECCN>0]],axis=1)).min(axis=1,skipna=True) # get the minimum value
ARROEC = (MNAOEC/OECCN[OECCN>0]).fillna(0) # Derive ratio of available N to required N
# Osmolyte adjusted
Taxon_Osmo_Consti[OECCN>0] = Taxon_Osmo_Consti[OECCN>0].mul(ARROEC,axis=0) # adjusted osmolyte
Taxon_Osmo_Consti_Maint = (Taxon_Osmo_Consti * Osmo_Maint_cost).sum(axis=1) # maintenance
Taxon_Osmo_Consti_Cost_N = (Taxon_Osmo_Consti * Osmo_N_cost).sum(axis=1) # N cost (no P)
Taxon_Osmo_Consti_Cost_C = Taxon_Osmo_Consti.sum(axis=1) + Taxon_Osmo_Consti_Maint # total C consumption
# Enzyme adjusted
Taxon_Enzyme_Consti.loc[OECCN>0] = Taxon_Enzyme_Consti.loc[OECCN>0].mul(ARROEC,axis=0) # adjusted enzyme
Taxon_Enzyme_Consti_Maint = (Taxon_Enzyme_Consti.mul(self.Enz_Attrib['Maint_cost'],axis=1)).sum(axis=1) # maintenance
Taxon_Enzyme_Consti_Cost_N = (Taxon_Enzyme_Consti.mul(self.Enz_Attrib['N_cost'], axis=1)).sum(axis=1) # N cost
Taxon_Enzyme_Consti_Cost_P = (Taxon_Enzyme_Consti.mul(self.Enz_Attrib['P_cost'], axis=1)).sum(axis=1) # P cost
Taxon_Enzyme_Consti_Cost_C = Taxon_Enzyme_Consti.sum(axis=1) + Taxon_Enzyme_Consti_Maint # C cost (total)
#---------------------------------------------------------------------#
#.....Inducible processes.............................................#
#---------------------------------------------------------------------#
# Variable Acronyms:
# OEICN : Osmo_Enzyme_Induci_Cost_N
# OEIAN : Osmo_Enzyme_Induci_Avail_N
# ARROEI: Avail_Req_ratio_osmo_enzyme_induci
# MNAOEI: Min_N_Avail_Osmo_Enzyme_Induci
#..................................................
# Assimilation efficiency constrained by temperature
Taxon_AE = self.AE_ref + (self.temp[day] - (self.Tref - np.float32(273))) * self.AE_temp #scalar
# Taxon growth respiration
Taxon_Growth_Respiration = self.Taxon_Uptake_C * (np.float32(1) - Taxon_AE)
# derive the water potential modifier by calling the function microbe_osmo_psi()
f_psi = microbe_osmo_psi(self.alpha,self.wp_fc,self.psi[day])
# Inducible Osmolyte production only when psi reaches below wp_fc
Taxon_Osmo_Induci = self.Induci_Osmo_C.mul(self.Taxon_Uptake_C*Taxon_AE, axis=0) * f_psi
Taxon_Osmo_Induci_Cost_N = (Taxon_Osmo_Induci * Osmo_N_cost).sum(axis=1) # Total osmotic N cost of each taxon (.sum(axis=1))
# Inducible enzyme production
Taxon_Enzyme_Induci = self.Induci_Enzyme_C.mul(self.Taxon_Uptake_C*Taxon_AE, axis=0)
Taxon_Enzyme_Induci_Cost_N = (Taxon_Enzyme_Induci.mul(self.Enz_Attrib['N_cost'],axis=1)).sum(axis=1) # Total enzyme N cost of each taxon (.sum(axis=1))
# Adjust production based on N available
OEICN = Taxon_Osmo_Induci_Cost_N + Taxon_Enzyme_Induci_Cost_N # Total N cost of osmolyte and enzymes
OEIAN = pd.Series(data=self.Taxon_Uptake_N, index=self.Microbes.index) # N available
MNAOEI = (pd.concat([OEICN[OEICN>0],OEIAN[OEICN>0]],axis=1)).min(axis=1,skipna=True) # Get the minimum value by comparing N cost to N available
ARROEI = (MNAOEI/OEICN[OEICN>0]).fillna(0) # Ratio of Available to Required
# Osmolyte adjusted: accompanying maintenance and N cost
Taxon_Osmo_Induci[OEICN>0] = Taxon_Osmo_Induci.loc[OEICN>0].mul(ARROEI,axis=0)
Taxon_Osmo_Induci_Maint = (Taxon_Osmo_Induci * Osmo_Maint_cost).sum(axis=1)
Taxon_Osmo_Induci_Cost_N = (Taxon_Osmo_Induci * Osmo_N_cost).sum(axis=1)
Taxon_Osmo_Induci_Cost_C = Taxon_Osmo_Induci.sum(axis=1) + Taxon_Osmo_Induci_Maint
# Enzyme adjusted: Total enzyme carbon cost (+ CO2 loss), N cost, and P cost for each taxon
Taxon_Enzyme_Induci[OEICN>0] = Taxon_Enzyme_Induci.loc[OEICN>0].mul(ARROEI,axis=0)
Taxon_Enzyme_Induci_Maint = (Taxon_Enzyme_Induci.mul(self.Enz_Attrib["Maint_cost"],axis=1)).sum(axis=1)
Taxon_Enzyme_Induci_Cost_N = (Taxon_Enzyme_Induci.mul(self.Enz_Attrib["N_cost"], axis=1)).sum(axis=1)
Taxon_Enzyme_Induci_Cost_P = (Taxon_Enzyme_Induci.mul(self.Enz_Attrib["P_cost"], axis=1)).sum(axis=1)
Taxon_Enzyme_Induci_Cost_C = Taxon_Enzyme_Induci.sum(axis=1) + Taxon_Enzyme_Induci_Maint
# Derive C, N, & P deposited as biomass from Uptake; ensure no negative values
Microbe_C_Gain = self.Taxon_Uptake_C - Taxon_Growth_Respiration - Taxon_Enzyme_Induci_Cost_C - Taxon_Osmo_Induci_Cost_C
Microbe_N_Gain = self.Taxon_Uptake_N - Taxon_Enzyme_Induci_Cost_N - Taxon_Osmo_Induci_Cost_N
Microbe_P_Gain = self.Taxon_Uptake_P - Taxon_Enzyme_Induci_Cost_P
self.Taxon_Enzyme_Cost_C = Taxon_Enzyme_Induci_Cost_C + Taxon_Enzyme_Consti_Cost_C
self.Taxon_Osmo_Cost_C = Taxon_Osmo_Induci_Cost_C + Taxon_Osmo_Consti_Cost_C
self.Microbe_C_Gain = Microbe_C_Gain - Taxon_Enzyme_Consti_Cost_C - Taxon_Osmo_Consti_Cost_C - Taxon_Transporter_Maint
#------------------------------------------------#
#...............Integration......................#
#------------------------------------------------#
# Update Microbial pools with GAINS (from uptake) and LOSSES (from constitutive production)
self.Microbes.loc[:,'C'] += Microbe_C_Gain - Taxon_Enzyme_Consti_Cost_C - Taxon_Osmo_Consti_Cost_C - Taxon_Transporter_Maint
self.Microbes.loc[:,'N'] += Microbe_N_Gain - Taxon_Enzyme_Consti_Cost_N - Taxon_Osmo_Consti_Cost_N
self.Microbes.loc[:,'P'] += Microbe_P_Gain - Taxon_Enzyme_Consti_Cost_P
self.Microbes[self.Microbes<0] = np.float32(0) # avoid negative values
# Taxon-specific emergent CUE
#CUE_taxon = Microbes['C'].copy() # create a dataframe and set all vals to 0
#CUE_taxon[:] = 0
#pos_uptake_index = self.Taxon_Uptake_C > 0
#CUE_taxon[pos_uptake_index] = Microbe_C_Gain[pos_uptake_index]/self.Taxon_Uptake_C[pos_uptake_index]
# System-level emergent CUE
Taxon_Uptake_C_grid = self.Taxon_Uptake_C.sum(axis=0) # Total C Uptake
if Taxon_Uptake_C_grid == 0:
self.CUE_system = np.float32(0)
else:
self.CUE_system = Microbe_C_Gain.sum(axis=0)/Taxon_Uptake_C_grid
# Respiration from Constitutive + Inducible(NOTE: missing sum(MicLoss[,"C"]) in the Mortality below)
self.Respiration = (
Taxon_Transporter_Maint + Taxon_Growth_Respiration + Taxon_Osmo_Consti_Maint +
Taxon_Osmo_Induci_Maint + Taxon_Enzyme_Consti_Maint + Taxon_Enzyme_Induci_Maint
).sum(axis=0)
# Derive Enzyme production
Taxon_Enzyme_Production = Taxon_Enzyme_Consti + Taxon_Enzyme_Induci # gene-specific prod of enzyme of each taxon: (taxon*gridsize) * enzyme
Taxon_Enzyme_Production.index = [np.arange(self.gridsize).repeat(self.n_taxa),Taxon_Enzyme_Production.index] # create a multi-index
EP_df = Taxon_Enzyme_Production.groupby(level=0).sum() # enzyme-specific production in each grid cell
Enzyme_Production = EP_df.stack().values # 1-D array
# Derive Enzyme turnover
Enzyme_Loss = self.Enzymes * Enzyme_Loss_Rate
# Update Enzyme pools by adding enzymes produced and subtracting the 'dead' enzymes
self.Enzymes += Enzyme_Production - Enzyme_Loss
# Update Substrates pools with dead enzymes
DeadEnz_df = pd.concat(
[Enzyme_Loss,
Enzyme_Loss.mul(self.Enz_Attrib['N_cost'].tolist()*self.gridsize,axis=0),
Enzyme_Loss.mul(self.Enz_Attrib['P_cost'].tolist()*self.gridsize,axis=0)],
axis=1
)
DeadEnz_df.index = [np.arange(self.gridsize).repeat(self.n_enzymes), DeadEnz_df.index] # create a multi-index
DeadEnz_gridcell = DeadEnz_df.groupby(level=0).sum() # total dead mass across taxa in each grid cell
self.Substrates.loc[is_deadEnz] += DeadEnz_gridcell.values
def mortality(self,day):
"""
Calculate microbial mortality and update the stoichiometry of the surviving microbial pools.
Kill starving microbes deterministically and drought-intolerant microbes stochastically.
Also update Substrates with input from dead microbes, monomers (with leaching loss), and respiration.
"""
# Indices
Mic_index = self.Microbes.index
is_DeadMic = self.Substrates.index == 'DeadMic'
is_NH4 = self.Monomers.index == 'NH4'
is_PO4 = self.Monomers.index == 'PO4'
# Reset the index to arabic numerals from taxa series
self.Microbes = self.Microbes.reset_index(drop=True)
MinRatios = self.MinRatios.reset_index(drop=True)
# Create a blank dataframe, Death, having the same structure as Microbes
Death = self.Microbes.copy(deep=True)
Death[:] = np.float32(0)
# Create a series, kill, holding boolean value of False
kill = pd.Series([False]*self.n_taxa*self.gridsize)
# Start to calculate mortality
# --Kill microbes deterministically based on threshold values: C_min: 0.086; N_min:0.012; P_min: 0.002
starve_index = (self.Microbes['C']>0) & ((self.Microbes['C']<self.C_min)|(self.Microbes['N']<self.N_min)|(self.Microbes['P']<self.P_min))
# Index the dead, put them in Death, and set them to 0 in Microbes
Death.loc[starve_index] = self.Microbes[starve_index]
self.Microbes.loc[starve_index] = np.float32(0)
# Index the locations where microbial cells remain alive
mic_index = self.Microbes['C'] > 0
# --Kill microbes stochastically based on mortality prob as a function of water potential and drought tolerance
# call the function MMP:microbe_mortality_psi()
r_death = MMP(self.basal_death_prob,self.death_rate,self.tolerance,self.wp_fc,self.psi[day])
kill.loc[mic_index] = r_death[mic_index] > np.random.uniform(0,1,sum(mic_index)).astype('float32')
# Index the dead, put them in Death, and set them to 0 in Microbes
Death.loc[kill] = self.Microbes[kill]
self.Microbes.loc[kill] = np.float32(0)
# Index locations where microbes remain alive
mic_index = self.Microbes['C']>0
# Calculate the total dead mass (threshold & drought) across taxa in each grid cell
Death_gridcell = Death.groupby(Death.index//self.n_taxa).sum()
# Distinguish between conditions of complete death VS partial death
# All cells die
if sum(mic_index) == 0:
#...Update Substrates pool by adding dead microbial biomass
self.Substrates.loc[is_DeadMic] += Death_gridcell.values
# Partly die and adjust stoichiometry of those remaining alive
else:
# Index only those taxa in Microbes that have below-minimum quotas: Mic_subset
MicrobeRatios = self.Microbes[mic_index].divide(self.Microbes[mic_index].sum(axis=1),axis=0)
mic_index_sub = (MicrobeRatios["C"]<MinRatios[mic_index]["C"])|(MicrobeRatios["N"]<MinRatios[mic_index]["N"])|(MicrobeRatios["P"]<MinRatios[mic_index]["P"])
rat_index = self.Microbes.index.map(mic_index_sub).fillna(False)
# Derive the Microbes wanted
Mic_subset = self.Microbes[rat_index]
StartMicrobes = Mic_subset.copy(deep=True)
# Derive new ratios and Calculate difference between actual and min ratios
MicrobeRatios = Mic_subset.divide(Mic_subset.sum(axis=1),axis=0)
MinRat = MinRatios[rat_index]
Ratio_dif = MicrobeRatios - MinRat
# Create a df recording the ratio differences < 0
Ratio_dif_0 = Ratio_dif.copy(deep=True)
Ratio_dif_0[Ratio_dif>0] = np.float32(0)
# Create a df recording the ratio differences > 0
Excess = Ratio_dif.copy(deep=True)
Excess[Ratio_dif<0] = np.float32(0)
# Determine the limiting nutrient that will be conserved
Limiting = (-Ratio_dif/MinRat).idxmax(axis=1) # Series of index of the first occurrence of maximum in each row
# Set all deficient ratios to their minima
MicrobeRatios[Ratio_dif<0] = MinRat[Ratio_dif<0]
# Reduce the mass fractions for non-deficient elements in proportion to the distance from the minimum
# ....Partition the total deficit to the excess element(s) in proportion to their distances from their minima
MicrobeRatios[Ratio_dif>0] += Excess.mul((Ratio_dif_0.sum(axis=1)/Excess.sum(axis=1)),axis=0)[Ratio_dif>0]
# Construct hypothetical nutrient quotas for each possible minimum nutrient
MC = Mic_subset["C"]
MN = Mic_subset["N"]
MP = Mic_subset["P"]
MRC = MicrobeRatios["C"]
MRN = MicrobeRatios["N"]
MRP = MicrobeRatios["P"]
new_C = | pd.concat([MC, MN*MRC/MRN, MP*MRC/MRP],axis=1) | pandas.concat |
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
IntervalIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDataFrameSortIndex:
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame(
{"value": [1, 2, 3, 4]},
index=MultiIndex(
levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame(
{"value": [2, 1, 4, 3]},
index=MultiIndex(
levels=[["a", "b"], ["aa", "bb"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_non_existent_label_multiindex(self):
# GH#12261
df = DataFrame(0, columns=[], index=MultiIndex.from_product([[], []]))
df.loc["b", "2"] = 1
df.loc["a", "3"] = 1
result = df.sort_index().index.is_monotonic
assert result is True
def test_sort_index_reorder_on_ops(self):
# GH#15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["red", "blu"]],
names=["letter", "size", "color"],
),
columns=["near", "far"],
)
df = df.sort_index()
def my_func(group):
group.index = ["newz", "newa"]
return group
result = df.groupby(level=["letter", "size"]).apply(my_func).sort_index()
expected = MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["newa", "newz"]],
names=["letter", "size", None],
)
tm.assert_index_equal(result.index, expected)
def test_sort_index_nan_multiindex(self):
# GH#14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4), index=mi, columns=list("ABCD"))
s = Series(np.arange(4), index=mi)
df2 = DataFrame(
{
"date": pd.DatetimeIndex(
[
"20121002",
"20121007",
"20130130",
"20130202",
"20130305",
"20121002",
"20121207",
"20130130",
"20130202",
"20130305",
"20130202",
"20130305",
]
),
"user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
"whole_cost": [
1790,
np.nan,
280,
259,
np.nan,
623,
90,
312,
np.nan,
301,
359,
801,
],
"cost": [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12],
}
).set_index(["date", "user_id"])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position="last")
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position="first")
expected = df.iloc[[1, 2, 3, 0], :]
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import os
import pandas as pd
import datetime
import dateutil.parser
class Utils:
def __init__(self):
pass
# given date in synthea format return the year
def getYearFromSyntheaDate(self, date):
return datetime.datetime.strptime(date, "%Y-%m-%d").year
# given date in synthea format return the month
def getMonthFromSyntheaDate(self, date):
return datetime.datetime.strptime(date, "%Y-%m-%d").month
# given date in synthea format return the day
def getDayFromSyntheaDate(self, date):
return datetime.datetime.strptime(date, "%Y-%m-%d").day
# given gender as M or F return the OMOP concept code
def getGenderConceptCode(self, gender):
gender = gender.upper()
if gender=='M':
return '8507'
elif gender=='F':
return '8532'
else:
return 0
# given synthea race code return omop code
def getRaceConceptCode(self, race):
race = race.upper()
if race=='WHITE':
return '8527'
elif race=='BLACK':
return '8516'
elif race=='ASIAN':
return '8515'
else:
return '0'
def getEthnicityConceptCode(self, eth):
eth = eth.upper()
#if race=='HISPANIC' or eth=='CENTRAL_AMERICAN' or eth=='DOMINICAN' or eth=='MEXICAN' or eth=='PUERTO_RICAN' or eth=='SOUTH_AMERICAN':
if eth=='CENTRAL_AMERICAN' or eth=='DOMINICAN' or eth=='MEXICAN' or eth=='PUERTO_RICAN' or eth=='SOUTH_AMERICAN':
return '38003563'
else:
return '0'
# convert a synthea timestamp like 2020-02-16T05:05:49Z to omop datestamp like 2020-02-16
def isoTimestampToDate(self, timestamp):
date = dateutil.parser.parse(timestamp)
return datetime.date.strftime(date, '%Y-%m-%d')
# given a datestamp, return on timestamp with default 0 hour
def getDefaultTimestamp(self, datestamp):
return str(datestamp) + " 00:00:00"
#
def getVisitConcept(self, encounterclass):
if encounterclass == 'emergency' or encounterclass == 'urgentcare':
return '9203'
elif encounterclass == 'ambulatory' or encounterclass == 'wellness' or encounterclass == 'outpatient':
return '9202'
else:
return '0'
#
# check memory usage of a pandas dataframe
#
def mem_usage(self, pandas_obj):
if isinstance(pandas_obj,pd.DataFrame):
usage_b = pandas_obj.memory_usage(deep=True).sum()
else: # we assume if not a df it's a series
usage_b = pandas_obj.memory_usage(deep=True)
usage_mb = usage_b / 1024 ** 2 # convert bytes to megabytes
return "{:03.2f} MB".format(usage_mb)
#
# load the concept vocabulary into dataframes
# can be gz compressed or plain text
#
def loadConceptVocabulary(self, BASE_OMOP_INPUT_DIRECTORY, model_omop):
vocab = {}
vocabfiledict = {}
vocablist = ['CONCEPT', 'CONCEPT_RELATIONSHIP']
# determine if vocabulary files exists and whether they are compressed
for vocabfile in vocablist:
if (os.path.exists(os.path.join(BASE_OMOP_INPUT_DIRECTORY,vocabfile + '.csv'))):
vocabfiledict[vocabfile] = os.path.join(BASE_OMOP_INPUT_DIRECTORY,vocabfile + '.csv')
compression=None
elif (os.path.exists(os.path.join(BASE_OMOP_INPUT_DIRECTORY,vocabfile + '.csv.gz'))):
vocabfiledict[vocabfile] = os.path.join(BASE_OMOP_INPUT_DIRECTORY,vocabfile + '.csv.gz')
compression='gzip'
else:
print("Error: Could not find " + vocabfile + " vocabulary file")
exit(1)
vocab[vocabfile.lower()] = pd.read_csv(vocabfiledict[vocabfile], sep='\t', dtype=model_omop.model_schema[vocabfile.lower()], compression=compression)
return vocab
#
# load the full vocabulary into dataframes
# can be gz compressed or plain text
#
def loadVocabulary(self, BASE_OMOP_INPUT_DIRECTORY, model_omop):
vocab = {}
vocabfiledict = {}
vocablist = ['CONCEPT', 'CONCEPT_RELATIONSHIP', 'CONCEPT_SYNONYM', 'CONCEPT_ANCESTOR', 'CONCEPT_CLASS', 'DRUG_STRENGTH']
# determine if vocabulary files exists and whether they are compressed
for vocabfile in vocablist:
if (os.path.exists(os.path.join(BASE_OMOP_INPUT_DIRECTORY,vocabfile + '.csv'))):
vocabfiledict[vocabfile] = os.path.join(BASE_OMOP_INPUT_DIRECTORY,vocabfile + '.csv')
compression=None
elif (os.path.exists(os.path.join(BASE_OMOP_INPUT_DIRECTORY,vocabfile + '.csv.gz'))):
vocabfiledict[vocabfile] = os.path.join(BASE_OMOP_INPUT_DIRECTORY,vocabfile + '.csv.gz')
compression='gzip'
else:
print("Error: Could not find " + vocabfile + " vocabulary file")
exit(1)
vocab[vocabfile.lower()] = pd.read_csv(vocabfiledict[vocabfile], sep='\t', dtype=model_omop.model_schema[vocabfile.lower()], compression=compression)
return vocab
#
# The following standard OMOP query for source-to-standard mapping is implemented below in pandas;
# given a vocabulary dictionary, it returns the source-to-standard dataframe.
#
# SELECT c.concept_code AS SOURCE_CODE, c.concept_id AS SOURCE_CONCEPT_ID,
# c.concept_name AS SOURCE_CODE_DESCRIPTION,
# c.vocabulary_id AS SOURCE_VOCABULARY_ID, c.domain_id AS SOURCE_DOMAIN_ID,
# c.CONCEPT_CLASS_ID AS SOURCE_CONCEPT_CLASS_ID, c.VALID_START_DATE AS SOURCE_VALID_START_DATE,
# c.VALID_END_DATE AS SOURCE_VALID_END_DATE, c.INVALID_REASON AS SOURCE_INVALID_REASON,
# c1.concept_id AS TARGET_CONCEPT_ID, c1.concept_name AS TARGET_CONCEPT_NAME,
# c1.VOCABULARY_ID AS TARGET_VOCABULARY_ID, c1.domain_id AS TARGET_DOMAIN_ID,
# c1.concept_class_id AS TARGET_CONCEPT_CLASS_ID,
# c1.INVALID_REASON AS TARGET_INVALID_REASON,
# c1.standard_concept AS TARGET_STANDARD_CONCEPT
# FROM CONCEPT C
# JOIN CONCEPT_RELATIONSHIP CR
# ON C.CONCEPT_ID = CR.CONCEPT_ID_1
# AND CR.invalid_reason IS NULL
# AND lower(cr.relationship_id) = 'maps to'
# JOIN CONCEPT C1
# ON CR.CONCEPT_ID_2 = C1.CONCEPT_ID
# AND C1.INVALID_REASON IN (NULL,'')
def sourceToStandardVocabMap(self, vocab, model_omop):
concept = vocab['concept']
concept_relationship = vocab['concept_relationship']
source = concept[model_omop.model_schema['source_to_standard_source'].keys()] # get rid of columns we don't need
source = source.rename(columns=model_omop.model_schema['source_to_standard_source'])
target = concept[model_omop.model_schema['source_to_standard_target'].keys()] # get rid of columns we don't need
target = target.rename(columns=model_omop.model_schema['source_to_standard_target'])
source_result = pd.merge(source,concept_relationship[(concept_relationship["invalid_reason"].isnull()) & (concept_relationship["relationship_id"].str.contains('Maps to'))], \
how='inner', left_on='source_concept_id', right_on='concept_id_1')
source_result = source_result[model_omop.model_schema['source_to_standard_source'].values()].drop_duplicates()
target_result = pd.merge(target,concept_relationship[concept_relationship["invalid_reason"].isnull()], \
how='inner', left_on='target_concept_id', right_on='concept_id_2')
target_result = target_result[ model_omop.model_schema['source_to_standard_target'].values()].drop_duplicates()
result = pd.merge(source_result, target_result, how='inner', left_on='source_concept_id', right_on='target_concept_id')
return result
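    #
    # Hypothetical usage sketch (not part of the original utility): combines the vocabulary
    # loader with the mapping above to look up standard concept ids for one source code.
    # The lowercase column names are assumptions based on the aliases in the query above.
    #
    def exampleStandardConceptLookup(self, BASE_OMOP_INPUT_DIRECTORY, model_omop, source_code):
        vocab = self.loadConceptVocabulary(BASE_OMOP_INPUT_DIRECTORY, model_omop)
        srctostd = self.sourceToStandardVocabMap(vocab, model_omop)
        match = srctostd[srctostd['source_code'] == source_code]
        return match['target_concept_id'].drop_duplicates().tolist()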
#
    # The following standard OMOP query for source-to-source mapping is implemented in pandas.
    # Given a vocabulary dictionary, return the source-to-source dataframe.
#
# SELECT c.concept_code AS SOURCE_CODE, c.concept_id AS SOURCE_CONCEPT_ID,
# c.CONCEPT_NAME AS SOURCE_CODE_DESCRIPTION, c.vocabulary_id AS SOURCE_VOCABULARY_ID,
# c.domain_id AS SOURCE_DOMAIN_ID, c.concept_class_id AS SOURCE_CONCEPT_CLASS_ID,
# c.VALID_START_DATE AS SOURCE_VALID_START_DATE, c.VALID_END_DATE AS SOURCE_VALID_END_DATE,
# c.invalid_reason AS SOURCE_INVALID_REASON, c.concept_ID as TARGET_CONCEPT_ID,
# c.concept_name AS TARGET_CONCEPT_NAME, c.vocabulary_id AS TARGET_VOCABULARY_ID, c.domain_id AS TARGET_DOMAIN_ID,
# c.concept_class_id AS TARGET_CONCEPT_CLASS_ID, c.INVALID_REASON AS TARGET_INVALID_REASON,
# c.STANDARD_CONCEPT AS TARGET_STANDARD_CONCEPT
# FROM CONCEPT c
def sourceToSourceVocabMap(self, vocab, model_omop):
concept = vocab['concept']
source = concept[model_omop.model_schema['source_to_standard_source'].keys()] # get rid of columns we don't need
source = source.rename(columns=model_omop.model_schema['source_to_standard_source'])
target = concept[model_omop.model_schema['source_to_standard_target'].keys()] # get rid of columns we don't need
target = target.rename(columns=model_omop.model_schema['source_to_standard_target'])
        result = pd.merge(source, target, how='inner', left_on='source_concept_id', right_on='target_concept_id')
        return result
import datetime
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
import numpy as np
name_list = []
ticker_any = input('ticker: ')
print("Warning: the more days you predict into the future, the less accurate the model is")
print("")
day_num = int(input("Amount of days you want to predict into the future(0 means 1): "))
ticker_any = ticker_any.upper()
og_link = "https://finance.yahoo.com/quote/AAPL?p=AAPL&.tsrc=fin-srch"
stock_link = "https://finance.yahoo.com/quote/" + ticker_any + "?p=" + ticker_any + "&.tsrc=fin-srch"
csv_link = "https://query1.finance.yahoo.com/v7/finance/download/" + ticker_any + "?period1=-252374400&period2=11635348709&interval=1d&events=history&includeAdjustedClose=true"
import urllib.request
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
url = "http://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers"
headers={'User-Agent':user_agent,}
request=urllib.request.Request(csv_link,None,headers) #The assembled request
response = urllib.request.urlopen(request)
data = response.read()
csv_file = open('values.csv', 'wb')
csv_file.write(data)
csv_file.close()  # close so the data is flushed before values.csv is appended to and re-read below
def lin_reg():
df = pd.read_csv(csv_link)
data = df.dropna()
bruh = pd.DataFrame(data)
print(bruh)
print(bruh.iloc[[-1]])
new_high = bruh["High"].iloc[-1]
new_low = bruh["Low"].iloc[-1]
High=pd.DataFrame(data['High'])
Low=pd.DataFrame(data['Low'])
lm = linear_model.LinearRegression()
model = lm.fit(High, Low)
import numpy as np
High_new=np.array([float(new_high)])
Low_new=np.array([float(new_low)])
High_new = High_new.reshape(-1,1)
Low_new = Low_new.reshape(-1,1)
High_predict=model.predict(High_new)
Low_predict=model.predict(Low_new)
print("Predicted High: ")
print(High_predict)
print("Predicted Low: ")
print(Low_predict)
print("Model Score: ")
print(model.score(High, Low))
print("Dollar Change($)")
print((High_predict - Low_predict).astype(float))
df = pd.read_csv(csv_link)
data = df.dropna()
bruh = pd.DataFrame(data)
new_high = bruh["High"].iloc[-1]
new_low = bruh["Low"].iloc[-1]
High=pd.DataFrame(data['High'])
Low=pd.DataFrame(data['Low'])
lm = linear_model.LinearRegression()
model = lm.fit(High, Low)
import numpy as np
High_new=np.array([float(new_high)])
Low_new=np.array([float(new_low)])
High_new = High_new.reshape(-1,1)
Low_new = Low_new.reshape(-1,1)
High_predict=model.predict(High_new)
Low_predict=model.predict(Low_new)
try:
lin_reg()
except:
pass
import csv
from datetime import date, datetime, timedelta
today = datetime.today()
tommorow = date.today() + timedelta(days=day_num)
print(today)
print(tommorow)
header = ['date' ,'high','low','close','adj_close','volume',]
High_predict = bruh["High"].iloc[-1]
Low_predict = bruh["Low"].iloc[-1]
with open('values.csv', 'a', encoding='UTF8', newline='') as not_f:
writer = csv.writer(not_f)
writer.writerow('')
for i in range(0,day_num):
print(High_predict)
tommorow = date.today() + timedelta(days=day_num)
bruh.iloc[-1, df.columns.get_loc('Date')] = tommorow
bruh.iloc[-1, df.columns.get_loc('High')] = float(High_predict)
bruh.iloc[-1, df.columns.get_loc('Low')] = float(Low_predict)
writer.writerow(bruh.iloc[-1])
import pandas as pd
df = pd.read_csv('values.csv')
data = df.dropna()
bruh = pd.DataFrame(data)
new_high = High_predict
new_low = Low_predict
High=pd.DataFrame(data['High'])
Low = pd.DataFrame(data['Low'])
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": | Series([1, 2, 3], dtype=float) | pandas.Series |
__author__ = "<NAME>"
import json
import pandas as pd
import sqlite3
import argparse
import os
def BrowserHistoryParse(f):
conn = sqlite3.connect(f)
cursor = conn.cursor()
BrowserHistoryTable = pd.read_sql_query("SELECT events_persisted.sid, events_persisted.payload from events_persisted inner join event_tags on events_persisted.full_event_name_hash = event_tags.full_event_name_hash inner join tag_descriptions on event_tags.tag_id = tag_descriptions.tag_id where tag_descriptions.tag_id = 1", conn)
payload = BrowserHistoryTable['payload'].values.tolist()
sid = BrowserHistoryTable['sid'].values.tolist()
payload_navigation_URL = []
payload_navigation_URL_time = []
payload_navigation_URL_date = []
true_sid = []
for i in range(len(payload)):
temp = json.loads(payload[i])
if (temp['data'].__contains__("navigationUrl") == True) and len(temp['data']['navigationUrl']) > 0:
payload_navigation_URL.append(temp['data']['navigationUrl'])
true_sid.append(sid[i])
timestamp = (temp['data']['Timestamp']).replace("T", " ").replace("Z", "")
timestamp = timestamp.split(" ")
payload_navigation_URL_date.append(timestamp[0])
payload_navigation_URL_time.append(timestamp[1] + " UTC")
temp_dict = {'SID': true_sid,'Date': payload_navigation_URL_date, 'Time': payload_navigation_URL_time, 'VisitedURL': payload_navigation_URL}
return temp_dict
def SoftwareInventory(f):
conn = sqlite3.connect(f)
SoftwareInventoryTable = pd.read_sql_query("""SELECT events_persisted.sid, events_persisted.payload from events_persisted inner join event_tags on events_persisted.full_event_name_hash = event_tags.full_event_name_hash inner join tag_descriptions on event_tags.tag_id = tag_descriptions.tag_id where (tag_descriptions.tag_id = 31 and events_persisted.full_event_name="Microsoft.Windows.Inventory.Core.InventoryApplicationAdd")""", conn)
payload = SoftwareInventoryTable['payload'].values.tolist()
sid = SoftwareInventoryTable['sid'].values.tolist()
Program_Name = []
Path = []
OSVersionAtInstallTime = []
InstallDate = []
AppVersion = []
true_sid = []
for i in range(len(payload)):
temp = json.loads(payload[i])
Program_Name.append(temp['data']['Name'])
Path.append(temp['data']['RootDirPath'])
OSVersionAtInstallTime.append(temp['data']['OSVersionAtInstallTime'])
if len(temp['data']['InstallDate']) > 0:
InstallDate.append(temp['data']['InstallDate'] + " UTC")
else:
InstallDate.append("NULL")
AppVersion.append(temp['data']['Version'])
true_sid.append(sid[i])
SoftwareInventorydict = {'SID': true_sid, 'Program Name': Program_Name, 'Install Path': Path, 'Install Date': InstallDate, 'Program Version': AppVersion, 'OS Version at Install Time': OSVersionAtInstallTime}
return SoftwareInventorydict
def WlanScanResults(f):
conn = sqlite3.connect(f)
cursor = conn.cursor()
wlan_scan_results_table = pd.read_sql_query("""SELECT events_persisted.sid, events_persisted.payload from events_persisted inner join event_tags on events_persisted.full_event_name_hash = event_tags.full_event_name_hash inner join tag_descriptions on event_tags.tag_id = tag_descriptions.tag_id where (tag_descriptions.tag_id = 11 and events_persisted.full_event_name = "WlanMSM.WirelessScanResults")""", conn)
payload = wlan_scan_results_table['payload'].values.tolist()
sid = wlan_scan_results_table['sid'].values.tolist()
ssid = []
mac_addr = []
time = []
true_sid = []
for i in range(len(payload)):
temp = json.loads(payload[i])
scan_results_list = temp['data']['ScanResults'].split('\n')
for j in range(len(scan_results_list) - 1):
temp_list = scan_results_list[j].split('\t')
ssid.append(temp_list[0])
mac_addr.append(temp_list[2])
time.append(temp['time'])
true_sid.append(sid[i])
WlanScanDict = {'SID': true_sid, 'Time': time, 'SSID': ssid, 'MAC Address': mac_addr}
return WlanScanDict
def UserDefault(f, file):
conn = sqlite3.connect(f)
user_default_table = pd.read_sql_query("""SELECT events_persisted.sid, events_persisted.payload from events_persisted inner join event_tags on events_persisted.full_event_name_hash = event_tags.full_event_name_hash inner join tag_descriptions on event_tags.tag_id = tag_descriptions.tag_id where (tag_descriptions.tag_id = 11 and events_persisted.full_event_name = "Census.Userdefault")""", conn)
payload = user_default_table['payload'].values.tolist()
sid = user_default_table['sid'].values.tolist()
true_sid = []
temp_file = open(file, "w")
for i in range(len(payload)):
temp = json.loads(payload[i])
temp_file.write("Device Make: " + temp['ext']['protocol']['devMake'] + "\n")
temp_file.write("Device Model: "+ temp['ext']['protocol']['devModel']+ "\n")
temp_file.write("Timezone: "+ temp['ext']['loc']['tz'] + "\n")
true_sid.append(sid[i])
temp_file.write("Default Browser: "+ temp['data']['DefaultBrowserProgId'] + "\n")
temp_list = temp['data']['DefaultApp'].split('|')
for j in range(len(temp_list)):
temp_file.write(temp_list[j]+ "\n")
temp_file.write("----------------------------------\n\n")
return temp_file
def PhysicalDiskInfo(f, file):
conn = sqlite3.connect(f)
physicaldisk_info_table = pd.read_sql_query("""SELECT events_persisted.payload from events_persisted inner join event_tags on events_persisted.full_event_name_hash = event_tags.full_event_name_hash inner join tag_descriptions on event_tags.tag_id = tag_descriptions.tag_id where (tag_descriptions.tag_id = 11 and events_persisted.full_event_name = "Microsoft.Windows.Inventory.General.InventoryMiscellaneousPhysicalDiskInfoAdd")""", conn)
payload = physicaldisk_info_table['payload'].values.tolist()
temp_file = open(file, "w")
for i in range(len(payload)):
temp = json.loads(payload[i])
temp_file.write("Device Id: "+ temp['data']['DeviceId'] + "\n")
temp_file.write("Serial Number: "+ temp['data']['SerialNumber'] + "\n")
temp_file.write("Size (in bytes): "+ temp['data']['Size'] + "\n")
temp_file.write("Number of partitions: "+ str(temp['data']['NumPartitions']) + "\n")
temp_file.write("Bytes per sector: "+ str(temp['data']['BytesPerSector']) + "\n")
temp_file.write("Media type: "+ temp['data']['MediaType'] + "\n")
temp_file.write("----------------------------------\n\n")
return temp_file
def WiFiConnectedEvents(f):
conn = sqlite3.connect(f)
wifi_connected_events_table = pd.read_sql_query("""SELECT events_persisted.sid, events_persisted.payload from events_persisted inner join event_tags on events_persisted.full_event_name_hash = event_tags.full_event_name_hash inner join tag_descriptions on event_tags.tag_id = tag_descriptions.tag_id where (tag_descriptions.tag_id = 11 and events_persisted.full_event_name = "Microsoft.OneCore.NetworkingTriage.GetConnected.WiFiConnectedEvent")""", conn)
payload = wifi_connected_events_table['payload'].values.tolist()
sid = wifi_connected_events_table['sid'].values.tolist()
interfaceGuid = []
interfaceType = []
interfaceDescription = []
ssid = []
authAlgo = []
bssid = []
apManufacturer = []
apModelName = []
apModelNum = []
true_sid = []
for i in range(len(payload)):
temp = json.loads(payload[i])
interfaceGuid.append(temp['data']['interfaceGuid'])
interfaceType.append(temp['data']['interfaceType'])
interfaceDescription.append(temp['data']['interfaceDescription'])
ssid.append(temp['data']['ssid'])
authAlgo.append(temp['data']['authAlgo'])
bssid.append(temp['data']['bssid'])
apManufacturer.append(temp['data']['apManufacturer'])
apModelName.append(temp['data']['apModelName'])
apModelNum.append(temp['data']['apModelNum'])
true_sid.append(sid[i])
wifi_connected_results_dict = {'SID': true_sid, 'SSID': ssid, 'BSSID': bssid, 'AP Manufacturer': apManufacturer, 'AP Model Name': apModelName, 'AP Model No.': apModelNum, 'Interface Type': interfaceType, 'Interface GUID': interfaceGuid, 'Interface Description': interfaceDescription}
return wifi_connected_results_dict
def PnPDeviceParse(f):
conn = sqlite3.connect(f)
pnp_device_table = pd.read_sql_query("""SELECT events_persisted.sid, events_persisted.payload from events_persisted inner join event_tags on events_persisted.full_event_name_hash = event_tags.full_event_name_hash inner join tag_descriptions on event_tags.tag_id = tag_descriptions.tag_id where (tag_descriptions.tag_id = 11 and events_persisted.full_event_name = "Microsoft.Windows.Inventory.Core.InventoryDevicePnpAdd")""", conn)
payload = pnp_device_table['payload'].values.tolist()
sid = pnp_device_table['sid'].values.tolist()
true_sid = []
installdate = []
firstinstalldate = []
model = []
manufacturer = []
service = []
parent_id = []
object_id = []
for i in range(len(payload)):
temp = json.loads(payload[i].encode('unicode_escape'))
true_sid.append(sid[i])
parent_id.append(temp['data']['ParentId'])
object_id.append(temp['data']['baseData']['objectInstanceId'])
installdate.append(temp['data']['InstallDate'])
firstinstalldate.append(temp['data']['FirstInstallDate'])
model.append(temp['data']['Model'])
manufacturer.append(temp['data']['Manufacturer'])
service.append(temp['data']['Service'])
pnp_device_dict = {'SID': true_sid, 'Object ID': object_id, 'Install Date': installdate, 'First Install Date': firstinstalldate, 'Model': model, 'Manufacturer': manufacturer, 'Service': service, 'Parent ID': parent_id}
return pnp_device_dict
if __name__=="__main__":
event_transcript_parser=argparse.ArgumentParser(
description='''EventTranscript.db parser by <NAME>.''',
epilog= '''For any queries, please reach out to me via Twitter - @_abhiramkumar''')
event_transcript_parser.add_argument('-f','--file', required=True, help="Please specify the path to EventTranscript.db")
parser, empty_list = event_transcript_parser.parse_known_args()
if os.path.exists(parser.file):
BrowsingHistory = BrowserHistoryParse(parser.file)
df = pd.DataFrame(BrowsingHistory)
outfile = "BrowserHistory.csv"
df.to_csv(outfile, index=False)
print ("Output written to " + outfile)
software_inventory = SoftwareInventory(parser.file)
df = pd.DataFrame(software_inventory)
outfile = "SoftwareInventory.csv"
df.to_csv(outfile, index=False)
print ("Output written to " + outfile)
WlanScan = WlanScanResults(parser.file)
        df = pd.DataFrame(WlanScan)
import pkg_resources
import pandas as pd
from unittest.mock import sentinel
import osmo_jupyter.dataset.parse as module
def test_parses_ysi_csv_correctly(tmpdir):
test_ysi_classic_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_ysi_classic.csv"
)
formatted_ysi_data = module.parse_ysi_proodo_file(test_ysi_classic_file_path)
expected_ysi_data = pd.DataFrame(
[
{
"timestamp": pd.to_datetime("2019-01-01 00:00:00"),
"YSI barometric pressure (mmHg)": 750,
"YSI DO (% sat)": 19,
"YSI temperature (C)": 24.7,
"YSI unit ID": "unit ID",
}
]
).set_index("timestamp")
pd.testing.assert_frame_equal(formatted_ysi_data, expected_ysi_data)
def test_parses_ysi_kordss_correctly(tmpdir):
test_ysi_kordss_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_ysi_kordss.csv"
)
formatted_ysi_data = module.parse_ysi_prosolo_file(test_ysi_kordss_file_path)
expected_ysi_data = pd.DataFrame(
[
{
"timestamp": pd.to_datetime("2019-01-01 00:00:00"),
"YSI barometric pressure (mmHg)": 750,
"YSI DO (% sat)": 60,
"YSI DO (mg/L)": 6,
"YSI temperature (C)": 24.7,
}
]
).set_index("timestamp")
pd.testing.assert_frame_equal(formatted_ysi_data, expected_ysi_data)
def test_parses_picolog_csv_correctly():
test_picolog_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_picolog.csv"
)
formatted_picolog_data = module.parse_picolog_file(test_picolog_file_path)
expected_picolog_data = pd.DataFrame(
[
{
"timestamp": pd.to_datetime("2019-01-01 00:00:00"),
"PicoLog temperature (C)": 39,
"PicoLog barometric pressure (mmHg)": 750,
},
{
"timestamp": pd.to_datetime("2019-01-01 00:00:02"),
"PicoLog temperature (C)": 40,
"PicoLog barometric pressure (mmHg)": 750,
},
{
"timestamp": pd.to_datetime("2019-01-01 00:00:04"),
"PicoLog temperature (C)": 40,
"PicoLog barometric pressure (mmHg)": 750,
},
]
).set_index("timestamp")
pd.testing.assert_frame_equal(formatted_picolog_data, expected_picolog_data)
def test_parses_calibration_log_correctly():
test_calibration_log_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_calibration_log.csv"
)
formatted_calibration_log_data = module.parse_calibration_log_file(
test_calibration_log_file_path
)
# Nothing is supposed to be renamed or dropped, just datetime formatting
expected_calibration_log_index = pd.DatetimeIndex(
[
pd.to_datetime("2019-01-01 00:00:00"),
pd.to_datetime("2019-01-01 00:00:01"),
pd.to_datetime("2019-01-01 00:00:03"),
pd.to_datetime("2019-01-01 00:00:04"),
],
name="timestamp",
)
pd.testing.assert_index_equal(
formatted_calibration_log_data.index, expected_calibration_log_index
)
class TestParseDataCollectionLog:
def test_parses_data_collection_log_correctly(self):
test_log_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_data_collection_log.xlsx"
)
actual_data_collection_log = module.parse_data_collection_log(
test_log_file_path
)
expected_data_collection_log = pd.DataFrame(
[
{
"experiment_names": [
"2019-07-26--19-34-38-Pi2E32-3000_images_attempt_1"
],
"drive_directory": "2019-07-26 Collect 3000 images (attempt 1)",
"pond": "calibration",
"cosmobot_id": "A",
"cartridge_id": "C00003",
"start_date": pd.to_datetime("2019-07-26 19:12"),
"end_date": pd.to_datetime("2019-07-28 13:55"),
},
{
"experiment_names": [
"2019-08-26--23-34-10-PiE5FB-scum_tank_shakedown"
],
"drive_directory": "2019-08-26 Scum Tank Shakedown",
"pond": "scum tank 1",
"cosmobot_id": "B",
"cartridge_id": "C00005",
"start_date": pd.to_datetime("2019-08-26 23:35"),
"end_date": pd.to_datetime("2019-08-27 08:15"),
},
]
)
pd.testing.assert_frame_equal(
actual_data_collection_log, expected_data_collection_log
)
def test_get_attempt_summary_gets_multiple_buckets(self):
test_attempt_data = pd.Series(
{
"S3 Bucket(s)": "1\n2\n3",
"Drive Directory": "Experiment",
"Cosmobot ID": "Z",
"Cartridge": "C1",
"Start Date/Time": pd.to_datetime("2019"),
"End Date/Time": pd.to_datetime("2020"),
}
)
actual_attempt_summary = module._get_attempt_summary(test_attempt_data)
expected_attempt_summary = pd.Series(
{
"experiment_names": ["1", "2", "3"],
"drive_directory": "Experiment",
"pond": "calibration",
"cosmobot_id": "Z",
"cartridge_id": "C1",
"start_date": | pd.to_datetime("2019") | pandas.to_datetime |
"""
A toy ML workflow intended to demonstrate basic Bionic features. Trains a
logistic regression model on the UCI ML Breast Cancer Wisconsin (Diagnostic)
dataset.
"""
import re
import pandas as pd
from sklearn import datasets, linear_model, metrics, model_selection
import bionic as bn
# Initialize our builder.
builder = bn.FlowBuilder("ml_workflow")
# Define some basic parameters.
builder.assign(
"random_seed", 0, doc="Arbitrary seed for all random decisions in the flow."
)
builder.assign(
"test_split_fraction", 0.3, doc="Fraction of data to include in test set."
)
builder.assign(
"hyperparams_dict", {"C": 1}, doc="Hyperparameters to use when training the model."
)
builder.assign(
"feature_inclusion_regex",
".*",
doc="Regular expression specifying which feature names to include.",
)
# Load the raw data.
@builder
def raw_frame():
"""
The raw data, including all features and a `target` column of labels.
"""
dataset = datasets.load_breast_cancer()
    df = pd.DataFrame(data=dataset.data, columns=dataset.feature_names)
    df["target"] = dataset.target
    return df
#!/usr/bin/env python
from pathlib import Path
import pandas as pd
import typer
from rich.console import Console
from rich.logging import RichHandler
import logging
def check_sample_names(df: pd.DataFrame) -> None:
have_whitespace = df['sample'].str.contains(r'\s', regex=True)
    n_samples_with_whitespace = have_whitespace.sum()
    if n_samples_with_whitespace > 0:
raise ValueError(
f'Found {n_samples_with_whitespace} sample names with whitespace: '
f'{"; ".join(df.loc[have_whitespace,"sample"])}\n'
f'{df.loc[have_whitespace, :]}\n'
f'Please check your sample sheet. Sample names should not have spaces.'
)
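# Minimal sketch of the check above on toy frames (hypothetical sample names, not part of
# the original script); defined for illustration only and never called by the workflow.
def _example_check_sample_names() -> None:
    check_sample_names(pd.DataFrame({"sample": ["sampleA", "sampleB"]}))  # passes silently
    check_sample_names(pd.DataFrame({"sample": ["sample A"]}))  # raises ValueError (whitespace)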
def adjust_reads_path(p: str) -> str:
assert (
p.endswith(".fastq")
or p.endswith(".fastq.gz")
or p.endswith(".fq")
or p.endswith(".fq.gz")
    ), f'FASTQ file "{p}" does not have expected extension: ".fastq", ".fastq.gz", ".fq", ".fq.gz"'
if p.startswith("http") or p.startswith("ftp"):
return p
else:
path = Path(p)
return str(path.resolve().absolute())
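# Illustrative sketch (hypothetical paths; not part of the original script): remote URLs pass
# through unchanged, local paths are resolved to absolute paths, and files without a recognised
# FASTQ extension fail the assertion above. Defined for illustration only and never called.
def _example_adjust_reads_path() -> None:
    print(adjust_reads_path("https://example.com/run1_R1.fastq.gz"))  # returned as-is
    print(adjust_reads_path("reads/sample1_R1.fastq.gz"))  # absolute local path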
def main(input_path: Path, output_sample_sheet: Path):
"""Check and reformat sample sheet into CSV
Outputs CSV with headers: sample, fastq1, fastq2, single_end"""
from rich.traceback import install
install(show_locals=True, width=120, word_wrap=True)
logging.basicConfig(
format="%(message)s",
datefmt="[%Y-%m-%d %X]",
level=logging.DEBUG,
handlers=[RichHandler(rich_tracebacks=True, tracebacks_show_locals=True)],
)
logging.info(
f'input_path="{input_path}" output_sample_sheet="{output_sample_sheet}"'
)
ext = input_path.suffix.lower()
logging.info(f"Input sample sheet extension: {ext}")
try:
if ext in [".tsv", ".txt", ".tab"]:
df = pd.read_table(input_path, dtype="str")
elif ext == ".csv":
            df = pd.read_csv(input_path, dtype="str")
"""Extract minimal growth media and growth rates."""
import pandas as pd
from micom import load_pickle
from micom.media import minimal_medium
from micom.workflows import workflow
max_procs = 6
processes = []
def media_and_gcs(sam):
com = load_pickle("models/" + sam + ".pickle")
# Get growth rates
sol = com.cooperative_tradeoff(fraction=0.9)
rates = sol.members["growth_rate"].copy()
rates["community"] = sol.growth_rate
    rates.name = sam
# Get the minimal medium
med = minimal_medium(com, 0.95*sol.growth_rate)
    med.name = sam
return {"medium": med, "gcs": rates}
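# Hypothetical sequential sketch (not in the original script): the real pipeline presumably
# fans media_and_gcs out over many samples, e.g. via micom's workflow helper imported above;
# the sample ids are placeholders for pickled community models under models/.
def _example_run_serially(sample_ids):
    return {sam: media_and_gcs(sam) for sam in sample_ids}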
samples = pd.read_csv("recent.csv")
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
import sys
from unittest import TestCase
from datetime import (
datetime,
timedelta,
)
import pickle
import pprint
import pytz
import uuid
import pandas as pd
from nose_parameterized import parameterized
from zipline.finance.trading import with_environment
from zipline.assets import Asset, Equity, Future, AssetFinder
from zipline.errors import (
SymbolNotFound,
MultipleSymbolsFound,
SidAssignmentError,
)
class FakeTable(object):
def __init__(self, name, count, dt, fuzzy_str):
self.name = name
self.count = count
self.dt = dt
self.fuzzy_str = fuzzy_str
self.df = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'TEST%s%s' % (self.fuzzy_str, i),
'company_name': self.name + str(i),
'start_date_nano': pd.Timestamp(dt, tz='UTC').value,
'end_date_nano': pd.Timestamp(dt, tz='UTC').value,
'exchange': self.name,
}
for i in range(1, self.count + 1)
]
)
def read(self, *args, **kwargs):
return self.df.to_records()
class FakeTableIdenticalSymbols(object):
def __init__(self, name, as_of_dates):
self.name = name
self.as_of_dates = as_of_dates
self.df = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': self.name,
'company_name': self.name,
'start_date_nano': date.value,
'end_date_nano': (date + timedelta(days=1)).value,
'exchange': self.name,
}
for i, date in enumerate(self.as_of_dates)
]
)
def read(self, *args, **kwargs):
return self.df.to_records()
class FakeTableFromRecords(object):
def __init__(self, records):
self.records = records
self.df = pd.DataFrame.from_records(self.records)
def read(self, *args, **kwargs):
return self.df.to_records()
@with_environment()
def build_lookup_generic_cases(env=None):
"""
Generate test cases for AssetFinder test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
table = FakeTableFromRecords(
[
{
'sid': 0,
'file_name': 'duplicated',
'company_name': 'duplicated_0',
'start_date_nano': dupe_0_start.value,
'end_date_nano': dupe_0_end.value,
'exchange': '',
},
{
'sid': 1,
'file_name': 'duplicated',
'company_name': 'duplicated_1',
'start_date_nano': dupe_1_start.value,
'end_date_nano': dupe_1_end.value,
'exchange': '',
},
{
'sid': 2,
'file_name': 'unique',
'company_name': 'unique',
'start_date_nano': unique_start.value,
'end_date_nano': unique_end.value,
'exchange': '',
},
],
)
env.update_asset_finder(asset_metadata=table.df)
dupe_0, dupe_1, unique = assets = [
env.asset_finder.retrieve_asset(i)
for i in range(3)
]
# This expansion code is run at module import time, which means we have to
# clear the AssetFinder here or else it will interfere with the cache
# for other tests.
env.update_asset_finder(clear_metadata=True)
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
cases = [
##
# Scalars
# Asset object
(table, assets[0], None, assets[0]),
(table, assets[1], None, assets[1]),
(table, assets[2], None, assets[2]),
# int
(table, 0, None, assets[0]),
(table, 1, None, assets[1]),
(table, 2, None, assets[2]),
# Duplicated symbol with resolution date
(table, 'duplicated', dupe_0_start, dupe_0),
(table, 'duplicated', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(table, 'unique', unique_start, unique),
(table, 'unique', None, unique),
##
# Iterables
# Iterables of Asset objects.
(table, assets, None, assets),
(table, iter(assets), None, assets),
# Iterables of ints
(table, (0, 1), None, assets[:-1]),
(table, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(table, ('duplicated', 'unique'), dupe_0_start, [dupe_0, unique]),
(table, ('duplicated', 'unique'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(table,
('duplicated', 2, 'unique', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
]
return cases
class AssetTestCase(TestCase):
def test_asset_object(self):
self.assertEquals({5061: 'foo'}[Asset(5061)], 'foo')
self.assertEquals(Asset(5061), 5061)
self.assertEquals(5061, Asset(5061))
self.assertEquals(Asset(5061), Asset(5061))
self.assertEquals(int(Asset(5061)), 5061)
self.assertEquals(str(Asset(5061)), 'Asset(5061)')
def test_asset_is_pickleable(self):
# Very wow
s = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
exchange='THE MOON',
)
s_unpickled = pickle.loads(pickle.dumps(s))
attrs_to_check = ['end_date',
'exchange',
'first_traded',
'asset_end_date',
'asset_name',
'asset_start_date',
'sid',
'start_date',
'symbol']
for attr in attrs_to_check:
self.assertEqual(getattr(s, attr), getattr(s_unpickled, attr))
def test_asset_comparisons(self):
s_23 = Asset(23)
s_24 = Asset(24)
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(Asset(3) < Asset(4))
self.assertFalse(Asset(4) < Asset(4))
self.assertFalse(Asset(5) < Asset(4))
def test_le(self):
self.assertTrue(Asset(3) <= Asset(4))
self.assertTrue(Asset(4) <= Asset(4))
self.assertFalse(Asset(5) <= Asset(4))
def test_eq(self):
self.assertFalse(Asset(3) == Asset(4))
self.assertTrue(Asset(4) == Asset(4))
self.assertFalse(Asset(5) == Asset(4))
def test_ge(self):
self.assertFalse(Asset(3) >= Asset(4))
self.assertTrue(Asset(4) >= Asset(4))
self.assertTrue(Asset(5) >= Asset(4))
def test_gt(self):
self.assertFalse(Asset(3) > Asset(4))
self.assertFalse(Asset(4) > Asset(4))
self.assertTrue(Asset(5) > Asset(4))
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(Asset(3) < 'a')
self.assertIsNotNone('a' < Asset(3))
else:
with self.assertRaises(TypeError):
Asset(3) < 'a'
with self.assertRaises(TypeError):
'a' < Asset(3)
class TestFuture(TestCase):
future = Future(2468,
symbol='OMK15',
notice_date='2014-01-20',
expiration_date='2014-02-20',
contract_multiplier=500)
def test_str(self):
strd = self.future.__str__()
self.assertEqual("Future(2468 [OMK15])", strd)
def test_repr(self):
reprd = self.future.__repr__()
self.assertTrue("Future" in reprd)
self.assertTrue("2468" in reprd)
self.assertTrue("OMK15" in reprd)
self.assertTrue("notice_date='2014-01-20'" in reprd)
self.assertTrue("expiration_date='2014-02-20'" in reprd)
self.assertTrue("contract_multiplier=500" in reprd)
def test_reduce(self):
reduced = self.future.__reduce__()
self.assertEqual(Future, reduced[0])
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
self.assertTrue('notice_date' in dictd)
self.assertTrue('expiration_date' in dictd)
self.assertTrue('contract_multiplier' in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
class AssetFinderTestCase(TestCase):
@with_environment()
def test_lookup_symbol_fuzzy(self, env=None):
fuzzy_str = '@'
as_of_date = datetime(2013, 1, 1, tzinfo=pytz.utc)
table = FakeTable(uuid.uuid4().hex, 2, as_of_date,
fuzzy_str)
env.update_asset_finder(asset_metadata=table.df)
sf = env.asset_finder
try:
for i in range(2): # we do it twice to test for caching bugs
self.assertIsNone(sf.lookup_symbol('test', as_of_date))
self.assertIsNotNone(sf.lookup_symbol(
'test%s%s' % (fuzzy_str, 1), as_of_date))
self.assertIsNone(sf.lookup_symbol('test%s' % 1, as_of_date))
self.assertIsNone(sf.lookup_symbol(table.name, as_of_date,
fuzzy=fuzzy_str))
self.assertIsNotNone(sf.lookup_symbol(
'test%s%s' % (fuzzy_str, 1), as_of_date, fuzzy=fuzzy_str))
self.assertIsNotNone(sf.lookup_symbol(
'test%s' % 1, as_of_date, fuzzy=fuzzy_str))
finally:
env.update_asset_finder(clear_metadata=True)
@with_environment()
def test_lookup_symbol_resolve_multiple(self, env=None):
as_of_dates = [
pd.Timestamp('2013-01-01', tz='UTC') + timedelta(days=i)
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date.)
for i in range(0, 10, 2)
]
table = FakeTableIdenticalSymbols(
name='existing',
as_of_dates=as_of_dates,
)
env.update_asset_finder(asset_metadata=table.df)
sf = env.asset_finder
try:
for _ in range(2): # we do it twice to test for caching bugs
with self.assertRaises(SymbolNotFound):
sf.lookup_symbol_resolve_multiple('non_existing',
as_of_dates[0])
with self.assertRaises(MultipleSymbolsFound):
sf.lookup_symbol_resolve_multiple('existing',
None)
for i, date in enumerate(as_of_dates):
# Verify that we correctly resolve multiple symbols using
# the supplied date
result = sf.lookup_symbol_resolve_multiple(
'existing',
date,
)
self.assertEqual(result.symbol, 'existing')
self.assertEqual(result.sid, i)
finally:
env.update_asset_finder(clear_metadata=True)
@with_environment()
def test_lookup_symbol_nasdaq_underscore_collisions(self, env=None):
"""
Ensure that each NASDAQ symbol without underscores maps back to the
original symbol when using fuzzy matching.
"""
sf = env.asset_finder
fuzzy_str = '_'
collisions = []
try:
for sid in sf.sids:
sec = sf.retrieve_asset(sid)
if sec.exchange.startswith('NASDAQ'):
found = sf.lookup_symbol(sec.symbol.replace(fuzzy_str, ''),
sec.end_date, fuzzy=fuzzy_str)
if found != sec:
collisions.append((found, sec))
# KNOWN BUG: Filter out assets that have intersections in their
# start and end dates. We can't correctly resolve these.
unexpected_errors = []
for first, second in collisions:
overlapping_dates = (
first.end_date >= second.start_date or
second.end_date >= first.end_date
)
if not overlapping_dates:
unexpected_errors.append((first, second))
self.assertFalse(
unexpected_errors,
pprint.pformat(unexpected_errors),
)
finally:
env.update_asset_finder(clear_metadata=True)
@parameterized.expand(
build_lookup_generic_cases()
)
@with_environment()
def test_lookup_generic(self, table, symbols, reference_date, expected,
env=None):
"""
Ensure that lookup_generic works with various permutations of inputs.
"""
try:
env.update_asset_finder(asset_metadata=table.df)
finder = env.asset_finder
results, missing = finder.lookup_generic(symbols, reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
finally:
env.update_asset_finder(clear_metadata=True)
@with_environment()
def test_lookup_generic_handle_missing(self, env=None):
try:
table = FakeTableFromRecords(
[
# Sids that will be found when we do lookups.
{
'sid': 0,
'file_name': 'real',
'company_name': 'real',
                        'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC').value,
import numpy as np
from sklearn.datasets import fetch_mldata
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import time
from sklearn.manifold import TSNE
# import tensorflow.examples.tutorials.mnist.input_data as input_data
datafile = '' #get this either from command line argument or encapslate into function that can be imported in other files
data = np.loadtxt(open(datafile, 'rb'), delimiter=",", skiprows=1)
X = data[:, 1:-1]
y = data[:, -1]
print (X.shape, y.shape)
feat_cols = [ 'pixel'+str(i) for i in range(X.shape[1]) ]
df = pd.DataFrame(X, columns=feat_cols)
import os
import json
import math
import numpy as np
import pandas as pd
import seaborn as sns; sns.set(style="ticks"); sns.set_context("paper") #sns.set_context("talk")
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from utils.helper import make_dir
from utils.sweeper import Sweeper
class Plotter(object):
def __init__(self, cfg):
# Set default value for symmetric EMA (exponential moving average)
# Note that EMA only works when merged is True
cfg.setdefault('EMA', False)
cfg.setdefault('ci', None)
# Copy parameters
self.exp = cfg['exp']
self.merged = cfg['merged']
self.x_label = cfg['x_label']
self.y_label = cfg['y_label']
self.hue_label = cfg['hue_label']
self.show = cfg['show']
self.imgType = cfg['imgType']
self.ci = cfg['ci']
self.EMA = cfg['EMA']
self.sweep_keys = cfg['sweep_keys']
self.sort_by = cfg['sort_by']
self.ascending = cfg['ascending']
self.loc = cfg['loc']
self.runs = cfg['runs']
# Get total combination of configurations
self.total_combination = get_total_combination(self.exp)
def merge_index(self, config_idx, mode, processed, exp=None):
'''
Given exp and config index, merge the results of multiple runs
'''
if exp is None:
exp = self.exp
result_list = []
for _ in range(self.runs):
result_file = f'./logs/{exp}/{config_idx}/result_{mode}.feather'
# If result file exist, read and merge
result = read_file(result_file)
if result is not None:
# Add config index as a column
result['Config Index'] = config_idx
result_list.append(result)
config_idx += get_total_combination(exp)
if len(result_list) == 0:
return None
# Do symmetric EMA (exponential moving average) only
# when we want the original data (i.e. no processed)
if (self.EMA) and (processed == False):
# Get x's and y's in form of numpy arries
xs, ys = [], []
for result in result_list:
xs.append(result[self.x_label].to_numpy())
ys.append(result[self.y_label].to_numpy())
# Do symetric EMA to get new x's and y's
low = max(x[0] for x in xs)
high = min(x[-1] for x in xs)
n = min(len(x) for x in xs)
for i in range(len(xs)):
new_x, new_y, _ = symmetric_ema(xs[i], ys[i], low, high, n)
result_list[i] = result_list[i][:n]
result_list[i].loc[:, self.x_label] = new_x
result_list[i].loc[:, self.y_label] = new_y
else:
# Cut off redundant results
n = min(len(result) for result in result_list)
for i in range(len(result_list)):
result_list[i] = result_list[i][:n]
return result_list
def get_result(self, exp, config_idx, mode, get_process_result_dict=None):
'''
Return: (merged, processed) result
- if (merged == True) or (get_process_result_dict is not None):
Return a list of (processed) result for all runs.
- if (merged == False):
Return unmerged result of one single run in a list.
'''
if get_process_result_dict is not None:
processed = True
else:
processed = False
if self.merged == True or processed == True:
# Check mode
if not (mode in ['Train', 'Valid', 'Test', 'Dynamic']):
return None
# Merge results
print(f'[{exp}]: Merge {mode} results: {config_idx}/{get_total_combination(exp)}')
result_list = self.merge_index(config_idx, mode, processed, exp)
if result_list is None:
print(f'[{exp}]: No {mode} results for {config_idx}')
return None
# Process result
if processed:
print(f'[{exp}]: Process {mode} results: {config_idx}/{get_total_combination(exp)}')
for i in range(len(result_list)):
new_result = get_process_result_dict(result_list[i], config_idx, mode)
result_list[i] = new_result
return result_list
else:
result_file = f'./logs/{exp}/{config_idx}/result_{mode}.feather'
result = read_file(result_file)
if result is None:
return None
else:
return [result]
def plot_vanilla(self, data, image_path):
'''
Plot results for data:
data = [result_1_list, result_2_list, ...]
result_i_list = [result_run_1, result_run_2, ...]
result_run_i is a Dataframe
'''
fig, ax = plt.subplots()
for i in range(len(data)):
# Convert to numpy array
ys = []
for result in data[i]:
ys.append(result[self.y_label].to_numpy())
# Compute x_mean, y_mean and y_ci
ys = np.array(ys)
x_mean = data[i][0][self.x_label].to_numpy()
y_mean = np.mean(ys, axis=0)
if self.ci == 'sd':
y_ci = np.std(ys, axis=0, ddof=0)
elif self.ci == 'se':
y_ci = np.std(ys, axis=0, ddof=0)/math.sqrt(len(ys))
# Plot
plt.plot(x_mean, y_mean, linewidth=1.0, label=data[i][0][self.hue_label][0])
if self.ci in ['sd', 'se']:
plt.fill_between(x_mean, y_mean - y_ci, y_mean + y_ci, alpha=0.5)
# ax.set_title(title)
ax.legend(loc=self.loc)
ax.set_xlabel(self.x_label)
ax.set_ylabel(self.y_label)
ax.get_figure().savefig(image_path)
if self.show:
plt.show()
plt.clf() # clear figure
plt.cla() # clear axis
plt.close() # close window
def plot_indexList(self, indexList, mode, image_name):
'''
Func: Given (config index) list and mode
- merged == True: plot merged result for all runs.
- merged == False: plot unmerged result of one single run.
'''
expIndexModeList = []
for x in indexList:
expIndexModeList.append([self.exp, x ,mode])
self.plot_expIndexModeList(expIndexModeList, image_name)
def plot_indexModeList(self, indexModeList, image_name):
'''
Func: Given (config index, mode) list
- merged == True: plot merged result for all runs.
- merged == False: plot unmerged result of one single run.
'''
expIndexModeList = []
for x in indexModeList:
expIndexModeList.append([self.exp] + x)
self.plot_expIndexModeList(expIndexModeList, image_name)
def plot_expIndexModeList(self, expIndexModeList, image_name):
'''
Func: Given (exp, config index, mode) list
- merged == True: plot merged result for all runs.
- merged == False: plot unmerged result of one single run.
'''
# Get results
results = []
for exp, config_idx, mode in expIndexModeList:
print(f'[{exp}]: Plot {mode} results: {config_idx}')
result_list = self.get_result(exp, config_idx, mode)
if result_list is None:
continue
# Modify `hue_label` value in result_list for better visualization
for i in range(len(result_list)):
result_list[i][self.hue_label] = result_list[i][self.hue_label].map(lambda x: f'[{exp}] {mode} {x} {config_idx}')
results.append(result_list)
make_dir(f'./logs/{self.exp}/0/')
# Plot
if self.merged:
image_path = f'./logs/{self.exp}/0/{image_name}_merged.{self.imgType}'
else:
image_path = f'./logs/{self.exp}/0/{image_name}.{self.imgType}'
self.plot_vanilla(results, image_path)
def plot_results(self, mode, indexes='all'):
'''
Plot merged result for all config indexes
'''
if indexes == 'all':
if self.merged:
indexes = range(1, self.total_combination+1)
else:
indexes = range(1, self.total_combination*self.runs+1)
for config_idx in indexes:
print(f'[{self.exp}]: Plot {mode} results: {config_idx}/{self.total_combination}')
# Get result
result_list = self.get_result(self.exp, config_idx, mode)
if result_list is None:
continue
# Plot
if self.merged:
image_path = f'./logs/{self.exp}/{config_idx}/{mode}_{self.y_label}_merged.{self.imgType}'
else:
image_path = f'./logs/{self.exp}/{config_idx}/{mode}_{self.y_label}.{self.imgType}'
self.plot_vanilla([result_list], image_path)
def csv_results(self, mode, get_csv_result_dict, get_process_result_dict):
'''
    Show results: generate a *.csv file that stores all merged results
'''
new_result_list = []
for config_idx in range(1, self.total_combination+1):
print(f'[{self.exp}]: CSV {mode} results: {config_idx}/{self.total_combination}')
result_list = self.get_result(self.exp, config_idx, mode, get_process_result_dict)
if result_list is None:
continue
result = pd.DataFrame(result_list)
# Get test results dict
result_dict = get_csv_result_dict(result, config_idx, mode)
# Expand test result dict from config dict
config_file = f'./logs/{self.exp}/{config_idx}/config.json'
with open(config_file, 'r') as f:
config_dict = json.load(f)
for key in self.sweep_keys:
result_dict[key] = find_key_value(config_dict, key)
new_result_list.append(result_dict)
if len(new_result_list) == 0:
print(f'[{self.exp}]: No {mode} results')
return
make_dir(f'./logs/{self.exp}/0/')
results = pd.DataFrame(new_result_list)
# Sort by mean and ste of test result label value
sorted_results = results.sort_values(by=self.sort_by, ascending=self.ascending)
# Save sorted test results into a .feather file
sorted_results_file = f'./logs/{self.exp}/0/{mode}_results.csv'
sorted_results.to_csv(sorted_results_file, index=False)
def one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1.0, low_counts_threshold=0.0):
''' Copy from baselines.common.plot_util
Functionality:
perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
yolds: array of list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
tuple sum_ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
low = xolds[0] if low is None else low
high = xolds[-1] if high is None else high
assert xolds[0] <= low, 'low = {} < xolds[0] = {} - extrapolation not permitted!'.format(low, xolds[0])
assert xolds[-1] >= high, 'high = {} > xolds[-1] = {} - extrapolation not permitted!'.format(high, xolds[-1])
assert len(xolds) == len(yolds), 'length of xolds ({}) and yolds ({}) do not match!'.format(len(xolds), len(yolds))
xolds, yolds = xolds.astype('float64'), yolds.astype('float64')
luoi = 0 # last unused old index
sum_y = 0.
count_y = 0.
xnews = np.linspace(low, high, n)
decay_period = (high - low) / (n - 1) * decay_steps
interstep_decay = np.exp(- 1. / decay_steps)
sum_ys = np.zeros_like(xnews)
count_ys = np.zeros_like(xnews)
for i in range(n):
xnew = xnews[i]
sum_y *= interstep_decay
count_y *= interstep_decay
while True:
if luoi >= len(xolds): break
xold = xolds[luoi]
if xold <= xnew:
decay = np.exp(- (xnew - xold) / decay_period)
sum_y += decay * yolds[luoi]
count_y += decay
luoi += 1
else: break
sum_ys[i] = sum_y
count_ys[i] = count_y
ys = sum_ys / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xnews, ys, count_ys
def symmetric_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1.0, low_counts_threshold=0.0):
''' Copy from baselines.common.plot_util
Functionality:
Perform symmetric EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
yolds: array of list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
tuple sum_ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
xs, ys1, count_ys1 = one_sided_ema(xolds, yolds, low, high, n, decay_steps, low_counts_threshold)
_, ys2, count_ys2 = one_sided_ema(-xolds[::-1], yolds[::-1], -high, -low, n, decay_steps, low_counts_threshold)
ys2 = ys2[::-1]
count_ys2 = count_ys2[::-1]
count_ys = count_ys1 + count_ys2
ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ys
ys[count_ys < low_counts_threshold] = np.nan
xs = [int(x) for x in xs]
return xs, ys, count_ys
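# A minimal demonstration of the resampler above (hypothetical toy data, not part of the
# original module); defined for illustration only and never called by the Plotter.
def _demo_symmetric_ema():
    xs = np.arange(0, 200, dtype='float64')
    ys = np.sin(xs / 20.0) + np.random.normal(0.0, 0.1, size=xs.shape)
    # Smooth the noisy curve onto an even 32-point grid.
    new_xs, new_ys, counts = symmetric_ema(xs, ys, low=xs[0], high=xs[-1], n=32)
    return new_xs, new_ys, counts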
def moving_average(values, window):
'''
Smooth values by doing a moving average
:param values: (numpy array)
:param window: (int)
:return: (numpy array)
'''
weights = np.repeat(1.0, window) / window
return np.convolve(values, weights, 'valid')
def get_total_combination(exp):
'''
Get total combination of experiment configuration
'''
config_file = f'./configs/{exp}.json'
assert os.path.isfile(config_file), f'[{exp}]: No config file <{config_file}>!'
sweeper = Sweeper(config_file)
return sweeper.config_dicts['num_combinations']
def find_key_value(config_dict, key):
'''
Find key value in config dict recursively
'''
for k, v in config_dict.items():
if k == key:
return config_dict[k]
elif type(v) == dict:
value = find_key_value(v, key)
            if value != '/':
return value
return '/'
def read_file(result_file):
if not os.path.isfile(result_file):
print(f'[No such file <{result_file}>')
return None
    result = pd.read_feather(result_file)
    return result
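# Hypothetical usage sketch (not part of the original module): the keys mirror what
# Plotter.__init__ reads; the experiment name, labels and sweep keys are placeholders and
# assume matching ./configs/demo_exp.json and ./logs/demo_exp/ artifacts exist.
def _example_plotter_usage():
    cfg = {
        'exp': 'demo_exp', 'merged': True, 'x_label': 'Step', 'y_label': 'Return',
        'hue_label': 'Agent', 'show': False, 'imgType': 'png', 'ci': 'se', 'EMA': True,
        'sweep_keys': ['lr'], 'sort_by': ['Return (mean)'], 'ascending': [False],
        'loc': 'best', 'runs': 5
    }
    plotter = Plotter(cfg)
    plotter.plot_results(mode='Train', indexes='all')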
from seaborn.utils import locator_to_legend_entries
import random
import glob
import calendar
import pandas as pd
from datetime import datetime
from tqdm import tqdm
import os
print(os.getcwd())
class DataPreprocessing:
"""
This class preprocesses the data and computes
transition probabilities
"""
def __init__(self, datapath, pattern="csv"):
self.datapath = datapath
self.pattern = pattern
def get_files(self):
return glob.glob(f'{self.datapath}/*{self.pattern}')
def order_files_by_day_of_week(self):
files = self.get_files()
filenames = []
days = [i.split("/")[-1].split(".")[0].title() for i in files]
for ordered_day in list(calendar.day_name):
if ordered_day in days:
index = days.index(ordered_day)
filenames.append(files[index])
return filenames
def read_files(self):
files = self.order_files_by_day_of_week()
return [pd.read_csv(i, index_col = 0, parse_dates=True, sep=";") for i in files]
def get_abbreviations_from_files(self):
return [i.split("/")[-1].split(".")[0][0:3] for i in self.order_files_by_day_of_week()]
def put_dfs_into_dict(self):
weekly_names = self.get_abbreviations_from_files()
df_dict = {}
count = 0
for week_name in weekly_names:
df_dict[week_name] = self.read_files()[count]
count += 1
return df_dict
def df_with_unique_ids(self):
df_dict = self.put_dfs_into_dict()
df_all = []
for key in df_dict.keys():
df = df_dict[key]
df["day_of_week"] = df.index.day_name()
df["customer_no"] = df["customer_no"].astype(str)
df["shortened_day"] = [i[0:3] for i in df["day_of_week"].tolist()]
df["customer_id"] = df["customer_no"] + "_" + df["shortened_day"]
df.drop("shortened_day", axis = 1, inplace=True)
df_all.append(df)
return pd.concat(df_all)
def add_checkout_for_customers(self):
df_all = self.df_with_unique_ids()
groups = df_all.groupby("customer_id")
df_list = []
for name, group in groups:
last_location = group["location"][-1]
if last_location != "checkout":
get_last_row = group.iloc[-1, :]
time = str(get_last_row.name)
row_list = [i for i in get_last_row]
time_last = datetime.strptime(f'{time.split(" ")[0]} 21:59:59', "%Y-%m-%d %H:%M:%S")
group.loc[time_last] = [row_list[0], "checkout", row_list[2], row_list[3]]
df_list.append(group)
return pd.concat(df_list)
def get_customer_and_location(self):
df_mc = self.add_checkout_for_customers()[["customer_id", "location"]]
df_with_entrance = df_mc[["customer_id", "location"]].groupby(["customer_id"])
df_with_entrance_all = []
for name, adf in tqdm(df_with_entrance):
time_str = [i for i in adf.index.strftime("%Y-%m-%d %H:%M:%S")]
time_first = datetime.strptime(time_str[0], "%Y-%m-%d %H:%M:%S") - pd.DateOffset(minutes=1)
adf.loc[time_first] = [name, "entrance"]
df_with_entrance_all.append(adf.sort_values("timestamp"))
df = pd.concat(df_with_entrance_all)
df.to_csv("customers_table.csv")
return df
def resample_by_one_minute(self):
df_mc = self.get_customer_and_location()
return df_mc.groupby("customer_id").resample('1T').ffill().sort_values("timestamp")
def shift_by_one(self):
df_mc = self.resample_by_one_minute()
df_mc["before"] = df_mc["location"]
df_mc["after"] =df_mc["location"].shift(-1)
return df_mc
def get_rid_of_checkout(self):
df_mc = self.shift_by_one()
return df_mc[df_mc["before"] != "checkout"]
def get_transition_probabilities(self):
df_mc_sub = self.get_rid_of_checkout()
ct = pd.crosstab(df_mc_sub["after"], df_mc_sub["before"], normalize=1)
return ct
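# Note on the crosstab above (toy numbers, not from the real data): normalize=1
# normalizes each *column*, so every "before" column sums to 1 and entry
# (after, before) reads as P(next location = after | current location = before).
# E.g. two observed moves dairy->spices and dairy->checkout would put 0.5 and 0.5
# in the "dairy" column.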
"""Classes for report generation and add-ons."""
import os
from copy import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from jinja2 import FileSystemLoader, Environment
from json2html import json2html
from sklearn.metrics import roc_auc_score, precision_recall_fscore_support, roc_curve, precision_recall_curve, \
average_precision_score, explained_variance_score, mean_absolute_error, \
mean_squared_error, median_absolute_error, r2_score, f1_score, precision_score, recall_score, confusion_matrix
from ..utils.logging import get_logger
logger = get_logger(__name__)
base_dir = os.path.dirname(__file__)
def extract_params(input_struct):
params = dict()
iterator = input_struct if isinstance(input_struct, dict) else input_struct.__dict__
for key in iterator:
if key.startswith(('_', 'autonlp_params')):
continue
value = iterator[key]
if type(value) in [bool, int, float, str]:
params[key] = value
elif value is None:
params[key] = None
elif hasattr(value, '__dict__') or isinstance(value, dict):
params[key] = extract_params(value)
else:
params[key] = str(type(value))
return params
def plot_roc_curve_image(data, path):
sns.set(style="whitegrid", font_scale=1.5)
plt.figure(figsize=(10, 10));
fpr, tpr, _ = roc_curve(data['y_true'], data['y_pred'])
auc_score = roc_auc_score(data['y_true'], data['y_pred'])
lw = 2
plt.plot(fpr, tpr, color='blue', lw=lw, label='Trained model');
plt.plot([0, 1], [0, 1], color='red', lw=lw, linestyle='--', label='Random model');
plt.xlim([-0.05, 1.05]);
plt.ylim([-0.05, 1.05]);
plt.xlabel('False Positive Rate');
plt.ylabel('True Positive Rate');
lgd = plt.legend(bbox_to_anchor=(0.5, -0.15), loc='upper center', ncol=2);
plt.xticks(np.arange(0, 1.01, 0.05), rotation=45);
plt.yticks(np.arange(0, 1.01, 0.05));
plt.grid(color='gray', linestyle='-', linewidth=1);
plt.title('ROC curve (GINI = {:.3f})'.format(2 * auc_score - 1));
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight');
plt.close()
return auc_score
def plot_pr_curve_image(data, path):
sns.set(style="whitegrid", font_scale=1.5)
plt.figure(figsize=(10, 10));
precision, recall, _ = precision_recall_curve(data['y_true'], data['y_pred'])
ap_score = average_precision_score(data['y_true'], data['y_pred'])
lw = 2
plt.plot(recall, precision, color='blue', lw=lw, label='Trained model');
positive_rate = np.sum(data['y_true'] == 1) / data.shape[0]
plt.plot([0, 1], [positive_rate, positive_rate], \
color='red', lw=lw, linestyle='--', label='Random model');
plt.xlim([-0.05, 1.05]);
plt.ylim([0.45, 1.05]);
plt.xlabel('Recall');
plt.ylabel('Precision');
lgd = plt.legend(bbox_to_anchor=(0.5, -0.15), loc='upper center', ncol=2);
plt.xticks(np.arange(0, 1.01, 0.05), rotation=45);
plt.yticks(np.arange(0, 1.01, 0.05));
plt.grid(color='gray', linestyle='-', linewidth=1);
plt.title('PR curve (AP = {:.3f})'.format(ap_score));
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight');
plt.close()
def plot_preds_distribution_by_bins(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, axs = plt.subplots(figsize=(16, 10))
box_plot_data = []
labels = []
for name, group in data.groupby('bin'):
labels.append(name)
box_plot_data.append(group['y_pred'].values)
box = axs.boxplot(box_plot_data, patch_artist=True, labels=labels)
for patch in box['boxes']:
patch.set_facecolor('green')
axs.set_yscale('log')
axs.set_xlabel('Bin number')
axs.set_ylabel('Prediction')
axs.set_title('Distribution of object predictions by bin')
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_distribution_of_logits(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, axs = plt.subplots(figsize=(16, 10))
data['proba_logit'] = np.log(data['y_pred'].values / (1 - data['y_pred'].values))
sns.kdeplot(data[data['y_true'] == 0]['proba_logit'], shade=True, color="r", label='Class 0 logits', ax=axs)
sns.kdeplot(data[data['y_true'] == 1]['proba_logit'], shade=True, color="g", label='Class 1 logits', ax=axs)
axs.set_xlabel('Logits')
axs.set_ylabel('Density')
axs.set_title('Logits distribution of object predictions (by classes)');
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_pie_f1_metric(data, F1_thresh, path):
tn, fp, fn, tp = confusion_matrix(data['y_true'], (data['y_pred'] > F1_thresh).astype(int)).ravel()
(_, prec), (_, rec), (_, F1), (_, _) = precision_recall_fscore_support(data['y_true'],
(data['y_pred'] > F1_thresh).astype(int))
sns.set(style="whitegrid", font_scale=1.5)
fig, ax = plt.subplots(figsize=(20, 10), subplot_kw=dict(aspect="equal"))
recipe = ["{} True Positives".format(tp),
"{} False Positives".format(fp),
"{} False Negatives".format(fn),
"{} True Negatives".format(tn)]
wedges, texts = ax.pie([tp, fp, fn, tn], wedgeprops=dict(width=0.5), startangle=-40)
bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
kw = dict(arrowprops=dict(arrowstyle="-", color='k'),
bbox=bbox_props, zorder=0, va="center")
for i, p in enumerate(wedges):
ang = (p.theta2 - p.theta1) / 2. + p.theta1
y = np.sin(np.deg2rad(ang))
x = np.cos(np.deg2rad(ang))
horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
connectionstyle = "angle,angleA=0,angleB={}".format(ang)
kw["arrowprops"].update({"connectionstyle": connectionstyle})
ax.annotate(recipe[i], xy=(x, y), xytext=(1.35 * np.sign(x), 1.4 * y),
horizontalalignment=horizontalalignment, **kw)
ax.set_title(
"Trained model: Precision = {:.2f}%, Recall = {:.2f}%, F1-Score = {:.2f}%".format(prec * 100, rec * 100, F1 * 100))
plt.savefig(path, bbox_inches='tight');
plt.close()
return prec, rec, F1
def f1_score_w_co(data, min_co=.01, max_co=.99, step=0.01):
data['y_pred'] = np.clip(np.ceil(data['y_pred'].values / step) * step, min_co, max_co)
pos = data['y_true'].sum()
neg = data['y_true'].shape[0] - pos
grp = pd.DataFrame(data).groupby('y_pred')['y_true'].agg(['sum', 'count'])
grp.sort_index(inplace=True)
grp['fp'] = grp['sum'].cumsum()
grp['tp'] = pos - grp['fp']
grp['tn'] = (grp['count'] - grp['sum']).cumsum()
grp['fn'] = neg - grp['tn']
grp['pr'] = grp['tp'] / (grp['tp'] + grp['fp'])
grp['rec'] = grp['tp'] / (grp['tp'] + grp['fn'])
grp['f1_score'] = 2 * (grp['pr'] * grp['rec']) / (grp['pr'] + grp['rec'])
best_score = grp['f1_score'].max()
best_co = grp.index.values[grp['f1_score'] == best_score].mean()
# print((y_pred < best_co).mean())
return best_score, best_co
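# A small, self-contained sketch of how f1_score_w_co is typically called; the
# labels/probabilities below are made up and only show the expected input format
# (a frame with 'y_true' and 'y_pred') and the (best F1, cutoff) return pair.
def _demo_f1_score_w_co():
    demo = pd.DataFrame({'y_true': [0, 0, 1, 1],
                         'y_pred': [0.10, 0.40, 0.35, 0.80]})
    best_f1, best_cutoff = f1_score_w_co(demo.copy())  # copy: the helper mutates 'y_pred'
    print('best F1 = {:.3f} at cutoff {:.2f}'.format(best_f1, best_cutoff))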
def get_bins_table(data):
bins_table = data.groupby('bin').agg({'y_true': [len, np.mean], \
'y_pred': [np.min, np.mean, np.max]}).reset_index()
bins_table.columns = ['Bin number', 'Amount of objects', 'Mean target', \
'Min probability', 'Average probability', 'Max probability']
return bins_table.to_html(index=False)
# Regression plots:
def plot_target_distribution_1(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, axs = plt.subplots(2, 1, figsize=(16, 20))
sns.kdeplot(data['y_true'], shade=True, color="g", ax=axs[0])
axs[0].set_xlabel('Target value')
axs[0].set_ylabel('Density')
axs[0].set_title('Target distribution (y_true)');
sns.kdeplot(data['y_pred'], shade=True, color="r", ax=axs[1])
axs[1].set_xlabel('Target value')
axs[1].set_ylabel('Density')
axs[1].set_title('Target distribution (y_pred)');
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_target_distribution_2(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, axs = plt.subplots(figsize=(16, 10))
sns.kdeplot(data['y_true'], shade=True, color="g", label="y_true", ax=axs)
sns.kdeplot(data['y_pred'], shade=True, color="r", label="y_pred", ax=axs)
axs.set_xlabel('Target value')
axs.set_ylabel('Density')
axs.set_title('Target distribution');
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_target_distribution(data, path):
data_pred = pd.DataFrame({'Target value': data['y_pred']})
data_pred['source'] = 'y_pred'
data_true = pd.DataFrame({'Target value': data['y_true']})
data_true['source'] = 'y_true'
data = pd.concat([data_pred, data_true], ignore_index=True)
sns.set(style="whitegrid", font_scale=1.5)
g = sns.displot(data, x="Target value", row="source", height=9, aspect=1.5, kde=True, color="m",
facet_kws=dict(margin_titles=True))
g.fig.suptitle("Target distribution")
g.fig.tight_layout()
g.fig.subplots_adjust(top=0.95)
g.fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_error_hist(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, ax = plt.subplots(figsize=(16, 10))
g = sns.kdeplot(data['y_pred'] - data['y_true'], shade=True, color="m", ax=ax)
ax.set_xlabel('Error = y_pred - y_true')
ax.set_ylabel('Density')
ax.set_title('Error histogram');
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_reg_scatter(data, path):
sns.set(style="whitegrid", font_scale=1.5)
g = sns.jointplot(x="y_pred", y="y_true", data=data, \
kind="reg", truncate=False, color="m", \
height=14)
g.fig.suptitle("Scatter plot")
g.fig.tight_layout()
g.fig.subplots_adjust(top=0.95)
g.fig.savefig(path, bbox_inches='tight');
plt.close()
# Multiclass plots:
def plot_confusion_matrix(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, ax = plt.subplots(figsize=(16, 12))
cmat = confusion_matrix(data['y_true'], data['y_pred'], normalize='true')
g = sns.heatmap(cmat, annot=True, linewidths=.5, cmap='Purples', ax=ax)
ax.set_xlabel('y_pred')
ax.set_ylabel('y_true')
ax.set_title('Confusion matrix');
fig.savefig(path, bbox_inches='tight');
plt.close()
class ReportDeco:
"""
Decorator to wrap the :class:`~lightautoml.automl.base.AutoML` class and generate an HTML report on ``fit_predict`` and ``predict``.
Example:
>>> report_automl = ReportDeco(output_path='output_path', report_file_name='report_file_name')(automl)
>>> report_automl.fit_predict(train_data)
>>> report_automl.predict(test_data)
Report will be generated at output_path/report_file_name automatically.
Warning:
Do not use it just for inference (if you don't need the report), because:
- It needs the target variable to calculate performance metrics.
- It takes additional time to generate the report.
- A dump of the decorated automl takes more memory to store.
To get the unwrapped fitted instance for pickling
and inference, access the ``report_automl.model`` attribute.
"""
@property
def model(self):
"""Get unwrapped model.
Returns:
model.
"""
return self._model
@property
def mapping(self):
return self._model.reader.class_mapping
def __init__(self, *args, **kwargs):
"""
Note:
Valid kwargs are:
- output_path: Folder with report files.
- report_file_name: Name of main report file.
Args:
*args: Arguments.
**kwargs: Additional parameters.
"""
if not kwargs:
kwargs = {}
# self.task = kwargs.get('task', 'binary')
self.n_bins = kwargs.get('n_bins', 20)
self.template_path = kwargs.get('template_path', os.path.join(base_dir, 'lama_report_templates/'))
self.output_path = kwargs.get('output_path', 'lama_report/')
self.report_file_name = kwargs.get('report_file_name', 'lama_interactive_report.html')
if not os.path.exists(self.output_path):
os.makedirs(self.output_path, exist_ok=True)
self._base_template_path = 'lama_base_template.html'
self._model_section_path = 'model_section.html'
self._train_set_section_path = 'train_set_section.html'
self._results_section_path = 'results_section.html'
self._inference_section_path = {'binary': 'binary_inference_section.html', \
'reg': 'reg_inference_section.html', \
'multiclass': 'multiclass_inference_section.html'}
self.title = 'LAMA report'
self.sections_order = ['intro', 'model', 'train_set', 'results']
self._sections = {}
self._sections['intro'] = '<p>This report was generated automatically.</p>'
self._model_results = []
self.generate_report()
def __call__(self, model):
self._model = model
# AutoML only
self.task = self._model.task._name # valid_task_names = ['binary', 'reg', 'multiclass']
# add information to report
self._model_name = model.__class__.__name__
self._model_parameters = json2html.convert(extract_params(model))
self._model_summary = None
self._sections = {}
self._sections['intro'] = '<p>This report was generated automatically.</p>'
self._model_results = []
self._n_test_sample = 0
self._generate_model_section()
self.generate_report()
return self
def _binary_classification_details(self, data):
self._inference_content['sample_bins_table'] = get_bins_table(data)
prec, rec, F1 = plot_pie_f1_metric(data, self._F1_thresh, \
path=os.path.join(self.output_path, self._inference_content['pie_f1_metric']))
auc_score = plot_roc_curve_image(data, path=os.path.join(self.output_path, self._inference_content['roc_curve']))
plot_pr_curve_image(data, path=os.path.join(self.output_path, self._inference_content['pr_curve']))
plot_preds_distribution_by_bins(data, path=os.path.join(self.output_path, \
self._inference_content['preds_distribution_by_bins']))
plot_distribution_of_logits(data, path=os.path.join(self.output_path, \
self._inference_content['distribution_of_logits']))
return auc_score, prec, rec, F1
def _regression_details(self, data):
# graphics
plot_target_distribution(data, path=os.path.join(self.output_path, self._inference_content['target_distribution']))
plot_error_hist(data, path=os.path.join(self.output_path, self._inference_content['error_hist']))
plot_reg_scatter(data, path=os.path.join(self.output_path, self._inference_content['scatter_plot']))
# metrics
mean_ae = mean_absolute_error(data['y_true'], data['y_pred'])
median_ae = median_absolute_error(data['y_true'], data['y_pred'])
mse = mean_squared_error(data['y_true'], data['y_pred'])
r2 = r2_score(data['y_true'], data['y_pred'])
evs = explained_variance_score(data['y_true'], data['y_pred'])
return mean_ae, median_ae, mse, r2, evs
def _multiclass_details(self, data):
y_true = data['y_true']
y_pred = data['y_pred']
# precision
p_micro = precision_score(y_true, y_pred, average='micro')
p_macro = precision_score(y_true, y_pred, average='macro')
p_weighted = precision_score(y_true, y_pred, average='weighted')
# recall
r_micro = recall_score(y_true, y_pred, average='micro')
r_macro = recall_score(y_true, y_pred, average='macro')
r_weighted = recall_score(y_true, y_pred, average='weighted')
# f1-score
f_micro = f1_score(y_true, y_pred, average='micro')
f_macro = f1_score(y_true, y_pred, average='macro')
f_weighted = f1_score(y_true, y_pred, average='weighted')
# classification report for features
classes = sorted(self.mapping, key=self.mapping.get)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred)
cls_report = pd.DataFrame({'Class name': classes, 'Precision': p, 'Recall': r, 'F1-score': f, 'Support': s})
self._inference_content['classification_report'] = cls_report.to_html(index=False, float_format='{:.4f}'.format,
justify='left')
plot_confusion_matrix(data, path=os.path.join(self.output_path, self._inference_content['confusion_matrix']))
return [p_micro, p_macro, p_weighted, r_micro, r_macro, r_weighted, f_micro, f_macro, f_weighted]
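# Note: the order of the list returned above is relied on in fit_predict/predict,
# where it is written into a MultiIndex built from
# (Precision, Recall, F1-score) x (micro, macro, weighted).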
def _collect_data(self, preds, sample):
data = pd.DataFrame({'y_true': sample[self._target].values})
if self.task == 'multiclass':
if self.mapping is not None:
data['y_true'] = np.array([self.mapping[y] for y in data['y_true'].values])
data['y_pred'] = preds._data.argmax(axis=1)
else:
data['y_pred'] = preds._data[:, 0]
data.sort_values('y_pred', ascending=False, inplace=True)
data['bin'] = (np.arange(data.shape[0]) / data.shape[0] * self.n_bins).astype(int)
# remove NaN in predictions:
data = data[~data['y_pred'].isnull()]
return data
def fit_predict(self, *args, **kwargs):
"""Wrapped ``automl.fit_predict`` method.
Valid args, kwargs are the same as wrapped automl.
Args:
*args: Arguments.
**kwargs: Additional parameters.
Returns:
OOF predictions.
"""
# TODO: parameters parsing in general case
preds = self._model.fit_predict(*args, **kwargs)
train_data = kwargs["train_data"] if "train_data" in kwargs else args[0]
input_roles = kwargs["roles"] if "roles" in kwargs else args[1]
self._target = input_roles['target']
valid_data = kwargs.get("valid_data", None)
if valid_data is None:
data = self._collect_data(preds, train_data)
else:
data = self._collect_data(preds, valid_data)
self._inference_content = {}
if self.task == 'binary':
# filling for html
self._inference_content = {}
self._inference_content['roc_curve'] = 'valid_roc_curve.png'
self._inference_content['pr_curve'] = 'valid_pr_curve.png'
self._inference_content['pie_f1_metric'] = 'valid_pie_f1_metric.png'
self._inference_content['preds_distribution_by_bins'] = 'valid_preds_distribution_by_bins.png'
self._inference_content['distribution_of_logits'] = 'valid_distribution_of_logits.png'
# graphics and metrics
_, self._F1_thresh = f1_score_w_co(data)
auc_score, prec, rec, F1 = self._binary_classification_details(data)
# update model section
evaluation_parameters = ['AUC-score', \
'Precision', \
'Recall', \
'F1-score']
self._model_summary = pd.DataFrame({'Evaluation parameter': evaluation_parameters, \
'Validation sample': [auc_score, prec, rec, F1]})
elif self.task == 'reg':
# filling for html
self._inference_content['target_distribution'] = 'valid_target_distribution.png'
self._inference_content['error_hist'] = 'valid_error_hist.png'
self._inference_content['scatter_plot'] = 'valid_scatter_plot.png'
# graphics and metrics
mean_ae, median_ae, mse, r2, evs = self._regression_details(data)
# model section
evaluation_parameters = ['Mean absolute error', \
'Median absolute error', \
'Mean squared error', \
'R^2 (coefficient of determination)', \
'Explained variance']
self._model_summary = pd.DataFrame({'Evaluation parameter': evaluation_parameters, \
'Validation sample': [mean_ae, median_ae, mse, r2, evs]})
elif self.task == 'multiclass':
self._inference_content['confusion_matrix'] = 'valid_confusion_matrix.png'
index_names = np.array([['Precision', 'Recall', 'F1-score'], \
['micro', 'macro', 'weighted']])
index = pd.MultiIndex.from_product(index_names, names=['Evaluation metric', 'Average'])
summary = self._multiclass_details(data)
self._model_summary = pd.DataFrame({'Validation sample': summary}, index=index)
self._inference_content['title'] = 'Results on validation sample'
self._generate_model_section()
# generate train data section
self._train_data_overview = self._data_general_info(train_data)
self._describe_roles(train_data)
self._describe_dropped_features(train_data)
self._generate_train_set_section()
# generate fit_predict section
self._generate_inference_section(data)
self.generate_report()
return preds
def predict(self, *args, **kwargs):
"""Wrapped automl.predict method.
Valid args, kwargs are the same as wrapped automl.
Args:
*args: arguments.
**kwargs: additional parameters.
Returns:
predictions.
"""
self._n_test_sample += 1
# get predictions
test_preds = self._model.predict(*args, **kwargs)
test_data = kwargs["test"] if "test" in kwargs else args[0]
data = self._collect_data(test_preds, test_data)
if self.task == 'binary':
# filling for html
self._inference_content = {}
self._inference_content['roc_curve'] = 'test_roc_curve_{}.png'.format(self._n_test_sample)
self._inference_content['pr_curve'] = 'test_pr_curve_{}.png'.format(self._n_test_sample)
self._inference_content['pie_f1_metric'] = 'test_pie_f1_metric_{}.png'.format(self._n_test_sample)
self._inference_content['bins_preds'] = 'test_bins_preds_{}.png'.format(self._n_test_sample)
self._inference_content['preds_distribution_by_bins'] = 'test_preds_distribution_by_bins_{}.png'.format(
self._n_test_sample)
self._inference_content['distribution_of_logits'] = 'test_distribution_of_logits_{}.png'.format(self._n_test_sample)
# graphics and metrics
auc_score, prec, rec, F1 = self._binary_classification_details(data)
if self._n_test_sample >= 2:
self._model_summary['Test sample {}'.format(self._n_test_sample)] = [auc_score, prec, rec, F1]
else:
self._model_summary['Test sample'] = [auc_score, prec, rec, F1]
elif self.task == 'reg':
# filling for html
self._inference_content = {}
self._inference_content['target_distribution'] = 'test_target_distribution_{}.png'.format(self._n_test_sample)
self._inference_content['error_hist'] = 'test_error_hist_{}.png'.format(self._n_test_sample)
self._inference_content['scatter_plot'] = 'test_scatter_plot_{}.png'.format(self._n_test_sample)
# graphics
mean_ae, median_ae, mse, r2, evs = self._regression_details(data)
# update model section
if self._n_test_sample >= 2:
self._model_summary['Test sample {}'.format(self._n_test_sample)] = [mean_ae, median_ae, mse, r2, evs]
else:
self._model_summary['Test sample'] = [mean_ae, median_ae, mse, r2, evs]
elif self.task == 'multiclass':
self._inference_content['confusion_matrix'] = 'test_confusion_matrix_{}.png'.format(self._n_test_sample)
test_summary = self._multiclass_details(data)
if self._n_test_sample >= 2:
self._model_summary['Test sample {}'.format(self._n_test_sample)] = test_summary
else:
self._model_summary['Test sample'] = test_summary
# layout depends on number of test samples
if self._n_test_sample >= 2:
self._inference_content['title'] = 'Results on test sample {}'.format(self._n_test_sample)
else:
self._inference_content['title'] = 'Results on test sample'
# update model section
self._generate_model_section()
# generate predict section
self._generate_inference_section(data)
self.generate_report()
return test_preds
def _data_general_info(self, data):
general_info = pd.DataFrame(columns=['Parameter', 'Value'])
general_info.loc[0] = ('Number of records', data.shape[0])
general_info.loc[1] = ('Total number of features', data.shape[1])
general_info.loc[2] = ('Used features', len(self._model.reader._used_features))
general_info.loc[3] = ('Dropped features', len(self._model.reader._dropped_features))
# general_info.loc[4] = ('Number of positive cases', np.sum(data[self._target] == 1))
# general_info.loc[5] = ('Number of negative cases', np.sum(data[self._target] == 0))
return general_info.to_html(index=False, justify='left')
def _describe_roles(self, train_data):
# detect feature roles
roles = self._model.reader._roles
numerical_features = [feat_name for feat_name in roles if roles[feat_name].name == 'Numeric']
categorical_features = [feat_name for feat_name in roles if roles[feat_name].name == 'Category']
datetime_features = [feat_name for feat_name in roles if roles[feat_name].name == 'Datetime']
# numerical roles
numerical_features_df = []
for feature_name in numerical_features:
item = {'Feature name': feature_name}
item['NaN ratio'] = "{:.4f}".format(train_data[feature_name].isna().sum() / train_data.shape[0])
values = train_data[feature_name].dropna().values
item['min'] = np.min(values)
item['quantile_25'] = np.quantile(values, 0.25)
item['average'] = np.mean(values)
item['median'] = np.median(values)
item['quantile_75'] = np.quantile(values, 0.75)
item['max'] = np.max(values)
numerical_features_df.append(item)
if numerical_features_df == []:
self._numerical_features_table = None
else:
self._numerical_features_table = pd.DataFrame(numerical_features_df).to_html(index=False,
float_format='{:.2f}'.format,
justify='left')
# categorical roles
categorical_features_df = []
for feature_name in categorical_features:
item = {'Feature name': feature_name}
item['NaN ratio'] = "{:.4f}".format(train_data[feature_name].isna().sum() / train_data.shape[0])
value_counts = train_data[feature_name].value_counts(normalize=True)
values = value_counts.index.values
counts = value_counts.values
item['Number of unique values'] = len(counts)
item['Most frequent value'] = values[0]
item['Occurrence of most frequent'] = "{:.1f}%".format(100 * counts[0])
item['Least frequent value'] = values[-1]
item['Occurrence of least frequent'] = "{:.1f}%".format(100 * counts[-1])
categorical_features_df.append(item)
if categorical_features_df == []:
self._categorical_features_table = None
else:
self._categorical_features_table = pd.DataFrame(categorical_features_df).to_html(index=False, justify='left')
# datetime roles
datetime_features_df = []
for feature_name in datetime_features:
item = {'Feature name': feature_name}
item['NaN ratio'] = "{:.4f}".format(train_data[feature_name].isna().sum() / train_data.shape[0])
values = train_data[feature_name].dropna().values
item['min'] = np.min(values)
item['max'] = np.max(values)
item['base_date'] = self._model.reader._roles[feature_name].base_date
datetime_features_df.append(item)
if datetime_features_df == []:
self._datetime_features_table = None
else:
self._datetime_features_table = pd.DataFrame(datetime_features_df).to_html(index=False, justify='left')
import torch
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import tqdm
from rec.model.pinsage import PinSage
from rec.datasets.movielens import MovieLens
from rec.utils import cuda
from dgl import DGLGraph
import argparse
import pickle
import os
parser = argparse.ArgumentParser()
parser.add_argument('--opt', type=str, default='SGD')
parser.add_argument('--lr', type=float, default=1)
parser.add_argument('--sched', type=str, default='none')
parser.add_argument('--layers', type=int, default=2)
parser.add_argument('--use-feature', action='store_true')
parser.add_argument('--sgd-switch', type=int, default=-1)
parser.add_argument('--n-negs', type=int, default=1)
parser.add_argument('--loss', type=str, default='hinge')
parser.add_argument('--hard-neg-prob', type=float, default=0)
args = parser.parse_args()
print(args)
cache_file = 'ml.pkl'
if os.path.exists(cache_file):
with open(cache_file, 'rb') as f:
ml = pickle.load(f)
else:
ml = MovieLens('./ml-1m')
with open(cache_file, 'wb') as f:
pickle.dump(ml, f)
g = ml.g
neighbors = ml.user_neighbors + ml.movie_neighbors
n_hidden = 100
n_layers = args.layers
batch_size = 256
margin = 0.9
n_negs = args.n_negs
hard_neg_prob = args.hard_neg_prob
sched_lambda = {
'none': lambda epoch: 1,
'decay': lambda epoch: max(0.98 ** epoch, 1e-4),
}
loss_func = {
'hinge': lambda diff: (diff + margin).clamp(min=0).mean(),
'bpr': lambda diff: (1 - torch.sigmoid(-diff)).mean(),
}
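# Small illustrative sketch (not part of the training loop) of the two ranking
# losses above on made-up score differences diff = s_neg - s_pos; negative values
# mean the positive item is already ranked higher than the sampled negative.
def _demo_ranking_losses():
    diff = torch.tensor([-1.5, -0.2, 0.3])
    print('hinge:', loss_func['hinge'](diff).item())  # (diff + margin) clamped at 0, then averaged
    print('bpr  :', loss_func['bpr'](diff).item())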
model = cuda(PinSage(
g.number_of_nodes(),
[n_hidden] * (n_layers + 1),
20,
0.5,
10,
use_feature=args.use_feature,
G=g,
))
opt = getattr(torch.optim, args.opt)(model.parameters(), lr=args.lr)
sched = torch.optim.lr_scheduler.LambdaLR(opt, sched_lambda[args.sched])
def forward(model, g_prior, nodeset, train=True):
if train:
return model(g_prior, nodeset)
else:
with torch.no_grad():
return model(g_prior, nodeset)
def filter_nid(nids, nid_from):
nids = [nid.numpy() for nid in nids]
nid_from = nid_from.numpy()
np_mask = np.logical_and(*[np.isin(nid, nid_from) for nid in nids])
return [torch.from_numpy(nid[np_mask]) for nid in nids]
def runtrain(g_prior_edges, g_train_edges, train):
global opt
if train:
model.train()
else:
model.eval()
g_prior_src, g_prior_dst = g.find_edges(g_prior_edges)
g_prior = DGLGraph()
g_prior.add_nodes(g.number_of_nodes())
g_prior.add_edges(g_prior_src, g_prior_dst)
g_prior.ndata.update({k: cuda(v) for k, v in g.ndata.items()})
edge_batches = g_train_edges[torch.randperm(g_train_edges.shape[0])].split(batch_size)
with tqdm.tqdm(edge_batches) as tq:
sum_loss = 0
sum_acc = 0
count = 0
for batch_id, batch in enumerate(tq):
count += batch.shape[0]
src, dst = g.find_edges(batch)
dst_neg = []
for i in range(len(dst)):
if np.random.rand() < hard_neg_prob:
nb = torch.LongTensor(neighbors[dst[i].item()])
mask = ~(g.has_edges_between(nb, src[i].item()).byte())
dst_neg.append(np.random.choice(nb[mask].numpy(), n_negs))
else:
dst_neg.append(np.random.randint(
len(ml.user_ids), len(ml.user_ids) + len(ml.movie_ids), n_negs))
dst_neg = torch.LongTensor(dst_neg)
dst = dst.view(-1, 1).expand_as(dst_neg).flatten()
src = src.view(-1, 1).expand_as(dst_neg).flatten()
dst_neg = dst_neg.flatten()
mask = (g_prior.in_degrees(dst_neg) > 0) & \
(g_prior.in_degrees(dst) > 0) & \
(g_prior.in_degrees(src) > 0)
src = src[mask]
dst = dst[mask]
dst_neg = dst_neg[mask]
if len(src) == 0:
continue
nodeset = cuda(torch.cat([src, dst, dst_neg]))
src_size, dst_size, dst_neg_size = \
src.shape[0], dst.shape[0], dst_neg.shape[0]
h_src, h_dst, h_dst_neg = (
forward(model, g_prior, nodeset, train)
.split([src_size, dst_size, dst_neg_size]))
diff = (h_src * (h_dst_neg - h_dst)).sum(1)
loss = loss_func[args.loss](diff)
acc = (diff < 0).sum()
assert loss.item() == loss.item()
grad_sqr_norm = 0
if train:
opt.zero_grad()
loss.backward()
for name, p in model.named_parameters():
assert (p.grad != p.grad).sum() == 0
grad_sqr_norm += p.grad.norm().item() ** 2
opt.step()
sum_loss += loss.item()
sum_acc += acc.item() / n_negs
avg_loss = sum_loss / (batch_id + 1)
avg_acc = sum_acc / count
tq.set_postfix({'loss': '%.6f' % loss.item(),
'avg_loss': '%.3f' % avg_loss,
'avg_acc': '%.3f' % avg_acc,
'grad_norm': '%.6f' % np.sqrt(grad_sqr_norm)})
return avg_loss, avg_acc
def runtest(g_prior_edges, validation=True):
model.eval()
n_users = len(ml.users.index)
n_items = len(ml.movies.index)
g_prior_src, g_prior_dst = g.find_edges(g_prior_edges)
g_prior = DGLGraph()
g_prior.add_nodes(g.number_of_nodes())
g_prior.add_edges(g_prior_src, g_prior_dst)
g_prior.ndata.update({k: cuda(v) for k, v in g.ndata.items()})
hs = []
with torch.no_grad():
with tqdm.trange(n_users + n_items) as tq:
for node_id in tq:
nodeset = cuda(torch.LongTensor([node_id]))
h = forward(model, g_prior, nodeset, False)
hs.append(h)
h = torch.cat(hs, 0)
rr = []
with torch.no_grad():
with tqdm.trange(n_users) as tq:
for u_nid in tq:
uid = ml.user_ids[u_nid]
pids_exclude = ml.ratings[
(ml.ratings['user_id'] == uid) &
(ml.ratings['train'] | ml.ratings['test' if validation else 'valid'])
]['movie_id'].values
pids_candidate = ml.ratings[
(ml.ratings['user_id'] == uid) &
ml.ratings['valid' if validation else 'test']
]['movie_id'].values
pids = np.setdiff1d(ml.movie_ids, pids_exclude)
p_nids = np.array([ml.movie_ids_invmap[pid] for pid in pids])
p_nids_candidate = np.array([ml.movie_ids_invmap[pid] for pid in pids_candidate])
dst = torch.from_numpy(p_nids) + n_users
src = torch.zeros_like(dst).fill_(u_nid)
h_dst = h[dst]
h_src = h[src]
score = (h_src * h_dst).sum(1)
score_sort_idx = score.sort(descending=True)[1].cpu().numpy()
rank_map = {v: i for i, v in enumerate(p_nids[score_sort_idx])}
rank_candidates = np.array([rank_map[p_nid] for p_nid in p_nids_candidate])
rank = 1 / (rank_candidates + 1)
rr.append(rank.mean())
tq.set_postfix({'rank': rank.mean()})
return np.array(rr)
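# Note on the metric above (illustrative numbers): `rank` holds reciprocal ranks,
# so a held-out movie ranked 1st contributes 1.0 and one ranked 4th contributes
# 1/4 = 0.25; runtest() therefore returns one mean reciprocal rank per user.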
def train():
global opt, sched
best_mrr = 0
for epoch in range(500):
ml.refresh_mask()
g_prior_edges = g.filter_edges(lambda edges: edges.data['prior'])
g_train_edges = g.filter_edges(lambda edges: edges.data['train'] & ~edges.data['inv'])
g_prior_train_edges = g.filter_edges(
lambda edges: edges.data['prior'] | edges.data['train'])
print('Epoch %d validation' % epoch)
with torch.no_grad():
valid_mrr = runtest(g_prior_train_edges, True)
if best_mrr < valid_mrr.mean():
best_mrr = valid_mrr.mean()
torch.save(model.state_dict(), 'model.pt')
print(pd.Series(valid_mrr).describe())
print('Epoch %d test' % epoch)
with torch.no_grad():
test_mrr = runtest(g_prior_train_edges, False)
print(pd.Series(test_mrr).describe())
import pandas as pd
import numpy as np
import math
import warnings
import scipy.stats as st
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
from .utils import *
LULC_COLORS_DEFAULT = pd.DataFrame(
columns=['lulc', 'color'],
data=[
['Pasture','#ffc100'],
['Annual crop','#D5A6BD'],
['Tree plantation','#935132'],
['Semi-perennial crop','#C27BA0'],
['Urban infrastructure','#af2a2a'],
['Wetland','#18b08d'],
['Grassland formation','#B8AF4F'],
['Forest formation','#006400'],
['Savanna formation','#32CD32'],
['Water','#0000FF'],
['Other','#5f5f5f'],
['Perennial crop','#D5A6BD'],
['Other non-forest natural formation','#BDB76B']
]
)
class Area_Estimator:
def __init__(self, samples, weight_col, strata_col, lulc_col, id_col,
year_col, pixel_size, confidence_interval, lulc_colors=None, verbose = True):
self.samples = samples
self.weight_col = weight_col
self.lulc_col = lulc_col
self.id_col = id_col
self.year_col = year_col
self.pixel_size = pixel_size
self.strata_col = strata_col
self.verbose = verbose
self.confidence_interval = confidence_interval
# Density points according the confidence interval
self.std_norm = round(st.norm.ppf(1 - ( 1 - confidence_interval ) / 2), 2)
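# e.g. confidence_interval=0.95 gives std_norm = 1.96 (two-sided normal quantile)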
# Area in hectares
self.pixel_area = (self.pixel_size * self.pixel_size) / 10000
self.lulc_list = self._unique_vals(self.lulc_col)
self.year_list = self._unique_vals(self.year_col)
# Min and max years for the lulc change analysis
self.min_year = self.samples[self.year_col].min()
self.max_year = self.samples[self.year_col].max()
if lulc_colors is None:
self.lulc_colors = LULC_COLORS_DEFAULT
else:
self.lulc_colors = lulc_colors
def _unique_vals(self, column):
return list(self.samples[column].unique())
def _verbose(self, *args, **kwargs):
if self.verbose:
ttprint(*args, **kwargs)
def _population(self, samples):
population = (1 * samples[self.weight_col]).sum()
return population, (population * self.pixel_area)
def _calc_se(self, samples, mask, population):
def _strata_variance(samples_strata, var_map_correct_s):
nsamples_s, _ = samples_strata.shape
population_s = (1 * samples_strata[self.weight_col]).sum()
strata_var = 0
if population_s > 0:
strata_var = math.pow(population_s,2) \
* (1 - nsamples_s / population_s) \
* var_map_correct_s / nsamples_s
return strata_var
args = []
var_map_correct_s = np.var(mask.astype('int'))
for name, samples_strata in samples.groupby(self.strata_col):
args.append((samples_strata, var_map_correct_s))
glob_var = 0
for strata_var in do_parallel(_strata_variance, args, backend='threading'):
glob_var += strata_var
glob_var = 1 / math.pow(population, 2) * glob_var
glob_se = self.std_norm * math.sqrt(glob_var)
return glob_se
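# The loop above implements a stratified variance estimator of the form
#   Var(p_hat) = (1 / N^2) * sum_s N_s^2 * (1 - n_s / N_s) * s^2 / n_s
# where N is the total weighted population, N_s and n_s are the stratum population
# and sample size, and s^2 is the variance of the 0/1 indicator (computed once over
# the full mask above); the reported SE is std_norm * sqrt(Var(p_hat)).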
def _calc_area(self, samples, year, value_col, value_list, region_label):
result = []
for value in value_list:
try:
lulc_mask = (samples[value_col] == value)
samples.loc[:, 'ESTIMATOR'] = 0
samples.loc[lulc_mask, 'ESTIMATOR'] = 1
population, area_population = self._population(samples)
lulc_proportion = ((samples['ESTIMATOR'] * samples[self.weight_col]).sum()) / population
lulc_se = self._calc_se(samples, lulc_mask, population)
lulc_area = lulc_proportion * area_population
result.append([value, lulc_area, lulc_proportion, lulc_se, year, region_label])
except Exception:
self._verbose(f'_calc_area ERROR for value_col={value_col} value={value}, ' + \
f'year={year}, region_label={region_label}')
continue
return result
def _filter_samples(self, region_filter = None):
return self.samples if (region_filter is None) else self.samples[region_filter]
def _valid_year_range(self, year, n_years, backward = False):
step = -1 if backward else 1
start_year = year + step
end_year = year + (n_years * step) + 1
end_year = self.min_year - 1 if end_year < self.min_year else end_year
end_year = self.max_year + 1 if end_year > self.max_year else end_year
#if start_year == end_year:
# return [ year ]
#else:
result =[ y for y in range(start_year, end_year, step) ]
return result
def _change_mask(self, samples, lulc_arr, year, past_arr, past_nyears, future_arr, future_nyears):
past_years = self._valid_year_range(year, past_nyears, backward = True)
future_years = self._valid_year_range(year, future_nyears, backward = False)
# Considering all the samples
past_mask = np.logical_and(
self.samples[self.lulc_col].isin(past_arr),
self.samples[self.year_col].isin(past_years)
)
#print(past_arr, past_years, np.unique(past_mask, return_counts=True))
# Considering all the samples
future_mask = np.logical_and(
self.samples[self.lulc_col].isin(future_arr),
self.samples[self.year_col].isin(future_years)
)
past_fur_mask = np.logical_or(past_mask, future_mask)
n_years = len(past_years) + len(future_years)
# Considering all the samples
samples_ids = self.samples[[self.id_col]].copy()
samples_ids['past_fur_mask'] = 0
samples_ids['past_fur_mask'].loc[past_fur_mask] = 1
past_fur_agg = samples_ids[past_fur_mask][['id', 'past_fur_mask']].groupby('id').sum()
past_fur_ids = past_fur_agg[past_fur_agg['past_fur_mask'] == n_years].index
# Considering samples passed as params
change_mask = np.logical_and(
samples[self.lulc_col].isin(lulc_arr),
samples[self.id_col].isin(past_fur_ids)
)
#print('change_mask', samples.shape)
change_mask = np.logical_and(change_mask, samples[self.year_col] == year)
#print(np.unique(change_mask, return_counts=True))
return change_mask
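# Summary of the mask built above: a sample is selected for `year` when (a) its
# class in that year is in lulc_arr and (b) the same sample id was observed with a
# class from past_arr in every valid past year and from future_arr in every valid
# future year (enforced by the groupby-sum == n_years check).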
def lulc(self, lulc = None, year = None, region_label = 'Brazil', region_filter = None):
args = []
_samples = self._filter_samples(region_filter)
_lulc_list = self.lulc_list if (lulc is None) else [lulc]
_year_list = self.year_list if (year is None) else [year]
result = []
self._verbose(f'Estimating area of {len(_lulc_list)} LULC classes for {region_label} ({len(_year_list)} years)')
for _year in _year_list:
year_samples = _samples[_samples[self.year_col] == _year]
args.append((year_samples, _year, self.lulc_col, _lulc_list, region_label))
result = []
for year_result in do_parallel(self._calc_area, args):
result += year_result
self._verbose(f'Finished')
result = pd.DataFrame(result, columns=['lulc', 'area_ha', 'proportion', 'se', 'year', 'region'])
"""the simple baseline for autograph"""
import random
import os
import joblib
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import torch_geometric.utils as gtils
from collections import defaultdict
from torch_geometric.data import Data
from sklearn.model_selection import train_test_split
from scipy.stats import gmean
from models import *
from models import MODEL_PARAMETER_LIB
from utils import *
from preprocessing import *
from config import Config
from utils.ensemble import get_top_models_by_std, get_top_models_by_r
from utils.drop_edge import DropEdgeEachStep
import copy
import gc
def fix_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
logger = get_logger("INFO", use_error_log=True)
class Model:
def __init__(self):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.config = None
self.metadata = {}
self._num_nodes = None
self._origin_graph_data_indices = None
self._valid_indices = None
self._valid_mask = None
self._train_indices = None
self._train_mask = None
self._test_mask = None
self._sampler = None
self._n_class = None
self.y_train = None
self.models_topK = defaultdict(list)
self.used_model_num = 0
self.citation_configs = ['a', 'b', 'demo', 'coauthor-cs', 'coauthor-phy', 'phy10000']
self.use_adaptive_topK = True
def load_config(self, data, n_class):
dir_path = os.path.dirname(__file__)
try:
tree = joblib.load(f"{dir_path}/meta.model")
encoder = joblib.load(f"{dir_path}/meta.encoder")
# pd.set_option('display.max_columns', None)
meta_info = pd.Series(
extract_graph_feature(data, n_class)
)
logger.info("meta_info:\n {}".format(meta_info))
meta_info = pd.DataFrame([meta_info])
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
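# The dummy helpers above come in three signature families -- (table_data),
# (table_data, column) and (column_data) -- mirroring the signatures that
# CustomConstraint inspects when deciding how to call user-supplied
# transform/reverse_transform/is_valid callables; the tests below exercise each family.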
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and reverse_transform functions + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
- Run the transform function twice: first attempting the ``table_data`` and
``column`` signature, then falling back to ``column_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
- Run the reverse transform function twice: first attempting the ``table_data`` and
``column`` signature, then falling back to ``column_data`` as input.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
- Run the is valid function twice: first attempting the ``table_data`` and
``column`` signature, then falling back to ``column_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``UniqueCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because UniqueCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
UniqueCombinations(columns=columns)
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
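# Illustrative sketch (assumption): the try/except above only checks that the transformed
# column holds UUID strings, which suggests ``transform`` maps each fitted combination of
# the constraint columns to a stable UUID, roughly:
#
#     combos = table_data[['b', 'c']].apply(tuple, axis=1)
#     mapping = {combo: str(uuid.uuid4()) for combo in combos.unique()}
#     table_data['b#c'] = combos.map(mapping)
#
# ``combos`` and ``mapping`` are hypothetical names introduced only for illustration.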
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return the table data with the constraint columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
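# Illustrative sketch (assumption): ``reverse_transform`` presumably inverts the UUID
# mapping built during ``transform`` by looking each UUID up in
# ``_uuids_to_combinations`` and splitting the stored tuple back into its columns:
#
#     tuples = transformed_data['b#c'].map(instance._uuids_to_combinations)
#     recovered = pd.DataFrame(tuples.tolist(), columns=['b', 'c'])
#
# ``tuples`` and ``recovered`` are hypothetical names used only for this sketch.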
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
This method raises an error when the scalar column is a list.
Input:
- scalar_column = 0
- column_names = 'b'
Side effect:
- Raise error since the scalar column is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns == ('a',)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
assert low == ['a']
assert high == 3
assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns == ('b',)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
assert low == 3
assert high == ['b']
assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == [3]
assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == ['b', 'c']
assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
- Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
- Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
Make sure the method succeeds and returns the normalized inputs
when ``drop`` is different from ``scalar``.
Input:
- low = 'a'
- high = 0
- scalar = 'high'
- drop = 'low'
Output:
- low = ['a']
- high = 0
- constraint_columns == ('a',)
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
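# Illustrative sketch (assumption, not the library's code): taken together, the
# ``_validate_inputs`` tests suggest the method wraps the column side in a list, leaves
# the scalar side untouched and collects the column names, e.g. for low='a', high=0,
# scalar='high':
#
#     low = ['a']                       # column side normalised to a list
#     high = 0                          # scalar side left as-is
#     constraint_columns = tuple(low)   # -> ('a',)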
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'low'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
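# Illustrative sketch: the two tests above pin down the comparison operator selection,
# which presumably reduces to
#
#     operator = np.greater if strict else np.greater_equal
#
# so that ``strict=True`` rejects ties while ``strict=False`` accepts them.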
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with the '#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with the '#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned name should be the two column names joined by the '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the column names joined by the '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the column names joined by the '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
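# Illustrative sketch (assumption): the expectations above suggest a simple naming rule
# for the diff columns: when one side is a scalar, each constrained column name gets the
# '#' token appended ('a' -> 'a#', 'b#' -> 'b##'); when both sides are columns, the high
# and low names are joined by the token ('b#' and 'a' -> 'b##a').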
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
def test__fit_only_one_datetime_arg(self):
"""Test the ``Between._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is an int column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._high`` if ``instance._drop`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._low`` if ``instance._drop`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_columns`` == ['a#']
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['a#']
def test__fit__diff_columns_multiple_columns(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two columns.
Side Effect:
- ``_diff_columns`` == ['b#a']
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['b#a']
def test__fit_int(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'i' for dtype in instance._dtype])
def test__fit_float(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_datetime(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'M' for dtype in instance._dtype])
def test__fit_type__high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'high'``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_type__low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'low'``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test__fit_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], scalar='low')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
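# Illustrative sketch (assumption): for column-vs-column constraints ``is_valid``
# presumably reduces to an element-wise comparison, roughly
#
#     valid = instance.operator(table_data['b'], table_data['a'])
#
# which for the data above yields [True, False, False] with np.greater and
# [True, True, False] with np.greater_equal.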
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is multi column, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=2, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is multi column, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=2, high=['a', 'b'], strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If scalar is none, and high is multi column, then
the values in that column should all be higher than
in the low column.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low='b', high=['a', 'c'], strict=False)
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a datetime and low is a column,
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below `high`.
Output:
- True should be returned for the rows where the low
column is below `high`.
"""
# Setup
high_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low='a', high=high_dt, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [datetime(2020, 5, 17), datetime(2020, 2, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a datetime and high is a column,
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below `low`.
Output:
- True should be returned for the rows where the high
column is above `low`.
"""
# Setup
low_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low=low_dt, high='a', strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [datetime(2021, 9, 17), datetime(2021, 7, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_nans(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a NaN row, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, None, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_one_nan(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a row in which we compare one NaN value with one
non-NaN value, expect that `is_valid` returns True.
Input:
- Table with a row that contains only one NaN value.
Output:
- True should be returned for the row with the NaN value.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, 5, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test__transform_int_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
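# Illustrative sketch: the expected diff column follows directly from the documented
# formula log(high - low + 1); with a constant distance of 3 between 'a' and 'b':
#
#     np.log(table_data['b'] - table_data['a'] + 1)   # -> [log(4), log(4), log(4)]
#
# The real ``_transform`` also handles dtypes and column dropping, so treat this only as
# a paraphrase of the arithmetic being asserted.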
def test__transform_int_drop_high(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_low(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_float_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type float.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_datetime_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``_transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
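# Illustrative sketch (assumption): for datetime columns the timedelta is presumably cast
# to its integer nanosecond representation before the same log(distance + 1) formula is
# applied:
#
#     diff_ns = (table_data['b'] - table_data['a']).astype(np.int64)  # 1_000_000_000 ns
#     np.log(diff_ns + 1)                                             # -> log(1_000_000_001)
#
# which matches the constant asserted in the expected output above.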
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test__transform_high_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'high'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, scalar='high')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_low_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'low'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, scalar='low')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of the distance + 1 for each of the given columns.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(3), np.log(2), np.log(1)],
'b#': [np.log(0), np.log(-1), np.log(-2)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of the distance + 1 for each of the given columns.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(-1), np.log(0), np.log(1)],
'b#': [np.log(2), np.log(3), np.log(4)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of the distance + 1 for each of the given columns.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high='b', strict=True)
instance._diff_columns = ['a#', 'c#']
instance.constraint_columns = ['a', 'c']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'c#': [np.log(-2)] * 3,
})
pd.testing.assert_frame_equal(out, expected)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
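# Illustrative sketch: reconstructing the dropped 'b' column from the diff column amounts
# to inverting the transform, roughly
#
#     diff = np.exp(transformed['a#b']) - 1                        # -> ~3.0
#     b = (transformed['a'] + diff).astype(instance._dtype[0])     # -> [4, 5, 6]
#
# The real ``reverse_transform`` also restores column order and handles invalid rows, so
# this is only a paraphrase of the arithmetic, not the library's implementation.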
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('float')]
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
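# Illustrative sketch (assumption): for datetime columns the inverse presumably converts
# the recovered nanosecond distance back into a timedelta before adding it to 'a':
#
#     ns = np.exp(transformed['a#b']) - 1                 # ~1_000_000_000
#     b = transformed['a'] + pd.to_timedelta(ns, unit='ns')
#
# which reproduces the one-second offset asserted above.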
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, scalar='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, scalar='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
- ``_low`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [0, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 0, 0],
'b': [0, -1, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
- ``_high`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/+4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [6, 6, 4],
'b': [7, 7, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_low`` = ['a', 'c'].
- ``_high`` = ['b'].
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(-2).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high=['b'], strict=True)
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'c#']
instance._columns_to_reconstruct = ['a', 'c']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(1)] * 3,
'c#': [np.log(1)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_positive(self):
"""Test the ``GreaterThan.reverse_transform`` method for positive constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(4)],
'b#': [np.log(5), np.log(6), np.log(0)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 0],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_negative(self):
"""Test the ``GreaterThan.reverse_transform`` method for negative constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [-1, -2, 1],
'b': [-4, -5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(0)],
'b#': [np.log(5), np.log(6), np.log(2)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [-1, -2, 0],
'b': [-4, -5, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_scalar`` variable to ``'low'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
        Input:
        - strict = True
        - columns = 'a'
        - drop = False
        Side effects:
        - instance._low == 0
        - instance._high == ['a']
        - instance._strict == True
        - instance._scalar == 'low'
        - instance._drop = None
"""
# Run
instance = Positive(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Positive.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'high' when ``drop`` is ``True``.
        Input:
        - strict = True
        - columns = 'a'
        - drop = True
        Side effects:
        - instance._low == 0
        - instance._high == ['a']
        - instance._strict == True
        - instance._scalar == 'low'
        - instance._drop = 'high'
"""
# Run
instance = Positive(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_scalar`` variable to ``'high'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
        Input:
        - strict = True
        - columns = 'a'
        - drop = False
        Side effects:
        - instance._low == ['a']
        - instance._high == 0
- instance._strict == True
- instance._scalar = 'high'
- instance._drop = None
"""
# Run
instance = Negative(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Negative.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'low' when ``drop`` is ``True``.
        Input:
        - strict = True
        - columns = 'a'
        - drop = True
        Side effects:
        - instance._low == ['a']
        - instance._high == 0
- instance._strict == True
- instance._scalar = 'high'
- instance._drop = 'low'
"""
# Run
instance = Negative(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
if data['a'] is None or data['b'] is None:
return None
return data['a'] + data['b']
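# Illustrative note (not part of the original tests): applied to the frame used in the
# tests below, new_column(pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})) evaluates the
# formula element-wise and returns the Series [5, 7, 9], which is the value the
# ColumnFormula tests expect to find in column 'c'.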
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance,
import the formula to use for the computation, and
set the specified constraint column.
Input:
- column = 'col'
- formula = new_column
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
assert instance.constraint_columns == ('col', )
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column,
handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_with_nans(self):
"""Test the ``ColumnFormula.is_valid`` method for with a formula that produces nans.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, None],
'c': [5, 7, None]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test__transform(self):
"""Test the ``ColumnFormula._transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_without_dropping_column(self):
"""Test the ``ColumnFormula._transform`` method without dropping the column.
If `drop_column` is false, expect to not drop the constraint column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column, drop_column=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_missing_column(self):
"""Test the ``ColumnFormula._transform`` method when the constraint column is missing.
When ``_transform`` is called with data that does not contain the constraint column,
expect to return the data as-is.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data, unchanged (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
Input:
        - Table data with a mix of correctly and incorrectly rounded values (pandas.DataFrame)
        Output:
        - Series indicating which rows match the requested decimal places (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
Input:
        - Table data with a mix of correctly and incorrectly rounded values (pandas.DataFrame)
        Output:
        - Series indicating which rows match the requested decimal places (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
Input:
        - Table data where only some values are whole numbers (pandas.DataFrame)
        Output:
        - Series indicating which rows are already rounded to zero digits (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
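# Hedged reference sketch (an illustrative addition, not used by the tests below):
# ``Between.reverse_transform`` is expected to invert ``transform`` above by applying a
# sigmoid and rescaling back to the original [low, high] range. The name
# ``inverse_transform_reference`` is introduced here purely for documentation.
def inverse_transform_reference(data, low, high):
    """Approximate inverse of ``transform``; documents the math the tests rely on."""
    data = 1.0 / (1.0 + np.exp(-data))                  # sigmoid undoes the logit
    return (data - 0.025) / 0.95 * (high - low) + low   # undo the 0.95 scale and 0.025 offset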
class TestBetween():
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_fit_only_one_datetime_arg(self):
"""Test the ``Between.fit`` method by passing in only one arg as datetime.
If only one of the bound parameters is a datetime type, expect a ValueError.
Input:
- low is an int scalar
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
column = 'a'
low = 0.0
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high)
# Run and assert
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
with pytest.raises(ValueError):
instance.fit(table_data)
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_scalar_column(self):
"""Test the ``Between._transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_scalar(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_datetime(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as datetimes.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
- High and Low as datetimes
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_column(self):
"""Test the ``Between._transform`` method with ``low`` as datetime and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as datetime.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a#b#2021-01-01T00:00:00.000000000': transform(
table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` and ``high`` as datetime columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as scalars.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as scalar and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as scalar.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_column(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_datetime_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as datetime.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
- High and low as datetimes
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_series_equal(expected_out['b'], out['b'])
pd.testing.assert_series_equal(expected_out['a'], out['a'].astype('datetime64[ms]'))
def test_reverse_transform_datetime_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as datetime and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-02'),
pd.to_datetime('2020-08-03'),
]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as datetime.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-03'),
pd.to_datetime('2020-08-04'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
                pd.to_datetime('2020-02-01'),
                pd.to_datetime('2020-02-03'),
            ],
            'a#b#2021-01-01T00:00:00.000000000': transform(
                table_data[column], table_data[low], high)
        })
        out = instance.reverse_transform(transformed)
        # Assert
        expected_out = table_data
        pd.testing.assert_frame_equal(expected_out, out)
from datetime import datetime
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas._testing as tm
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
msg = "stop passing 'keep_tz'"
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
assert msg in str(m[0].message)
# convert to utc
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match="Length mismatch"):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = | date_range("2013", periods=6, freq="A", tz="Asia/Tokyo") | pandas.date_range |
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from MyAIGuide.utilities.dataFrameUtilities import (
subset_period,
insert_data_to_tracker_mean_steps,
adjust_var_and_place_in_data,
insert_rolling_mean_columns,
insert_relative_values_columns
)
def create_test_dataframe(start_date, num_periods):
"""This creates a dummy dataframe for testing. It has date index
starting at given start date with a number of periods.
Params:
start_date: initial date for the index
num_periods: number of index values
"""
i = pd.date_range(start_date, periods=num_periods, freq='1D')
sLength = len(i)
empty = pd.Series(np.zeros(sLength)).values
d = {
'col1': empty + 1,
'col2': empty + 3,
'tracker_mean_steps': empty
}
return pd.DataFrame(data=d, index=i)
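# Illustrative note: create_test_dataframe('2020-07-01', 2) produces a frame indexed by
# 2020-07-01 and 2020-07-02 where col1 == 1.0, col2 == 3.0 and tracker_mean_steps == 0.0;
# this baseline is what the expected dataframes in the tests below are built from.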
def test_subset_period():
# create empty (full of 0s) test dataframe
test_data = create_test_dataframe('2020-07-01', 4)
# only 1 day
period1 = ('2020-07-01', '2020-07-01')
# usual period of more than 1 day
period2 = ('2020-07-01', '2020-07-02')
# wrong period with start_date > end_date
period3 = ('2020-07-01', '2020-06-30')
# generate expected dataframes
expected_data1 = create_test_dataframe('2020-07-01', 1)
expected_data2 = create_test_dataframe('2020-07-01', 2)
expected_data3 = create_test_dataframe('2020-07-01', 0)
# run the function with the test data
result1 = subset_period(test_data, period1[0], period1[1])
result2 = subset_period(test_data, period2[0], period2[1])
# attention, function does not raise warning when start_date > end_date
result3 = subset_period(test_data, period3[0], period3[1])
# compare results and expected dataframes
assert_frame_equal(result1, expected_data1)
assert_frame_equal(result2, expected_data2)
assert_frame_equal(result3, expected_data3)
def test_insert_data_to_tracker_mean_steps():
# create empty (full of 0s) test dataframe
test_data = create_test_dataframe('2020-07-01', 4)
# only 1 day
period1 = ('2020-07-01', '2020-07-01')
# usual period of more than 1 day
period2 = ('2020-07-01', '2020-07-02')
# wrong period with start_date > end_date
period3 = ('2020-07-01', '2020-06-30')
# generate expected dataframes
expected_data1 = create_test_dataframe('2020-07-01', 4)
expected_data1['tracker_mean_steps'] = [1.0, 0.0, 0.0, 0.0]
expected_data2 = create_test_dataframe('2020-07-01', 4)
expected_data2['tracker_mean_steps'] = [1.0, 1.0, 0.0, 0.0]
expected_data3 = create_test_dataframe('2020-07-01', 4)
# run the function with the test data
result1 = insert_data_to_tracker_mean_steps(period1, test_data, 'col1', 'tracker_mean_steps')
result2 = insert_data_to_tracker_mean_steps(period2, test_data, 'col1', 'tracker_mean_steps')
# attention, function does not raise warning when start_date > end_date
result3 = insert_data_to_tracker_mean_steps(period3, test_data, 'col1', 'tracker_mean_steps')
# compare results and expected dataframes
assert_frame_equal(result1, expected_data1)
assert_frame_equal(result2, expected_data2)
assert_frame_equal(result3, expected_data3)
def test_adjust_var_and_place_in_data():
# create empty (full of 0s) test dataframe
test_data = create_test_dataframe('2020-07-01', 4)
# only 1 day
period1 = ('2020-07-01', '2020-07-01')
# usual period of more than 1 day
period2 = ('2020-07-01', '2020-07-02')
# generate expected dataframes
expected_data1 = create_test_dataframe('2020-07-01', 4)
expected_data1['tracker_mean_steps'] = [3.0, 0.0, 0.0, 0.0]
expected_data2 = create_test_dataframe('2020-07-01', 4)
expected_data2['tracker_mean_steps'] = [3.0, 3.0, 0.0, 0.0]
# run the function with the test data
result1 = adjust_var_and_place_in_data(period1, test_data, 'col1', 'col2', 'tracker_mean_steps')
result2 = adjust_var_and_place_in_data(period2, test_data, 'col1', 'col2', 'tracker_mean_steps')
# compare results and expected dataframes
    assert_frame_equal(result1, expected_data1)
    assert_frame_equal(result2, expected_data2)
#%%
import pandas as pd
import numpy as np
import requests
from datetime import datetime as dt
from io import StringIO
import os
import us
import git
from functools import reduce
from datetime import datetime, timedelta, date
#%%
def clean_df(df, date):
"""Cleans up dataframe to get only US counties (i.e. things with FIPS)"""
df.dropna(subset=['FIPS', 'Admin2'], inplace=True)
pd.options.mode.chained_assignment = None
df = df[[df.columns[0]] + list(df.columns[-5:-1])]
df.loc[:, 'Date'] = date
return df
# list all dates between two dates
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield (start_date + timedelta(n)).strftime('%m-%d-%Y')
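# Illustrative example: list(daterange(date(2020, 3, 23), date(2020, 3, 26))) yields
# ['03-23-2020', '03-24-2020', '03-25-2020'] -- the end date itself is excluded.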
# urls for data in Johns Hopkins github repository
urls = {}
# Note that JHU only started reporting county information from this date
# Which is why we start our query from 3/23/2020
start_date = date(2020, 3, 23)
end_date = date.today()
for d in daterange(start_date, end_date):
urls[d] = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/" + d + ".csv"
# Initialize dictionary to save output dataframes
output_dfs = {}
# Loop urls
for condition, url in urls.items():
# Obtain data
request = requests.get(url)
# Convert into string
txt = StringIO(request.text)
# Convert into dataframe
df = pd.read_csv(txt)
# Add to dictionary
output_dfs[condition] = clean_df(df, condition)
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
datadir = f"{homedir}/data/us/covid/"
dfs = list(output_dfs.values())
dfs = pd.concat(dfs)
#
# Copyright (C) 2014 Xinguard Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -*- coding: utf-8 -*-
import pandas as pd
import os
import re
from flask import Flask
from flask import render_template, jsonify, request, g, abort, redirect, url_for
import json
#from xinui.rest import Rest
from rest import Rest
app = Flask(__name__)
global_flow_table = {}
# Summary page
@app.route("/")
def summary():
sw_desc = Rest.get_switch_desc()
if sw_desc is False:
abort(404)
return render_template("summary.html", sw_desc=sw_desc)
# Policy page
@app.route("/policy/")
def policy():
sw_dpid_list = Rest.get_switch_list()
if sw_dpid_list is False:
abort(404)
return render_template("policy.html", sw_dpid_list=sw_dpid_list)
@app.route("/policy/<dpid>")
def dpid_policy(dpid):
sw_dpid_list = Rest.get_switch_list()
if sw_dpid_list is False or int(dpid) not in sw_dpid_list:
abort(404)
flow_table = None
flow_table = Rest.get_flow_table(dpid)
global global_flow_table
global_flow_table = flow_table
port = Rest.get_switch_port(dpid)
return render_template("policy.html", sw_dpid_list=sw_dpid_list, dpid=dpid, port=port, flow_table=flow_table)
# Topology page
@app.route("/topology")
def topology():
topo = Rest.get_topology()
return render_template("topology.html", topo=topo)
@app.route("/_query_flow/")
def query_flow(dpid):
#dpid = request.args.get("dpid")
flow_table = Rest.get_flow_table(dpid)
#print flow_table
return render_template("policy.html", flow_table=flow_table)
#return jsonify(flow_table)
@app.route("/_query_port")
def query_port():
dpid = request.args.get("dpid")
port = Rest.get_switch_port(dpid)
return jsonify(port)
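# Illustrative note (an assumption based on the parsing below, not original documentation):
# add_flow expects request.form["flow_cmd"] to hold a JSON string shaped roughly like
#   {"common": {"dpid": "1", "priority": "100", "idle": "0", "hard": "0"},
#    "match":  {"input": "1", "dl_saddr": "None", "dl_daddr": "None", "nw_saddr": "None",
#               "nw_daddr": "None", "l4_proto": "None", "sport": "None", "dport": "None",
#               "vlan_id": "None"},
#    "action": {"output": "2", "vlan_action": "None"}}
# The field names come from the parsing code below; the concrete values are made up.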
@app.route("/_add_flow", methods=["POST"])
def add_flow():
req = json.loads(request.form["flow_cmd"])
flow_cmd = {}
match_dict = {}
action_list = []
flow_cmd["dpid"] = req["common"]["dpid"]
flow_cmd["priority"] = req["common"]["priority"]
flow_cmd["idle_timeout"] = req["common"]["idle"]
flow_cmd["hard_timeout"] = req["common"]["hard"]
if req["match"]["input"] != "Any":
match_dict["in_port"] = int(req["match"]["input"])
if req["match"]["dl_saddr"] != "None":
match_dict["dl_src"] = req["match"]["dl_saddr"]
if req["match"]["dl_daddr"] != "None":
match_dict["dl_dst"] = req["match"]["dl_daddr"]
if req["match"]["nw_saddr"] != "None":
match_dict["nw_src"] = req["match"]["nw_saddr"]
if req["match"]["nw_daddr"] != "None":
match_dict["nw_dst"] = req["match"]["nw_daddr"]
if req["match"]["nw_saddr"] != "None" or req["match"]["nw_daddr"] != "None" or req["match"]["l4_proto"]:
match_dict["dl_type"] = 2048
if req["match"]["l4_proto"] != "None":
if req["match"]["l4_proto"] == "TCP":
match_dict["nw_proto"] = 6
elif req["match"]["l4_proto"] == "UDP":
match_dict["nw_proto"] = 17
if req["match"]["sport"] != "None":
match_dict["tp_src"] = int(req["match"]["sport"])
if req["match"]["dport"] != "None":
match_dict["tp_dst"] = int(req["match"]["dport"])
if req["match"]["vlan_id"] != "None":
match_dict["dl_vlan"] = int(req["match"]["vlan_id"])
if req["action"]["output"] != "Drop":
for value in req["action"]["output"].split(" "):
action_dict = {}
action_dict["port"] = int(value)
action_dict["type"] = "OUTPUT"
action_list.append(action_dict)
if req["action"]["vlan_action"] != "None":
if "Strip" in req["action"]["vlan_action"]:
action_dict = {}
action_dict["type"] = "POP_VLAN"
action_list.append(action_dict)
elif "Swap" in req["action"]["vlan_action"]:
action_dict = {}
action_dict["field"] = "vlan_vid"
action_dict["value"] = req["action"]["vlan_action"].split(" ")[-1]
action_dict["type"] = "SET_FIELD"
action_list.append(action_dict)
elif "New" in req["action"]["vlan_action"]:
push_vlan_dict = {}
push_vlan_dict["ethertype"] = 33024
push_vlan_dict["type"] = "PUSH_VLAN"
action_list.append(push_vlan_dict)
action_dict = {}
action_dict["field"] = "vlan_vid"
action_dict["value"] = req["action"]["vlan_action"].split(" ")[-1]
action_dict["type"] = "SET_FIELD"
action_list.append(action_dict)
flow_cmd["match"] = match_dict
flow_cmd["actions"] = action_list
#print json.dumps(flow_cmd)
Rest.add_flow(json.dumps(flow_cmd))
return "foo"
@app.route("/_del_flow", methods=["POST"])
def del_flow():
index = request.form["index"]
flow_cmd = {}
match_dict = {}
    for key, flows in global_flow_table.items():
        flow_cmd["dpid"] = key
        match_dict = flows[int(index)]["match"]
flow_cmd["match"] = match_dict
#print json.dumps(flow_cmd)
Rest.del_flow(json.dumps(flow_cmd))
return "foo"
@app.errorhandler(404)
def page_not_found(error):
return render_template("page_not_found.html"), 404
#CRUD
@app.route("/get")
def show_tables():
filename = 'example2.xlsx'
    data = pd.read_excel(filename, sheet_name='Sheet1')
data = data.fillna('')
return render_template('index.html',tables=[re.sub(' mytable', '" id="example', data.to_html(classes='mytable'))],
titles = ['Excel Data to Flask'])
@app.route('/insert', methods= ['POST','GET'])
def insert():
q1 = request.form['num1']
q2 = request.form['num2']
print(q1,q2)
df = pd.DataFrame({'a': [q1],
'b': [q2]})
book = pd.read_excel('example2.xlsx')
    writer = pd.ExcelWriter('example2.xlsx', engine='openpyxl')
from __future__ import absolute_import
import pytest
skimage = pytest.importorskip("skimage")
import numpy as np
import pandas as pd
from datashader.bundling import directly_connect_edges, hammer_bundle
from datashader.layout import circular_layout, forceatlas2_layout, random_layout
@pytest.fixture
def nodes():
# Four nodes arranged at the corners of a 200x200 square with one node
# at the center
nodes_df = pd.DataFrame({'id': np.arange(5),
'x': [0, -100, 100, -100, 100],
'y': [0, 100, 100, -100, -100]})
nodes_df.set_index('id')
return nodes_df
@pytest.fixture
def edges():
# Four edges originating from the center node and connected to each
# corner
edges_df = pd.DataFrame({'id': np.arange(4),
'source': np.zeros(4, dtype=int),
'target': np.arange(1, 5)})
edges_df.set_index('id')
return edges_df
@pytest.fixture
def weighted_edges():
# Four weighted edges originating from the center node and connected
# to each corner
edges_df = pd.DataFrame({'id': np.arange(4),
'source': np.zeros(4, dtype=int),
'target': np.arange(1, 5),
'weight': np.ones(4)})
edges_df.set_index('id')
return edges_df
def test_immutable_nodes(nodes, edges):
# Expect nodes to remain immutable after any bundling operation
original = nodes.copy()
directly_connect_edges(nodes, edges)
assert original.equals(nodes)
@pytest.mark.parametrize('bundle', [directly_connect_edges, hammer_bundle])
def test_renamed_columns(nodes, weighted_edges, bundle):
nodes = nodes.rename(columns={'x': 'xx', 'y': 'yy'})
edges = weighted_edges.rename(columns={'source': 'src', 'target': 'dst', 'weight': 'w'})
df = bundle(nodes, edges, x='xx', y='yy', source='src', target='dst', weight='w')
assert 'xx' in df and 'x' not in df
assert 'yy' in df and 'y' not in df
assert 'w' in df and 'weight' not in df
@pytest.mark.parametrize('bundle', [directly_connect_edges, hammer_bundle])
@pytest.mark.parametrize('layout', [random_layout, circular_layout, forceatlas2_layout])
def test_same_path_endpoints(layout, bundle):
# Expect path endpoints to match original edge source/target
edges = pd.DataFrame({'id': [0], 'source': [0], 'target': [1]}).set_index('id')
nodes = pd.DataFrame({'id': np.unique(edges.values)}).set_index('id')
node_positions = layout(nodes, edges)
bundled = bundle(node_positions, edges)
source, target = edges.iloc[0]
expected_source = node_positions.loc[source]
expected_target = node_positions.loc[target]
actual_source = bundled.loc[0]
actual_target = bundled.loc[len(bundled)-2]
assert np.allclose(expected_source, actual_source)
assert np.allclose(expected_target, actual_target)
@pytest.mark.parametrize("include_edge_id", [True, False])
def test_directly_connect_with_weights(nodes, weighted_edges, include_edge_id):
    # Expect four lines starting at the center node (0, 0), each terminating
    # at a different corner, with a NaN row separating the individual edges
data = pd.DataFrame({'edge_id':
[1.0, 1.0, np.nan, 2.0, 2.0, np.nan,
3.0, 3.0, np.nan, 4.0, 4.0, np.nan],
'x':
[0.0, -100.0, np.nan, 0.0, 100.0, np.nan,
0.0, -100.0, np.nan, 0.0, 100.0, np.nan],
'y':
[0.0, 100.0, np.nan, 0.0, 100.0, np.nan,
0.0, -100.0, np.nan, 0.0, -100.0, np.nan]})
columns = ['edge_id', 'x', 'y'] if include_edge_id else ['x', 'y']
expected = | pd.DataFrame(data, columns=columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
#from urllib import urlopen # python2
#import urllib2 # python2
import urllib.request as urllib2
#import StringIO python2
from io import StringIO, BytesIO
import gzip
import pybedtools
from pybedtools import BedTool
from .gtf import GTFtoBED
from .gtf import readGTF
from .gtf import retrieve_GTF_field
import sys
def writeBED(inBED, file_path):
"""
Writes a bed dataframe into a bed file.
Bed format: 'chrom','chromStart','chromEnd','name','score','strand'
:param inBED: bed dataframe to be written.
:param file_path: /path/to/file.bed
:returns: nothing
"""
inBED.to_csv(file_path,index=None,sep="\t",header=None)
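# Usage sketch for writeBED (the DataFrame contents and output path below are
# illustrative only, not taken from this module):
#
#   bed_df = pd.DataFrame([["chr1", 100, 200, "peak1", 0, "+"]],
#                         columns=["chrom", "chromStart", "chromEnd", "name", "score", "strand"])
#   writeBED(bed_df, "/tmp/peaks.bed")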
def GetBEDnarrowPeakgz(URL_or_PATH_TO_file):
"""
Reads a gz compressed BED narrow peak file from a web address or local file
    :param URL_or_PATH_TO_file: web address or path to local file
:returns: a Pandas dataframe
"""
    if os.path.isfile(URL_or_PATH_TO_file):
        response = open(URL_or_PATH_TO_file, "rb")
        compressedFile = BytesIO(response.read())
    else:
        response = urllib2.urlopen(URL_or_PATH_TO_file)
        compressedFile = BytesIO(response.read())
    decompressedFile = gzip.GzipFile(fileobj=compressedFile)
    out = decompressedFile.read().decode("utf-8").split("\n")
out=[ s.split("\t") for s in out]
out= | pd.DataFrame(out) | pandas.DataFrame |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import UserList
import io
import pathlib
import pytest
import socket
import threading
import weakref
import numpy as np
import pyarrow as pa
from pyarrow.tests.util import changed_environ
try:
from pandas.testing import assert_frame_equal, assert_series_equal
import pandas as pd
except ImportError:
pass
class IpcFixture:
write_stats = None
def __init__(self, sink_factory=lambda: io.BytesIO()):
self._sink_factory = sink_factory
self.sink = self.get_sink()
def get_sink(self):
return self._sink_factory()
def get_source(self):
return self.sink.getvalue()
def write_batches(self, num_batches=5, as_table=False):
nrows = 5
schema = pa.schema([('one', pa.float64()), ('two', pa.utf8())])
writer = self._get_writer(self.sink, schema)
batches = []
for i in range(num_batches):
batch = pa.record_batch(
[np.random.randn(nrows),
['foo', None, 'bar', 'bazbaz', 'qux']],
schema=schema)
batches.append(batch)
if as_table:
table = pa.Table.from_batches(batches)
writer.write_table(table)
else:
for batch in batches:
writer.write_batch(batch)
self.write_stats = writer.stats
writer.close()
return batches
class FileFormatFixture(IpcFixture):
is_file = True
options = None
def _get_writer(self, sink, schema):
return pa.ipc.new_file(sink, schema, options=self.options)
def _check_roundtrip(self, as_table=False):
batches = self.write_batches(as_table=as_table)
file_contents = pa.BufferReader(self.get_source())
reader = pa.ipc.open_file(file_contents)
assert reader.num_record_batches == len(batches)
for i, batch in enumerate(batches):
            # it works; compare each re-read batch against the original
batch = reader.get_batch(i)
assert batches[i].equals(batch)
assert reader.schema.equals(batches[0].schema)
assert isinstance(reader.stats, pa.ipc.ReadStats)
assert isinstance(self.write_stats, pa.ipc.WriteStats)
assert tuple(reader.stats) == tuple(self.write_stats)
class StreamFormatFixture(IpcFixture):
# ARROW-6474, for testing writing old IPC protocol with 4-byte prefix
use_legacy_ipc_format = False
# ARROW-9395, for testing writing old metadata version
options = None
is_file = False
def _get_writer(self, sink, schema):
return pa.ipc.new_stream(
sink,
schema,
use_legacy_format=self.use_legacy_ipc_format,
options=self.options,
)
class MessageFixture(IpcFixture):
def _get_writer(self, sink, schema):
return pa.RecordBatchStreamWriter(sink, schema)
@pytest.fixture
def ipc_fixture():
return IpcFixture()
@pytest.fixture
def file_fixture():
return FileFormatFixture()
@pytest.fixture
def stream_fixture():
return StreamFormatFixture()
@pytest.fixture(params=[
pytest.param(
pytest.lazy_fixture('file_fixture'),
id='File Format'
),
pytest.param(
pytest.lazy_fixture('stream_fixture'),
id='Stream Format'
)
])
def format_fixture(request):
return request.param
def test_empty_file():
buf = b''
with pytest.raises(pa.ArrowInvalid):
pa.ipc.open_file(pa.BufferReader(buf))
def test_file_simple_roundtrip(file_fixture):
file_fixture._check_roundtrip(as_table=False)
def test_file_write_table(file_fixture):
file_fixture._check_roundtrip(as_table=True)
@pytest.mark.parametrize("sink_factory", [
lambda: io.BytesIO(),
lambda: pa.BufferOutputStream()
])
def test_file_read_all(sink_factory):
fixture = FileFormatFixture(sink_factory)
batches = fixture.write_batches()
file_contents = pa.BufferReader(fixture.get_source())
reader = pa.ipc.open_file(file_contents)
result = reader.read_all()
expected = pa.Table.from_batches(batches)
assert result.equals(expected)
def test_open_file_from_buffer(file_fixture):
# ARROW-2859; APIs accept the buffer protocol
file_fixture.write_batches()
source = file_fixture.get_source()
reader1 = pa.ipc.open_file(source)
reader2 = pa.ipc.open_file(pa.BufferReader(source))
reader3 = pa.RecordBatchFileReader(source)
result1 = reader1.read_all()
result2 = reader2.read_all()
result3 = reader3.read_all()
assert result1.equals(result2)
assert result1.equals(result3)
st1 = reader1.stats
assert st1.num_messages == 6
assert st1.num_record_batches == 5
assert reader2.stats == st1
assert reader3.stats == st1
@pytest.mark.pandas
def test_file_read_pandas(file_fixture):
frames = [batch.to_pandas() for batch in file_fixture.write_batches()]
file_contents = pa.BufferReader(file_fixture.get_source())
reader = pa.ipc.open_file(file_contents)
result = reader.read_pandas()
expected = | pd.concat(frames) | pandas.concat |
"""
Test for the normalization operation
"""
from datetime import datetime
from unittest import TestCase
import numpy as np
import pandas as pd
import pyproj
import xarray as xr
from jdcal import gcal2jd
from numpy.testing import assert_array_almost_equal
from xcube.core.gridmapping import GridMapping
from xcube.core.new import new_cube
from xcube.core.normalize import DatasetIsNotACubeError
from xcube.core.normalize import adjust_spatial_attrs
from xcube.core.normalize import decode_cube
from xcube.core.normalize import encode_cube
from xcube.core.normalize import normalize_coord_vars
from xcube.core.normalize import normalize_dataset
from xcube.core.normalize import normalize_missing_time
# noinspection PyPep8Naming
def assertDatasetEqual(expected, actual):
    # this method is functionally equivalent to
    # `assert expected == actual`, but it includes both datasets
    # in the assertion message, which makes debugging failures easier
assert expected.equals(actual), (expected, actual)
class DecodeCubeTest(TestCase):
def test_cube_stays_cube(self):
dataset = new_cube(variables=dict(a=1, b=2, c=3))
cube, grid_mapping, rest = decode_cube(dataset)
self.assertIs(dataset, cube)
self.assertIsInstance(grid_mapping, GridMapping)
self.assertTrue(grid_mapping.crs.is_geographic)
self.assertIsInstance(rest, xr.Dataset)
self.assertEqual(set(), set(rest.data_vars))
def test_no_cube_vars_are_dropped(self):
dataset = new_cube(variables=dict(a=1, b=2, c=3))
dataset = dataset.assign(
d=xr.DataArray([8, 9, 10], dims='level'),
crs=xr.DataArray(0, attrs=pyproj.CRS.from_string('CRS84').to_cf()),
)
self.assertEqual({'a', 'b', 'c', 'd', 'crs'}, set(dataset.data_vars))
cube, grid_mapping, rest = decode_cube(dataset)
self.assertIsInstance(cube, xr.Dataset)
self.assertIsInstance(grid_mapping, GridMapping)
self.assertEqual({'a', 'b', 'c'}, set(cube.data_vars))
self.assertEqual(pyproj.CRS.from_string('CRS84'), grid_mapping.crs)
self.assertIsInstance(rest, xr.Dataset)
self.assertEqual({'d', 'crs'}, set(rest.data_vars))
def test_encode_is_inverse(self):
dataset = new_cube(variables=dict(a=1, b=2, c=3),
x_name='x', y_name='y')
dataset = dataset.assign(
d=xr.DataArray([8, 9, 10], dims='level'),
crs=xr.DataArray(0, attrs=pyproj.CRS.from_string('CRS84').to_cf()),
)
cube, grid_mapping, rest = decode_cube(dataset)
dataset2 = encode_cube(cube, grid_mapping, rest)
self.assertEqual(set(dataset.data_vars), set(dataset2.data_vars))
self.assertIn('crs', dataset2.data_vars)
def test_no_cube_vars_found(self):
dataset = new_cube()
self.assertEqual(set(), set(dataset.data_vars))
with self.assertRaises(DatasetIsNotACubeError) as cm:
decode_cube(dataset, force_non_empty=True)
self.assertEqual("No variables found with dimensions"
" ('time', [...] 'lat', 'lon')"
" or dimension sizes too small",
f'{cm.exception}')
def test_no_grid_mapping(self):
dataset = xr.Dataset(dict(a=[1, 2, 3], b=0.5))
with self.assertRaises(DatasetIsNotACubeError) as cm:
decode_cube(dataset)
self.assertEqual("Failed to detect grid mapping:"
" cannot find any grid mapping in dataset",
f'{cm.exception}')
def test_grid_mapping_not_geographic(self):
dataset = new_cube(x_name='x', y_name='y',
variables=dict(a=0.5), crs='epsg:25832')
with self.assertRaises(DatasetIsNotACubeError) as cm:
decode_cube(dataset, force_geographic=True)
self.assertEqual("Grid mapping must use geographic CRS,"
" but was 'ETRS89 / UTM zone 32N'",
f'{cm.exception}')
class EncodeCubeTest(TestCase):
def test_geographical_crs(self):
cube = new_cube(variables=dict(a=1, b=2, c=3))
gm = GridMapping.from_dataset(cube)
dataset = encode_cube(cube, gm)
self.assertIs(cube, dataset)
dataset = encode_cube(cube, gm,
xr.Dataset(dict(d=True)))
self.assertIsInstance(dataset, xr.Dataset)
self.assertEqual({'a', 'b', 'c', 'd'}, set(dataset.data_vars))
def test_non_geographical_crs(self):
cube = new_cube(x_name='x',
y_name='y',
crs='epsg:25832',
variables=dict(a=1, b=2, c=3))
gm = GridMapping.from_dataset(cube)
dataset = encode_cube(cube,
gm,
xr.Dataset(dict(d=True)))
self.assertIsInstance(dataset, xr.Dataset)
self.assertEqual({'a', 'b', 'c', 'd', 'crs'}, set(dataset.data_vars))
class TestNormalize(TestCase):
def test_normalize_zonal_lat_lon(self):
resolution = 10
lat_size = 3
lat_coords = np.arange(0, 30, resolution)
lon_coords = [i + 5. for i in np.arange(-180.0, 180.0, resolution)]
lon_size = len(lon_coords)
one_more_dim_size = 2
one_more_dim_coords = np.random.random(2)
var_values_1_1d = xr.DataArray(np.random.random(lat_size),
coords=[('latitude_centers', lat_coords)],
dims=['latitude_centers'],
attrs=dict(chunk_sizes=[lat_size],
dimensions=['latitude_centers']))
var_values_1_1d.encoding = {'chunks': (lat_size,)}
var_values_1_2d = xr.DataArray(np.array([var_values_1_1d.values for _ in lon_coords]).T,
coords={'lat': lat_coords, 'lon': lon_coords},
dims=['lat', 'lon'],
attrs=dict(chunk_sizes=[lat_size, lon_size],
dimensions=['lat', 'lon']))
var_values_1_2d.encoding = {'chunks': (lat_size, lon_size)}
var_values_2_2d = xr.DataArray(np.random.random(lat_size * one_more_dim_size).
reshape(lat_size, one_more_dim_size),
coords={'latitude_centers': lat_coords,
'one_more_dim': one_more_dim_coords},
dims=['latitude_centers', 'one_more_dim'],
attrs=dict(chunk_sizes=[lat_size, one_more_dim_size],
dimensions=['latitude_centers', 'one_more_dim']))
var_values_2_2d.encoding = {'chunks': (lat_size, one_more_dim_size)}
var_values_2_3d = xr.DataArray(np.array([var_values_2_2d.values for _ in lon_coords]).T,
coords={'one_more_dim': one_more_dim_coords,
'lat': lat_coords,
'lon': lon_coords, },
dims=['one_more_dim', 'lat', 'lon', ],
attrs=dict(chunk_sizes=[one_more_dim_size,
lat_size,
lon_size],
dimensions=['one_more_dim', 'lat', 'lon']))
var_values_2_3d.encoding = {'chunks': (one_more_dim_size, lat_size, lon_size)}
dataset = xr.Dataset({'first': var_values_1_1d, 'second': var_values_2_2d})
expected = xr.Dataset({'first': var_values_1_2d, 'second': var_values_2_3d})
expected = expected.assign_coords(
lon_bnds=xr.DataArray([[i - (resolution / 2), i + (resolution / 2)] for i in expected.lon.values],
dims=['lon', 'bnds']))
expected = expected.assign_coords(
lat_bnds=xr.DataArray([[i - (resolution / 2), i + (resolution / 2)] for i in expected.lat.values],
dims=['lat', 'bnds']))
actual = normalize_dataset(dataset)
xr.testing.assert_equal(actual, expected)
self.assertEqual(actual.first.chunk_sizes, expected.first.chunk_sizes)
self.assertEqual(actual.second.chunk_sizes, expected.second.chunk_sizes)
def test_normalize_lon_lat_2d(self):
"""
Test nominal execution
"""
dims = ('time', 'y', 'x')
attrs = {'valid_min': 0., 'valid_max': 1.}
t_size = 2
y_size = 3
x_size = 4
a_data = np.random.random_sample((t_size, y_size, x_size))
b_data = np.random.random_sample((t_size, y_size, x_size))
time_data = [1, 2]
lat_data = [[10., 10., 10., 10.],
[20., 20., 20., 20.],
[30., 30., 30., 30.]]
lon_data = [[-10., 0., 10., 20.],
[-10., 0., 10., 20.],
[-10., 0., 10., 20.]]
dataset = xr.Dataset({'a': (dims, a_data, attrs),
'b': (dims, b_data, attrs)
},
{'time': (('time',), time_data),
'lat': (('y', 'x'), lat_data),
'lon': (('y', 'x'), lon_data)
},
{'geospatial_lon_min': -15.,
'geospatial_lon_max': 25.,
'geospatial_lat_min': 5.,
'geospatial_lat_max': 35.
}
)
new_dims = ('time', 'lat', 'lon')
expected = xr.Dataset({'a': (new_dims, a_data, attrs),
'b': (new_dims, b_data, attrs)},
{'time': (('time',), time_data),
'lat': (('lat',), [10., 20., 30.]),
'lon': (('lon',), [-10., 0., 10., 20.]),
},
{'geospatial_lon_min': -15.,
'geospatial_lon_max': 25.,
'geospatial_lat_min': 5.,
'geospatial_lat_max': 35.})
actual = normalize_dataset(dataset)
xr.testing.assert_equal(actual, expected)
def test_normalize_lon_lat(self):
"""
Test nominal execution
"""
dataset = xr.Dataset({'first': (['latitude',
'longitude'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['lat', 'lon'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize_dataset(dataset)
assertDatasetEqual(actual, expected)
dataset = xr.Dataset({'first': (['lat', 'long'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['lat', 'lon'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize_dataset(dataset)
assertDatasetEqual(actual, expected)
dataset = xr.Dataset({'first': (['latitude',
'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['lat', 'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize_dataset(dataset)
assertDatasetEqual(actual, expected)
dataset = xr.Dataset({'first': (['zef', 'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['zef', 'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize_dataset(dataset)
assertDatasetEqual(actual, expected)
def test_normalize_does_not_reorder_increasing_lat(self):
first = np.zeros([3, 45, 90])
first[0, :, :] = np.eye(45, 90)
ds = xr.Dataset({
'first': (['time', 'lat', 'lon'], first),
'second': (['time', 'lat', 'lon'], np.zeros([3, 45, 90])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 4)]}).chunk(
chunks={'time': 1})
actual = normalize_dataset(ds)
xr.testing.assert_equal(actual, ds)
def test_normalize_with_missing_time_dim(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101',
'time_coverage_end': '20121231'})
norm_ds = normalize_dataset(ds)
self.assertIsNot(norm_ds, ds)
self.assertEqual(len(norm_ds.coords), 4)
self.assertIn('lon', norm_ds.coords)
self.assertIn('lat', norm_ds.coords)
self.assertIn('time', norm_ds.coords)
self.assertIn('time_bnds', norm_ds.coords)
self.assertEqual(norm_ds.first.shape, (1, 90, 180))
self.assertEqual(norm_ds.second.shape, (1, 90, 180))
self.assertEqual(norm_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2012-07-01T12:00:00')))
self.assertEqual(norm_ds.coords['time_bnds'][0][0], xr.DataArray( | pd.to_datetime('2012-01-01') | pandas.to_datetime |
# -*- coding: utf-8 -*-
# Read specimen test result data (per patient) and output specimen test result data (per test item)
#  └→ RS_Base_labo files
#
# Input files
#  └→ patient master file: name.csv
#  └→ specimen test result data files: <patient ID>.txt (e.g. 101.txt, 102.txt, 103.txt, ...)
#
# Create 2017/07/09 : Update 2017/07/09
# Author Katsumi.Oshiro
import csv              # csv module (reading/writing CSV files)
import glob             # glob module (filename pattern matching)
import pandas as pd     # pandas module
import os               # os module
print('# RS_Base_laboデータによる検体検査データの作成(START)')
# Build a dictionary mapping patient ID to date of birth
birth = {}
# Read the patient master (name.csv)
count = 0
with open('../data/name.csv', 'r')as f:
reader = csv.reader(f)
for row in reader:
# print(row[0],row[1],row[2],row[3])
birth.update({row[0]:row[3]})
count += 1
print('# 患者マスター読み込み件数--------->', count)
# Dictionary (birth): lookup test for the date of birth of patient ID 679
print('# 辞書テスト:患者ID:679の生年月日->', birth["679"])
# Age calculation test (patient ID 679)
today = int(pd.to_datetime('today').strftime('%Y%m%d'))
birthday = int(pd.to_datetime(birth["679"]).strftime('%Y%m%d'))
print('# 年齢テスト:患者ID:697の年齢----->', int((today - birthday)/ 10000))
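# Worked example of the YYYYMMDD subtraction trick (illustrative dates, not taken from the data files):
# a patient born on 1980-12-25 evaluated on 2017-07-09 gives int((20170709 - 19801225) / 10000) = 36,
# i.e. the age in completed years.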
# Collect the specimen test file names in the folder (wildcards are allowed)
txt_file = glob.glob('../data/labo/*.txt')
blood = input('# 検体検査名を入力して下さい:')
count = 0
# Create the specimen test result file (per test item)
# Source data: low[0] birth date, low[1] patient ID, low[2] name, low[3] sex, low[5] test item name, low[6] judgement, low[10] result value
with open("../data/labo/" + blood + ".csv", "w") as f:
writer = csv.writer(f, lineterminator='\n')
header = []
header.append('患者ID')
header.append('年齢')
header.append('基準値(男)')
header.append('基準値(女)')
header.append('基準値以下(男)')
header.append('基準値以下(女)')
header.append('基準値以上(男)')
header.append('基準値以上(女)')
header.append('私')
writer.writerow(header)
for file_name in txt_file:
with open(file_name, 'r')as f2:
reader = csv.reader(f2)
for low in reader:
if low[5] == blood:
writer = csv.writer(f, lineterminator='\n')
listdata = []
listdata.append(low[1])
today = int(pd.to_datetime(low[0]).strftime('%Y%m%d'))
birthday = int( | pd.to_datetime(birth[low[1]]) | pandas.to_datetime |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# In[211]:
import uuid
from pathlib import Path
import pandas_profiling
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import neptune.new as neptune
import neptune.new.types
import seaborn as sns
import sklearn.ensemble
import sklearn.metrics
import sklearn.model_selection
import sklearn.preprocessing
import xgboost as xgb
from IPython.display import display
# In[5]:
run_id = uuid.uuid4()
# In[ ]:
# Include source files.
# source_files=["model.py", "prep_data.py"]
# In[10]:
run = neptune.init(
name=f'run_{run_id}',
# source_files=source_files,
)
# In[13]:
# https://numpy.org/doc/stable/reference/generated/numpy.set_printoptions.html
np.set_printoptions(suppress=True) # Print floating point numbers using fixed point notation.
# https://pandas.pydata.org/pandas-docs/stable/user_guide/options.html
# Print out the full DataFrame repr for wide DataFrames across multiple lines.
| pd.set_option('display.expand_frame_repr', True) | pandas.set_option |
import datetime
import math
import os
import glob
import matplotlib.pyplot as plt
import pandas as pd
# Instructions at the bottom of the script
if False:
import matplotlib
    matplotlib.rcParams['interactive'] = True
matplotlib.use('MacOSX')
save = True
only_agg = True
fsize = (8, 6)
plot_name = "DEFAULT"
comp_with_flink = False
no_title = True
sort_sinks = True
def extractInfo(sink_name: str, socket=''):
prefix = 'sock ' + socket.split('socket')[1].split('_')[0] if socket else ''
if 'flink' in sink_name:
return prefix + ' flink ' + sink_name.split('_')[3] + ' f: ' + \
sink_name.split('%')[1]
type = sink_name.split('_')[2]
param_list = sink_name.split('%')[1:8]
additions = param_list[0]
removals = param_list[2]
wait = param_list[4]
fixed = param_list[6]
if comp_with_flink:
return prefix + ' ' + type + ' f:' + fixed
else:
return prefix + ' ' + type + ' a:' + additions + ' r:' + removals + ' ' \
'w:' + wait + ' f:' + fixed
def make_plot_name(prefix, newline=False):
if no_title:
return ""
return prefix + ' ' + ' '.join(n for n in ' '.join(plot_name.split(
'V')).split(' ') if not n.isdecimal()) + '\n' if newline else ''
def extractInfoWithNumber(sink_name: str):
base = extractInfo(sink_name)
return 'sink ' + sink_name.split('_')[1] + base
def saveIfNeeded(plt, name, skip=False):
if save and not skip:
plt.savefig('chart_' + name + '.pdf', format='pdf', bbox_inches='tight')
def should_take_sink(name: str):
should = False
for element in must_contain:
if element in name:
should = True
for element in must_not_contain:
if element in name:
should = False
return should
def read_mulitple_dir(directories: list):
l_df_sinks = []
l_df_sockets = []
l_matches = []
l_filenameSinks = []
l_filenameSockets = []
for dire in directories:
a_df_sinks, a_df_sockets, a_matches, a_filenameSinks, a_filenameSockets = \
read_and_match_files(dire)
l_df_sinks.extend(a_df_sinks)
l_df_sockets.extend(a_df_sockets)
l_matches.extend(a_matches)
l_filenameSinks.extend(a_filenameSinks)
l_filenameSockets.extend(a_filenameSockets)
return l_df_sinks, l_df_sockets, l_matches, l_filenameSinks, l_filenameSockets
def read_and_match_files(directory: str):
run_dir = r"~/hdes-data/" + directory
filenameSockets = sorted([file for file in glob.glob(
os.path.expanduser(run_dir) + os.sep + 'socket*')])
filenameSinks = sorted([file for file in glob.glob(
os.path.expanduser(run_dir) + os.sep + 'sink*')])
matches = []
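    # Pair each socket (throughput) file with the sink (latency) file whose
    # HH-MM-SS timestamp, parsed from the filename suffix, is closest in time.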
for sourceName in filenameSockets:
sourceTime = list(map(int, sourceName.split('_')[-1][0:-4].split('-')))
sourceTime = datetime.datetime(2020, 1, 1, sourceTime[0], sourceTime[
1], sourceTime[2]).timestamp()
candidate = filenameSinks[0]
candidateTime = list(
map(int, candidate.split('_')[-1][1:-4].split('-')))
candidateTime = datetime.datetime(2020, 1, 1, candidateTime[0],
candidateTime[
1], candidateTime[2]).timestamp()
for name in filenameSinks[1:]:
time = list(map(int, name.split('_')[-1][1:-4].split('-')))
time = datetime.datetime(2020, 1, 1, time[0], time[
1], time[2]).timestamp()
if math.fabs(sourceTime - time) < math.fabs(
sourceTime - candidateTime):
candidateTime = time
candidate = name
matches.append((candidate, sourceName))
print('Sockets and Matches:', matches)
df_sockets = []
remaining_matches = []
for sinkFile, socketFile in matches:
if should_take_sink(sinkFile):
try:
df_sockets.append(
pd.read_csv(socketFile, header=0, sep=",", dtype={
'seconds': 'Int64', 'events': 'Int64'}))
except Exception as e:
matches = [m for m in matches if socketFile not in m]
print('Skipped ', socketFile)
print(e)
remaining_matches.append((sinkFile, socketFile))
matches = remaining_matches
# Event Time, Processing Time, Ejection Time
df_sinks = []
existingSinks = []
for sinkFile in filenameSinks:
if should_take_sink(sinkFile):
try:
if 'flink' in sinkFile:
flink_df = pd.read_csv(sinkFile, header=None,
names=['eventTime',
'processingTime',
'ejectionTime'
], sep=",", dtype={
'eventTime': 'Int64', 'processingTime': 'Int64',
'ejectionTime': 'Int64'})[:-1]
if flink_df.shape[0] > 0:
df_sinks.append(flink_df)
else:
raise Exception("empty csv")
else:
df_sinks.append(
pd.read_csv(sinkFile, header=0, sep=",", dtype={
'eventTime': 'Int64', 'processingTime': 'Int64',
'ejectionTime': 'Int64'})[:-1])
existingSinks.append(sinkFile)
except Exception as e:
print('Skipped ', sinkFile)
print(e)
filenameSinks = existingSinks
matches = [extractInfo(m, sock) for m, sock in matches]
print(matches)
return df_sinks, df_sockets, matches, filenameSinks, filenameSockets
# Ingested Tuples sum
def ingested_tuples_sum():
sum = {'name': [], 'events': []}
for df, name, match in zip(df_sockets, filenameSockets, matches):
sum['name'].append(match)
sum['events'].append(df['events'].sum())
sumdf = pd.DataFrame(sum)
sumdf.plot(kind='bar', x='name', y='events',
title=make_plot_name('#Ingested Tuples'))
plt.ylabel('#events')
saveIfNeeded(plt, 'IngestedTuples')
plt.show()
def ingested_tuples_sum_collapsed_bar():
sums = {}
for df, name, match in zip(df_sockets, filenameSockets, matches):
if sums.get(match[6:]) is not None:
new_df = sums[match[6:]] + df['events']
sums[match[6:]] = new_df
else:
sums[match[6:]] = df['events']
sumdf = pd.DataFrame.from_dict(sums)
sumdf.plot(kind='bar',
title=make_plot_name('#Ingested Tuples'))
plt.ylabel('Events per second')
plt.title(make_plot_name('Events per second'))
saveIfNeeded(plt, 'IngestedTuplesBar' + plot_name)
plt.show()
# Ingested Tuples sum
def ingested_tuples_sum_collapsed_box():
sums = {}
for df, name, match in zip(df_sockets, filenameSockets, matches):
if sums.get(match[6:]) is not None:
new_df = sums[match[6:]] + df['events']
sums[match[6:]] = new_df
else:
sums[match[6:]] = df['events']
sumdf = pd.DataFrame.from_dict(sums)
sumdf.boxplot(rot=90, showfliers=False)
plt.ylabel('Events per second')
plt.title(make_plot_name('Events per second', True))
saveIfNeeded(plt, 'IngestedTuples' + plot_name)
plt.show()
# Ejected Tuples sum
def ejected_tuples_sum():
sum = {'name': [], 'events': []}
for df, name in zip(df_sinks, filenameSinks):
sum['name'].append(extractInfoWithNumber(name))
sum['events'].append(df['ejectionTime'].count() * 1000)
sumdf = pd.DataFrame(sum)
sumdf.plot(kind='bar', x='name', y='events', title='#Resulting Tuples')
plt.ylabel('avg_events/sec')
saveIfNeeded(plt, 'resultingTuples' + plot_name)
plt.show()
# Comparison average ingested tuples
def avg_ingested_tup_comp():
ax = None
for df, name, match in zip(df_sockets, filenameSockets, matches):
appendix = match
df['avg15 ' + appendix] = df['events'].rolling(15).mean()
df[appendix] = df['events']
df = df[[appendix, 'avg15 ' + appendix]]
ax = df.plot(kind="line", title="Throughput", ax=ax, figsize=fsize)
plt.ylabel('#events')
plt.xlabel('seconds')
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=3)
saveIfNeeded(plt, 'ThroughputComp' + plot_name)
plt.show()
def throughput_diagramm():
ax = None
temp = {}
for df, name, match in zip(df_sockets, filenameSockets, matches):
appendix = match[7:]
df[appendix] = df['events']
df = df[[appendix]][:301]
if temp.get(appendix) is not None:
combined_df = temp.get(appendix) + df
ax = combined_df.plot(kind="line", title=make_plot_name(
"Throughput"),
ax=ax,
figsize=fsize)
elif 'map' in appendix or 'filter' in appendix or 'Filter' in appendix:
ax = df.plot(kind="line", title=make_plot_name("Throughput"), ax=ax,
figsize=fsize)
else:
temp[appendix] = df
plt.ylabel('#events')
plt.xlabel('seconds')
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=3)
saveIfNeeded(plt, 'ThroughputComp' + plot_name)
plt.show()
def throughput_rolling_average(rolling_avg):
ax = None
temp = {}
for df, name, match in zip(df_sockets, filenameSockets, matches):
appendix = match[7:]
df['avg15 ' + appendix] = df['events'].rolling(rolling_avg).mean()
df = df[['avg15 ' + appendix]][:301]
if temp.get(appendix) is not None:
combined_df = temp.get(appendix) + df
ax = combined_df.plot(kind="line", title=make_plot_name(
"Throughput"),
ax=ax,
figsize=fsize)
else:
temp[appendix] = df
plt.ylabel('#events')
plt.xlabel('seconds')
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=3)
saveIfNeeded(plt, 'ThroughputComp' + plot_name)
plt.show()
# Latency comparison between runs
def latency_comp(etl=True, ptl=True):
ax = None
if sort_sinks:
for sink in df_sinks:
if ptl:
sink.sort_values(['processingTime'], inplace=True)
sink['time'] = (sink['processingTime'] - sink['processingTime'][
0]) / 1_000
if etl:
sink.sort_values(['eventTime'], inplace=True)
sink['time'] = (sink['eventTime'] - sink['eventTime'][
0]) / 1_000
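    # etl = event-time latency, ptl = processing-time latency
    # (ejection timestamp minus event/processing timestamp, in milliseconds)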
for df, name in zip(df_sinks, filenameSinks):
df['etl'] = df['ejectionTime'] - df['eventTime']
df['ptl'] = df['ejectionTime'] - df['processingTime']
# df['time'] = (df['ejectionTime'] - df['ejectionTime'][0]) / 1_000
y = ['etl'] if etl else []
y = y + ['ptl'] if ptl else y
label = [l + extractInfo(name) for l in y]
df = df[df['time'] < 301]
df = df[df['time'] >= 0]
ax = df.plot(kind="line",
y=y,
label=label,
title=make_plot_name('Latency'),
x='time', ax=ax, figsize=fsize)
plt.ylabel('latency in milliseconds')
plt.xlabel('runtime in seconds')
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=2)
saveIfNeeded(plt, 'Latency' + plot_name)
plt.show()
def latency_comp_box():
final_df = pd.DataFrame()
for df, name in zip(df_sinks, filenameSinks):
df['eventTimeLatency'] = df['ejectionTime'] - df['eventTime']
df['processingTimeLatency'] = df['ejectionTime'] - df['processingTime']
final_df['etl' + extractInfo(name)] = df['eventTimeLatency']
final_df['ptl' + extractInfo(name)] = df[
'processingTimeLatency']
final_df.boxplot(rot=90, showfliers=False)
plt.ylabel('Latency in milliseconds')
plt.title(make_plot_name('Latencies', True))
saveIfNeeded(plt, 'PLatency-Box' + plot_name)
plt.show()
def latency_comp_box_etl():
final_df = | pd.DataFrame() | pandas.DataFrame |
import MELC.utils.myFiles as myF
import pandas as pd
from os.path import join
import cv2
import tifffile as tiff
from numpy import unique, where
from config import *
import sys
SEPARATOR = '/'
class RawDataset:
"""RawDataset loader.
    Works with the RAW folder structure of MELC images.
    Basically, the only thing it does is create a pandas DataFrame with the list of the files in folder/bleach and folder/source.
    This class is used by another function to convert raw data into the raw-melc structured folder as standardized uint16 tiff files.
Example:
from myFiles import RawDataset
path = "root/data/MELC"
dataset = RawDataset(path)
dataframe = dataset.merged()
"""
def __init__(self, path_raw_data):
f_source, c_source = self.get_dataFrame_MELCraw(join(path_raw_data, 'source'))
f_bleach, c_bleach = self.get_dataFrame_MELCraw(join(path_raw_data, 'bleach'))
source_raw_pd = pd.DataFrame(f_source)
source_raw_pd[1] = pd.DataFrame(c_source)
bleach_raw_pd = pd.DataFrame(f_bleach)
bleach_raw_pd[1] = pd.DataFrame(c_bleach)
self.merged_pd = pd.concat([source_raw_pd, bleach_raw_pd])
self.merged_pd = self.merged_pd.reset_index(drop=True)
def get_dataFrame_MELCraw(self, path_raw_data):
files_png, creation_times = myF.get_files(path_raw_data, ('png'))
files_pd = pd.DataFrame(files_png)
creation_times = | pd.DataFrame(creation_times) | pandas.DataFrame |
import pandas as _pd
import warnings
from apodeixi.text_layout.excel_layout import Palette
from apodeixi.util.a6i_error import ApodeixiError
from apodeixi.util.dataframe_utils import DataFrameUtils, TupleColumnUtils
from apodeixi.util.time_buckets import FY_Quarter
from apodeixi.util.warning_utils import WarningUtils
# NOTE: TimebucketStandardizer is referenced below; it is assumed to come from
# apodeixi.util.dataframe_utils (adjust this import if the class lives elsewhere).
from apodeixi.util.dataframe_utils import TimebucketStandardizer
class ReportWriterUtils():
'''
Utility class aiming to provide common methods for writing Excel reports
'''
def __init__(self):
# We call self.write_report multiple times, and want each subsequent time to never shrink columns to
# less than previously set.
# That is the purpose of this dictionary
#
# Keys are ints (column number starting at 0), and values are floats (column width)
self.remembered_column_widths = {}
def _set_column_width(self, parent_trace, report_ws, column_nb, width):
'''
Helper method.
Wraps the corresponding xlsxwriter method by "remembering" widths and making sure they are never
less than what might have been previously set as the width for the column in question.
        This comes in handy when multiple DataFrames are reported on the same worksheet.
@column_nb An int, representing a column number, starting at 0
@width A float, representing the width of a column
'''
if column_nb in self.remembered_column_widths.keys():
prior_width = self.remembered_column_widths[column_nb]
width_to_use = max(width, prior_width)
else:
width_to_use = width
self.remembered_column_widths[column_nb] = width_to_use
report_ws.set_column(column_nb, column_nb, width_to_use)
def write_report(self, parent_trace, report_df, column_widths, workbook, sheet, description, x_offset=0, y_offset=0):
'''
Helper method used as part of the process to create one or more reports in an Excel file.
        The caller determines how many worksheets the Excel file should have, and is responsible for persisting the
Excel file.
This method helps with the portion of that process that populates the contents of one of the worksheets, i.e.,
one report.
        Thus the caller is expected to have initialized an xlsxwriter.Workbook object, to which a call to
this method results in adding and populating a worksheet.
After this method returns, the caller is responsible for saving the `workbook` object.
        The content for the report populated by this method comes from `report_df`, with headers taken from its columns.
The amount of space taken by the report is (N + 2) * (M + 1), where:
        * N is the number of rows in `report_df`. Two additional rows are added at the top: for the description, and the columns.
* M is the number of columns. An additional column is added on the left, for the index of `report_df`
@param report_df A DataFrame, whose contents are to be written into Excel
        @param column_widths: A list of floats, whose length must equal the number of columns in `report_df`.
They determine how wide each of the Excel columns should be
@param workbook An xlsxwriter.Workbook object, to which the report must be added in a dedicated worksheet
@param sheet A string, corresponding to the name in the `workbook` into which the report must be written
@param description A string, used to give a description of the report. Example: "big-rock_v1-v2_diff".
@param x_offset The first Excel column with content for this report. Defaults to 0.
        @param y_offset The first Excel row with content for this report. Defaults to 0.
'''
ROOT_FMT = {'text_wrap': True, 'valign': 'top', 'border': True, 'border_color': Palette.WHITE}
HEADER_FMT = ROOT_FMT | {'bold': True, 'font_color': Palette.WHITE, 'align': 'center','border_color': Palette.WHITE,
'right': True, 'fg_color': Palette.DARK_BLUE}
header_format = workbook.add_format(HEADER_FMT)
report_ws = workbook.get_worksheet_by_name(sheet)
if report_ws == None:
report_ws = workbook.add_worksheet(sheet)
report_ws.set_zoom(85)
contextual_trace = parent_trace.doing("Writing a report",
data = {"description": str(description), "sheet": str(sheet)})
my_trace = contextual_trace.doing("Writing out the description")
if True:
fmt_dict ={'bold': True, 'font_color': Palette.DARK_BLUE}
fmt = workbook.add_format(fmt_dict)
self._write_val( parent_trace = my_trace,
ws = report_ws,
x = x_offset + 1,
y = y_offset,
val = description,
fmt = fmt)
my_trace = contextual_trace.doing("Extracting data and row labels")
if True:
# GOTCHA -
# For some reports, the index is not an integer. If we do DataFrame.iterrows(),
# the "index" row[0] might be a string, not an int, as we need for the "idx%s" remainder logic to alternate
# row colors.
# So we do a prepartory loop to get the row data into a list of series objects, and another loop on a
# list so we have integer indexing as we count row numbers
data_rows = [] # A list of series, one per row
row_labels = []
for row in report_df.iterrows():
data_rows.append(row[1])
row_labels.append(str(row[0]))
labels_width = max([len(label) for label in row_labels]) * 1.1
# Make enough room for the row labels
#report_ws.set_column(x_offset, x_offset, labels_width)
self._set_column_width(parent_trace, report_ws, x_offset, labels_width)
my_trace = contextual_trace.doing("Writing out columns")
columns = list(report_df.columns)
# Check if we have a multi-level index
if type(columns[0]) == tuple:
nb_levels = len(columns[0])
y_offset += nb_levels - 1
if True:
# Write the headers.
for idx in range(len(columns)):
#report_ws.set_column(idx + x_offset + 1, idx + x_offset + 1, column_widths[idx])
self._set_column_width(parent_trace, report_ws, idx + x_offset + 1, column_widths[idx])
col = columns[idx]
if type(col) == tuple:
for level in range(nb_levels):
self._write_val( parent_trace = my_trace,
ws = report_ws,
x = idx + x_offset + 1,
y = y_offset + 2 - nb_levels + level,
val = col[level],
fmt = header_format)
else:
self._write_val( parent_trace = my_trace,
ws = report_ws,
x = idx + x_offset + 1,
y = y_offset + 1,
val = col,
fmt = header_format)
my_trace = contextual_trace.doing("Writing the rows")
if True:
for idx in range(len(data_rows)):
row = data_rows[idx]
# First write the row label
self._write_val( parent_trace = my_trace,
ws = report_ws,
x = x_offset,
y = idx + y_offset + 2,
val = row_labels[idx],
fmt = header_format)
# Now write the "real" columns
for jdx in range(len(columns)):
col = columns[jdx]
val = row[col]
clean_val = DataFrameUtils().clean(val)
fmt_dict = ROOT_FMT.copy()
if idx%2 == 0:
fmt_dict |= {'bg_color': Palette.LIGHT_BLUE}
fmt = workbook.add_format(fmt_dict)
# GOTCHA
#
                    # For some reports (e.g., diffs of manifests), the clean_val might be a "field name", i.e., a column
                    # name in the DataFrame of a manifest being diff-ed.
                    # In such cases, clean_val might be a tuple if it is a MultiLevel column in the manifest's DataFrame.
                    # If so, convert it to a string to avoid errors writing it out.
#
if type(clean_val) == tuple:
clean_val = str(clean_val)
self._write_val( parent_trace = my_trace,
ws = report_ws,
x = jdx + x_offset + 1,
y = idx + y_offset + 2,
val = clean_val,
fmt = fmt)
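    # A minimal usage sketch for write_report (the trace object and report_df below are
    # assumptions for illustration, not objects defined in this module):
    #
    #   workbook = xlsxwriter.Workbook("diff_report.xlsx")
    #   ReportWriterUtils().write_report(trace, report_df,
    #                                    column_widths=[20.0] * len(report_df.columns),
    #                                    workbook=workbook, sheet="diff",
    #                                    description="big-rock_v1-v2_diff")
    #   workbook.close()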
def _write_val(self, parent_trace, ws, x, y, val, fmt):
'''
Helper method to wrap xlsxwriter in order to catch its Exceptions
'''
try:
ws.write(y, x, val, fmt)
except Exception as ex:
raise ApodeixiError(parent_trace, "Unable to write value to Excel",
data = {"x": str(x), "y": str(y), "val": str(val), "error": str(ex)})
class TimebucketDataFrameJoiner():
'''
Utility class to join DataFrames so that timebuckets are grouped together and sorted, and return the resulting
DataFrame
For example, imagine having three DataFrames:
* A DataFrame of sales regions - columns are strings like "Region", "Country"
    * A DataFrame of sales targets for 2 years - columns are tuples for timebuckets, like ("Q1", "FY23"), ..., ("Q4", "FY24")
    * A DataFrame of sales actuals for 3 quarters - columns are tuples like ("Q1", "FY23"), ..., ("Q3", "FY23")
    Moreover, assume that the DataFrames for sales targets and actuals either have the same index as the DataFrame for sales regions,
    or else have a foreign key pointing to the sales regions (not necessarily an injection, i.e., some regions may lack
a sales target or actual)
Then this class provides functionality to "merge" all these 3 DataFrames into 1 DataFrame with columns that are
    2-level tuples, returning something that looks like this:
| Q1 FY23 | Q2 FY23 | Q3 FY23 | Q4 FY23 | Q1 FY24 | Q2 FY24 | Q3 FY24 | Q4 FY24
Region | Country | Target | Actual | Target | Actual | Target | Actual | Target | Target | Target | Target | Target
=============================================================================================================================
| | | | | | | | | | | |
In particular:
* Timebuckets are standardized. If they are provided as tuples, they are turned into strings
* A lower level (like "Target" and "Actual") can be introduced by the caller. The caller can also introduce "higher levels"
and the standard grouping semantics are enforced: lower-levels are grouped within the same time bucket, whereas time buckets
are grouped per higher level value, if higher levels are provided.
* Timebuckets may be provided as size-2 tuples or as size-1 tuples or as strings
* The non-timebucket columns are made to appear to the left, as the first columns
@param reference_df A DataFrame none of whose columns are for a timebucket. All columns must be strings.
@param link_field May be None. If not null, must be a column of `reference_df` (i.e., a string) such that
"could be used as an index", in the sense that all rows of `reference_df` have a different value for this column.
@param timebucket_df_list A list of DataFrames all of which have only timebucket columns. A "timebucket column" can be
                    either a string or a tuple whose last 1 or 2 levels can be parsed as a FY_Quarter object. Valid examples:
"Q2 FY23", "FY 2025", ("Q2", "FY 24"), ("Q2 FY22").
The index for all these DataFrames must be the same (or a subset) of the index of `reference_df`, or else
must contain a column whose name equals the `link_field` parameter, and which has unique values per row.
@param timebucket_df_lower_tags May be None. If not null, must be a list of the same length as `timebucket_df_list`,
all of it strings or all of it tuples of strings of the same size.
@param timebucket_df_upper_tags May be None. If not null, must be a list of the same length as `timebucket_df_list`,
all of it strings or all of it tuples of strings of the same size.
@param a6i_config Apodeixi configuration.
@return A DataFrame
'''
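    # A minimal construction sketch (the trace object and DataFrames below are
    # assumptions for illustration, not defined in this module):
    #
    #   joiner = TimebucketDataFrameJoiner(trace, reference_df=regions_df, link_field="Country",
    #                                      timebucket_df_list=[targets_df, actuals_df],
    #                                      timebucket_df_lower_tags=["Target", "Actual"],
    #                                      timebucket_df_upper_tags=None, a6i_config=a6i_config)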
def __init__(self, parent_trace, reference_df, link_field, timebucket_df_list,
timebucket_df_lower_tags, timebucket_df_upper_tags, a6i_config):
standardizer = TimebucketStandardizer()
my_trace = parent_trace.doing("Validating inputs provided to TimebucketDataFrameJoiner")
if True:
# Check reference_df is a DataFrame
if not type(reference_df) == _pd.DataFrame:
raise ApodeixiError(my_trace, "Bad reference_df provided: it should be a DataFrame, not a '"
+ str(type(reference_df)) + "'")
# Check no column of reference_df is a timebucket
bad_cols = [col for col in reference_df.columns if standardizer.is_a_timebucket_column(my_trace,
col,
a6i_config)]
if len(bad_cols) > 0:
raise ApodeixiError(my_trace, "Invalid reference DataFrame provided: it contains some timebucket columns, but it "
+ "shouldn't",
data = {"ts_cols": str(bad_cols)})
# Check every column of reference_df is a string
bad_cols = [col for col in reference_df.columns if not type(col)==str]
if len(bad_cols) > 0:
raise ApodeixiError(my_trace, "Invalid reference DataFrame provided: it contains some non-string columns, but it "
+ "shouldn't",
data = {"ts_cols": str(bad_cols)})
# Check link_field is either null or is a valid column of reference_df
if link_field != None and not link_field in reference_df.columns:
raise ApodeixiError(my_trace, "Invalid link field provided: it should be a column in reference DataFrame ",
data = {"link_field": str(link_field),
"reference_df.columns": str(reference_df.columns)})
# Check timebucket_df_list is a list
if type(timebucket_df_list) != list:
raise ApodeixiError(my_trace, "Invalid timebucket_df_list provided: should be a list, not a '"
+ str(type(timebucket_df_list)) + "'")
# Check timebucket_df_list is not empty list
if len(timebucket_df_list) == 0:
raise ApodeixiError(my_trace, "Invalid timebucket_df_list provided: it is emtpy, and it shouldn't")
# Check timebucket_df_lower_tags is a list
if timebucket_df_lower_tags !=None and type(timebucket_df_lower_tags) != list:
raise ApodeixiError(my_trace, "Invalid timebucket_df_lower_tags provided: should be a list, not a '"
+ str(type(timebucket_df_lower_tags)) + "'")
# Check timebucket_df_upper_tags is a list
if timebucket_df_upper_tags != None and type(timebucket_df_upper_tags) != list:
raise ApodeixiError(my_trace, "Invalid timebucket_df_upper_tags provided: should be a list, not a '"
+ str(type(timebucket_df_upper_tags)) + "'")
# Check timebucket_df_lower_tags are unique
if timebucket_df_lower_tags !=None and len(timebucket_df_lower_tags) != len(set(timebucket_df_lower_tags)):
raise ApodeixiError(my_trace, "Invalid timebucket_df_lower_tags provided: it has duplicates, and shouldn't: '"
+ str(timebucket_df_lower_tags) + "'")
# Check timebucket_df_upper_tags are unique
if timebucket_df_upper_tags !=None and len(timebucket_df_upper_tags) != len(set(timebucket_df_upper_tags)):
raise ApodeixiError(my_trace, "Invalid timebucket_df_upper_tags provided: it has duplicates, and shouldn't: '"
+ str(timebucket_df_upper_tags) + "'")
# Check all members of timebucket_df_list are DataFrames
bad_list = [elt for elt in timebucket_df_list if type(elt) != _pd.DataFrame]
if len(bad_list) > 0:
raise ApodeixiError(my_trace, "Invalid timebucket_df_list provided: some elements are not DataFrames. "
+ "Instead they are " + ", ".join([str(type(elt)) for elt in bad_list]))
# Check all columns for members of timebucket_df_list are timebuckets, except for the linkfield, if it exists
for df in timebucket_df_list:
for col in df.columns:
if self._is_a_link_column(col, link_field):
# Skip if col "is" the linkfield, where "is" must be tuple-sensitive (i.e., maybe the last level of col
# is the link_field)
continue
flattened_col, timebucket, timebucket_indices = standardizer.standardizeOneTimebucketColumn(my_trace,
raw_col = col,
a6i_config = a6i_config,
expected_collapsing_info = None)
if timebucket == None:
raise ApodeixiError(my_trace, "Invalid timebucket_df_list: all columns of all DataFrames should be "
+ "timebuckets, but at least one column is not: '" + str(col) + "'")
if type(flattened_col) == tuple and max(timebucket_indices) < len(flattened_col) - 1:
raise ApodeixiError(my_trace, "Invalid timebucket_df_list: at least 1 DataFrame has a column "
+ "has lower levels below the timebucket levels: '" + str(col) + "'")
# Check all members of timebucket_df_list contain the link_field, if it is set
if link_field != None:
for df in timebucket_df_list:
if len([col for col in df.columns if self._is_a_link_column(col, link_field)]) == 0:
raise ApodeixiError(my_trace, "Invalid link_field '" + str(link_field) + "' : it is not present "
+ "as a column in at least some of the input dataframes supposed to join on that field",
data = {"dataframe columns": str(df.columns)})
if len([col for col in df.columns if self._is_a_link_column(col, link_field)]) > 1:
raise ApodeixiError(my_trace, "Invalid link_field '" + str(link_field) + "' : it present "
+ "in multiple columns in at least some of the input dataframes supposed to join "
+ "on that field (should be present in exactly 1 column",
data = {"dataframe columns": str(df.columns)})
# Check tags lists (if not null) are of the right length
if timebucket_df_lower_tags != None and len(timebucket_df_lower_tags) != len(timebucket_df_list):
raise ApodeixiError(my_trace, "Invalid timebucket_df_lower_tags: size does not match that of timebucket_df_list",
data = {"len(timebucket_df_lower_tags)": str(len(timebucket_df_lower_tags)),
"len(timebucket_df_list)": str(len(timebucket_df_list))})
if timebucket_df_upper_tags != None and len(timebucket_df_upper_tags) != len(timebucket_df_list):
raise ApodeixiError(my_trace, "Invalid timebucket_df_upper_tags: size does not match that of timebucket_df_list",
data = {"len(timebucket_df_upper_tags)": str(len(timebucket_df_upper_tags)),
"len(timebucket_df_list)": str(len(timebucket_df_list))})
# Check tag lists (if not null) are of the same number of MultiIndex levels
if timebucket_df_lower_tags != None:
tag_lengths = [len(tag) if type(tag)==tuple else 0 for tag in timebucket_df_lower_tags]
tag_lengths = list(set(tag_lengths)) # Remove duplicates
if len(tag_lengths) != 1:
raise ApodeixiError(my_trace, "Invalid timebucket_df_lower_tags: there are tags of various lengths",
data = {"tag_lengths": str(tag_lengths)})
if timebucket_df_upper_tags != None:
tag_lengths = [len(tag) if type(tag)==tuple else 0 for tag in timebucket_df_upper_tags]
tag_lengths = list(set(tag_lengths)) # Remove duplicates
if len(tag_lengths) != 1:
raise ApodeixiError(my_trace, "Invalid timebucket_df_upper_tags: there are tags of various lengths",
data = {"tag_lengths": str(tag_lengths)})
self.reference_df = reference_df
self.link_field = link_field
self.timebucket_df_list = timebucket_df_list
self.timebucket_df_lower_tags = timebucket_df_lower_tags
self.timebucket_df_upper_tags = timebucket_df_upper_tags
self.a6i_config = a6i_config
BINARY_OPERATION = "BINARY_OPERATION"
UNARY_OPERATION = "UNARY_OPERATION"
CUMULATIVE_OPERATION = "CUMULATIVE_OPERATION"
def _is_a_link_column(self, col, link_field):
'''
        Helper method that returns a boolean, determining if a DataFrame's column `col` "is" the link_field, where "is link field" is
interpreted in a tuple-sensitive way: ie, a column `col` is considered to be the `link_field` if either
* col == link_field
* or col is a tuple and link_field is col's last level
@param col A DataFrame's column
@param link_field A string
'''
if col==link_field or (type(col)==tuple and col[-1]==link_field):
return True
else:
return False
def _untuple_link_column(self, parent_trace, df):
'''
Returns a DataFrame almost identical to the input `df`, except that it might rename "the link_field column" of df, if
self.link_field is not null, so that it is a string in the event it is a tuple.
Example: Suppose the link_field is "Country". Because of how Pandas reads Excel into DataFrames such as `df`, it is
possible that in `df` the column is held as a tuple, like ("", "", Country).
        In that case, that tuple column is replaced by a string column "Country"
'''
if self.link_field == None: # Nothing to do
return df
matching_columns = [col for col in df.columns if self._is_a_link_column(col, self.link_field)]
if len(matching_columns) == 0: # No column to rename
return df
elif len(matching_columns) > 1:
raise ApodeixiError(parent_trace, "Invalid DataFrame provided: multiple columns can be considered to have "
+ "the link field '" + self.link_field + "'",
data = {"df columns": str(df.columns)})
original_link_field_col = matching_columns[0]
# Now replace the column of df from possibly ("", "Country") to "Country" (see comments in Example above)
cleaned_df = df.copy()
cleaned_df.columns = [col if col != original_link_field_col else self.link_field for col in df.columns]
return cleaned_df
def enrich_with_tb_binary_operation(self, parent_trace, a_ltag, b_ltag, c_ltag, func):
'''
Used to compute derived DataFrames.
Example use case: suppose that self.timebucket_df_list has two DataFrames, with lower tags called
"Sales Target", "Sales Actual". Then this methoc can be used to derive a third DataFrame
which the caller (via the c_ltag) can choose to tag as "% Target Achieved", computed
                            (via the `func` function parameter) as the ratio of actuals to targets, row-by-row.
More generally:
This method enlarges self.timebucket_df_list by adding 1 additional DataFrame C_df, derived from two of the
pre-existing DataFrames A_df, B_df already in self.timebucket_df_list, so that the following holds true:
1) A_df is the unique member self.timebucket_df_list[idx] such that a_ltag = self.timebucket_df_lower_tags[idx]
2) Same as 1), but for B_df and b_ltag
3) For each timebucket column col in both A_df, and B_df, C_df[col] = func(A_df[col], B_df[col])
4) If self.link_field is not null, then C_df[link_field] = A_df[link_field]
5) If A_df and B_df don't have the "same rows", then the above hold true with A_df, B_df replaced by the intersection
of rows both in A_df and B_df. By "same rows" we mean: rows where A_df, B_df have the same value for
self.link_field or, if self._link_field is null, rows with the same index value.
It also enriches self.timebucket_df_lower_tags by adding c_ltag for C_df
@param a_ltag A string, which must belong to self.timebucket_df_lower_tags, and the latter must not be null
@param b_ltag A string, which must belong to self.timebucket_df_lower_tags, and the latter must not be null
@param c_ltag A string that should be used as a lower tag for the result. It must not already exist in
self.timebucket_df_lower_tags, and it is appended to the latter, increasing its size by 1.
@func A function that takes 3 arguments: a FunctionalTrace object, and two Pandas series, and returns a third series.
The function may assume that both input series have the same index.
'''
with warnings.catch_warnings(record=True) as w:
WarningUtils().turn_traceback_on(parent_trace, warnings_list=w)
my_trace = parent_trace.doing("Checking if a_ltag is valid")
if True:
if not a_ltag in self.timebucket_df_lower_tags:
raise ApodeixiError(my_trace, "Can't use tag '" + str(a_ltag) + "' to identify which DataFrame to use as an "
+ " enrichment input because "
+ "tag is not in valid list of tags",
data = {"allowed tags": str(self.timebucket_df_lower_tags)})
my_trace = parent_trace.doing("Identifying a_df")
if True:
a_idx = self.timebucket_df_lower_tags.index(a_ltag)
a_df = self.timebucket_df_list[a_idx]
self._enrich_with_tb_operation(parent_trace, a_df = a_df,
b_ltag = b_ltag,
c_ltag = c_ltag,
func = func,
operation_type = self.BINARY_OPERATION,
ref_column = None)
WarningUtils().handle_warnings(parent_trace, warning_list=w)
return
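    # Hedged usage sketch (ratio_func and the tags below are illustrative; they assume the
    # joiner was built with lower tags "Sales Target" and "Sales Actual"):
    #
    #   def ratio_func(trace, actual_series, target_series):
    #       return actual_series / target_series
    #
    #   joiner.enrich_with_tb_binary_operation(trace, a_ltag="Sales Actual", b_ltag="Sales Target",
    #                                          c_ltag="% Target Achieved", func=ratio_func)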
def enrich_with_tb_unary_operation(self, parent_trace, ref_column, b_ltag, c_ltag, func):
'''
Used to compute derived DataFrames.
Example use case: suppose that self.reference_df has a column called "Journey Target", to represent
how many modernization tasks to do over the course of a multi-year modernization program.
And suppose self.timebucket_df_list contains a DataFrame (identified by lower tag b_ltag)
with quarterly targets for such tasks.
                        Then this method can be used to derive another DataFrame corresponding
to "% Target Achieved", computed
                        (via the `func` function parameter) as the ratio of actuals to targets, row-by-row.
This derived DataFrame would get a lower tag given by c_ltag.
More generally:
This method enlarges self.timebucket_df_list by adding 1 additional DataFrame C_df, derived from
        self.reference_df and from a pre-existing DataFrame B_df already in self.timebucket_df_list,
so that the following holds true:
1) ref_column is a column in self.reference_df
2) B_df is the unique member self.timebucket_df_list[idx] such that b_ltag = self.timebucket_df_lower_tags[idx]
3) For each timebucket column col in B_df, C_df[col] = func(self.reference_df[ref_column], B_df[col])
4) If self.link_field is not null, then C_df[link_field] = A_df[link_field]
5) If self.reference_df and B_df don't have the "same rows", then the above hold true with
self.reference_df, B_df replaced by the intersection
of rows both in self.reference_df and B_df. By "same rows" we mean: rows where self.reference_df,
B_df have the same value for self.link_field or, if self._link_field is null, rows with the same index value.
It also enriches self.timebucket_df_lower_tags by adding c_ltag for C_df
@param ref_column A string, which must be a column in self.reference_df
@param b_ltag A string, which must belong to self.timebucket_df_lower_tags, and the latter must not be null
@param c_ltag A string that should be used as a lower tag for the result. It must not already exist in
self.timebucket_df_lower_tags, and it is appended to the latter, increasing its size by 1.
@func A function that takes 3 arguments: a FunctionalTrace object, and two Pandas series, and returns a third series.
The function may assume that both input series have the same index.
'''
with warnings.catch_warnings(record=True) as w:
WarningUtils().turn_traceback_on(parent_trace, warnings_list=w)
if not ref_column in self.reference_df.columns:
raise ApodeixiError(parent_trace, "Can't apply unary operation to enrich DataFrames list because '"
+ str(ref_column) + "' is not a valid column for self.reference_df",
data = {"valid columns": str(self.reference_df.columns)})
self._enrich_with_tb_operation(parent_trace, a_df = self.reference_df,
b_ltag = b_ltag,
c_ltag = c_ltag,
func = func,
operation_type = self.UNARY_OPERATION,
ref_column = ref_column)
WarningUtils().handle_warnings(parent_trace, warning_list=w)
def enrich_with_tb_cumulative_operation(self, parent_trace, b_ltag, c_ltag, func):
'''
Used to compute derived DataFrames.
        Example use case: suppose that the DataFrame in self.timebucket_df_list identified by b_ltag holds quarterly
                        "Sales" columns, and you would like to have an additional set of "Cum Sales" columns for
                        the accumulated sales for all prior quarters, up to the current quarter.
                        Then this method can be used to derive another DataFrame corresponding
to "Cum Sales", computed
(via the `func` function parameter) as the accumulation of quarterly sales, row-by-row.
This derived DataFrame would get a lower tag given by c_ltag.
More generally:
This method enlarges self.timebucket_df_list by adding 1 additional DataFrame C_df, derived from
        a pre-existing DataFrame B_df already in self.timebucket_df_list,
so that the following holds true:
1) B_df is the unique member self.timebucket_df_list[idx] such that b_ltag = self.timebucket_df_lower_tags[idx]
2) For each timebucket column col in B_df, C_df[col] = func(C_df[col-], B_df[col]) where col- is the column
            in C_df preceding col, unless col is the first column in C_df, in which case C_df[col-] = None
        3) If self.link_field is not null, then C_df[link_field] = B_df[link_field]
It also enriches self.timebucket_df_lower_tags by adding c_ltag for C_df
@param b_ltag A string, which must belong to self.timebucket_df_lower_tags, and the latter must not be null
@param c_ltag A string that should be used as a lower tag for the result. It must not already exist in
self.timebucket_df_lower_tags, and it is appended to the latter, increasing its size by 1.
        @param func A function that takes 3 arguments: a FunctionalTrace object and two Pandas Series.
                It returns a third Series. The function may assume that both input series have the same index.
'''
with warnings.catch_warnings(record=True) as w:
WarningUtils().turn_traceback_on(parent_trace, warnings_list=w)
self._enrich_with_tb_operation(parent_trace, a_df = None,
b_ltag = b_ltag,
c_ltag = c_ltag,
func = func,
operation_type = self.CUMULATIVE_OPERATION,
ref_column = None)
WarningUtils().handle_warnings(parent_trace, warning_list=w)
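    # Hedged usage sketch (illustrative only; `joiner` and the tags below are assumptions). Per the
    # docstring above, `func` receives the previously derived column (None for the first timebucket)
    # and the corresponding B_df column, so a cumulative "Cum Sales" DataFrame could be derived from
    # a quarterly "sales" DataFrame like this:
    #
    #   def accumulate(trace, prior_series, quarter_series):
    #       if prior_series is None:          # first timebucket column has no predecessor
    #           return quarter_series
    #       return prior_series + quarter_series
    #
    #   joiner.enrich_with_tb_cumulative_operation(parent_trace,
    #                                              b_ltag   = "sales",
    #                                              c_ltag   = "cum sales",
    #                                              func     = accumulate)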
def _enrich_with_tb_operation(self, parent_trace, a_df, b_ltag, c_ltag, func, operation_type, ref_column):
'''
@param operation_type A string, which must be one of: self.BINARY_OPERATION, self.UNARY_OPERATION, self.CUMULATIVE_OPERATION
'''
my_trace = parent_trace.doing("Validate inputs to enrich_with_binary_operation method")
if True:
            if self.timebucket_df_lower_tags is None:
                raise ApodeixiError(my_trace, "Can't enrich list of DataFrames unless lower tags are provided")
if c_ltag in self.timebucket_df_lower_tags:
raise ApodeixiError(my_trace, "Can't use tag '" + str(c_ltag) + "' to enrich list of DataFrames because "
+ "tag is already used by another DataFrame in the list",
data = {"tags already used": str(self.timebucket_df_lower_tags)})
            if b_ltag not in self.timebucket_df_lower_tags:
raise ApodeixiError(my_trace, "Can't use tag '" + str(b_ltag) + "' to identify which DataFrame to use as an "
+ " enrichment input because "
+ "tag is not in valid list of tags",
data = {"allowed tags": str(self.timebucket_df_lower_tags)})
            if operation_type not in [self.BINARY_OPERATION, self.UNARY_OPERATION, self.CUMULATIVE_OPERATION]:
raise ApodeixiError(my_trace, "Invalid operation type '" + str(operation_type) + "': should be one of: "
+ str([self.BINARY_OPERATION, self.UNARY_OPERATION, self.CUMULATIVE_OPERATION]))
my_trace = parent_trace.doing("Combining DataFrames as preparation to applying binary operation")
if True:
LEFT_SUFFIX = "_left"
RIGHT_SUFFIX = "_right"
b_idx = self.timebucket_df_lower_tags.index(b_ltag)
b_df = self.timebucket_df_list[b_idx]
if operation_type in [self.BINARY_OPERATION, self.UNARY_OPERATION]:
left_df = a_df.copy()
right_df = b_df.copy()
                if self.link_field is not None:
left_df = self._untuple_link_column(my_trace, left_df) # Need to untuple before setting index
right_df = self._untuple_link_column(my_trace, right_df) # Need to untuple before setting index
right_df = right_df.set_index(self.link_field)
joined_df = left_df.join(right_df, on=self.link_field, how="inner",
lsuffix=LEFT_SUFFIX, rsuffix=RIGHT_SUFFIX)
else:
joined_df = left_df.join(right_df, how="inner",
lsuffix=LEFT_SUFFIX, rsuffix=RIGHT_SUFFIX)
else:
# b_df might not have columns sorted by timebucket, but we need to sort them before we start doing the cumulative
# operation, since cumulative operations are order dependent
standardizer = TimebucketStandardizer()
joined_df, info = standardizer.standardizeAllTimebucketColumns(my_trace,
a6i_config = self.a6i_config,
df = b_df,
lower_level_key = None)
my_trace = parent_trace.doing("Populating derived DataFrame")
if True:
derived_df = | _pd.DataFrame({}) | pandas.DataFrame |
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd
import pylab as pl
import numpy as np
from scipy import ndimage
from scipy.cluster import hierarchy
from scipy.spatial import distance_matrix
from sklearn import manifold, datasets, preprocessing, metrics
from sklearn.cluster import AgglomerativeClustering
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.datasets._samples_generator import make_blobs
from io import StringIO
from math import sqrt
import pydotplus
import itertools
# Storing the movie information into a pandas dataframe
movies_df = | pd.read_csv('movies.csv') | pandas.read_csv |
if "snakemake" in locals():
debug = False
else:
debug = True
if not debug:
import sys
sys.stderr = open(snakemake.log[0], "w")
import pandas as pd
def merge_deep_arg_calls(mapping, meta_data, deep_arg_calls, output):
mapping = pd.read_excel(mapping)
meta_data = pd.read_csv(meta_data, sep="|")
merged_data = pd.merge(
mapping,
meta_data,
how="left",
left_on="label",
right_on="label",
validate="1:1",
)
merged_data = merged_data.drop(columns=["osd_id_y"]).rename(
columns={"osd_id_x": "osd_id"}
)
deep_arg_calls = | pd.read_csv(deep_arg_calls, sep="\t") | pandas.read_csv |
import typing as T
import pickle
import itertools as it
from enum import Enum
from pathlib import Path
import defopt
import numpy as np
import pandas as pd
import scipy.stats as st
from sklearn.base import BaseEstimator
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.compose import make_column_transformer, make_column_selector
from sklearn.pipeline import make_pipeline
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from lightgbm import LGBMClassifier
import dask
from dask.distributed import Client
from dask_jobqueue import SLURMCluster
import dask_ml.model_selection as dcv
def columns_transform():
return make_column_transformer(
(
StandardScaler(),
make_column_selector("^(?!crashYear)", dtype_include=np.number),
),
(
OneHotEncoder(handle_unknown="ignore"),
make_column_selector(dtype_include=object),
),
)
def fit_dummy(X, y, n_iter):
"""Fit a dummy estimator"""
model = DummyClassifier(strategy="prior")
model.fit(X, y)
return model
def fit_linear(X, y, n_iter):
"""Fit a logistic regression model"""
model = LogisticRegression(max_iter=500, penalty="elasticnet", solver="saga")
model = make_pipeline(columns_transform(), model)
param_space = {
"logisticregression__l1_ratio": st.uniform(0, 1),
"logisticregression__C": st.loguniform(1e-4, 1e4),
}
model = dcv.RandomizedSearchCV(
model, param_space, scoring="neg_log_loss", n_iter=n_iter, random_state=42, cv=5
)
model.fit(X, y)
return model
def fit_mlp(X, y, n_iter):
"""Fit a simple multi-layer perceptron model"""
model = MLPClassifier(random_state=42, early_stopping=True)
model = make_pipeline(columns_transform(), model)
layers_options = [
[n_units] * n_layers
for n_units, n_layers in it.product([32, 64, 128, 256, 512], [1, 2])
]
param_space = {
"mlpclassifier__hidden_layer_sizes": layers_options,
"mlpclassifier__alpha": st.loguniform(1e-5, 1e-2),
"mlpclassifier__learning_rate_init": st.loguniform(1e-4, 1e-1),
}
model = dcv.RandomizedSearchCV(
model, param_space, scoring="neg_log_loss", n_iter=n_iter, random_state=42, cv=5
)
model.fit(X, y)
return model
def loguniform_int(a, b):
"""Create a discrete random variable following a log-uniform distribution"""
xs = np.arange(a, b + 1)
probs = 1 / (xs * np.log(b / a))
probs /= probs.sum()
return st.rv_discrete(a=a, b=b, values=[xs, probs], name="loguniform_int")
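# Hedged illustration (not part of the pipeline): the object returned by loguniform_int behaves like
# any scipy.stats discrete distribution, so the randomized hyper-parameter searches below can sample
# from it, e.g.
#
#   rv = loguniform_int(1, 500)
#   rv.rvs(size=3, random_state=0)   # -> three ints in [1, 500], skewed toward small values
#   rv.pmf(1) > rv.pmf(500)          # True: log-uniform puts more mass on small values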
def fit_knn(X, y, n_iter):
"""Fit a KNN model on geographical coordinates only"""
columns_tf = make_column_transformer(("passthrough", ["X", "Y"]))
model = make_pipeline(columns_tf, KNeighborsClassifier())
param_space = {
"kneighborsclassifier__n_neighbors": loguniform_int(1, 500),
"kneighborsclassifier__weights": ["uniform", "distance"],
}
model = dcv.RandomizedSearchCV(
model, param_space, scoring="neg_log_loss", n_iter=n_iter, random_state=42, cv=5
)
model.fit(X, y)
return model
def fit_gbdt(X, y, n_iter):
"""Fit a gradient boosted decision trees model"""
model = LGBMClassifier(n_estimators=2000, random_state=42)
model = make_pipeline(columns_transform(), model)
param_space = {
"lgbmclassifier__min_data_in_leaf": loguniform_int(5, 500),
"lgbmclassifier__num_leaves": loguniform_int(31, 500),
"lgbmclassifier__reg_alpha": st.loguniform(1e-10, 1.0),
"lgbmclassifier__reg_lambda": st.loguniform(1e-10, 1.0),
"lgbmclassifier__learning_rate": st.loguniform(1e-4, 1e-1),
}
model = dcv.RandomizedSearchCV(
model, param_space, scoring="neg_log_loss", n_iter=n_iter, random_state=42, cv=5
)
model.fit(X, y)
return model
def slurm_cluster(n_workers, cores_per_worker, mem_per_worker, walltime, dask_folder):
"""helper function to start a Dask Slurm-based cluster
:param n_workers: maximum number of workers to use
:param cores_per_worker: number of cores per worker
:param mem_per_worker: maximum of RAM for workers
:param walltime: maximum time for workers
:param dask_folder: folder to keep workers temporary data
"""
dask.config.set(
{
"distributed.worker.memory.target": False, # avoid spilling to disk
"distributed.worker.memory.spill": False, # avoid spilling to disk
}
)
cluster = SLURMCluster(
cores=cores_per_worker,
processes=1,
memory=mem_per_worker,
walltime=walltime,
log_directory=dask_folder / "logs", # folder for SLURM logs for each worker
local_directory=dask_folder, # folder for workers data
)
cluster.adapt(minimum=1, maximum=n_workers)
client = Client(cluster)
return client
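# Hedged usage sketch (the values below are assumptions): outside of fit() further down, the helper
# could be used directly to obtain a Dask client backed by up to 8 adaptively scaled SLURM workers:
#
#   client = slurm_cluster(n_workers=8, cores_per_worker=4, mem_per_worker="4GB",
#                          walltime="0-01:00", dask_folder=Path("dask"))
#   ...                  # submit work through the client
#   client.close()       # release the workers when done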
ModelType = Enum("ModelType", "dummy linear mlp knn gbdt")
def fit(
dset: Path,
output_file: Path,
*,
model_type: ModelType = ModelType.linear,
n_iter: int = 50,
n_workers: int = 1,
cores_per_worker: int = 4,
dask_folder: Path = Path.cwd() / "dask",
mem_per_worker: str = "2GB",
walltime: str = "0-00:30",
use_slurm: bool = False,
) -> BaseEstimator:
"""Fit a model
:param dset: CAS dataset
:param output_file: output .pickle file
:param model_type: type of model to use
:param n_iter: budget for hyper-parameters optimization
:param n_workers: number of workers to use, maximum number for Slurm backend
:param cores_per_worker: number of cores per worker
:param dask_folder: folder to keep workers temporary data
:param mem_per_worker: maximum of RAM for workers, only for Slurm backend
:param walltime: maximum time for workers, only for Slurm backend
:param use_slurm: use Slurm backend for the Dask cluster
:returns: fitted model
"""
dset = pd.read_csv(dset)
X = dset[dset.fold == "train"].drop(columns="fold")
y = X.pop("injuryCrash")
# find function to fit the model in the global namespace
model_func = globals()["fit_" + model_type.name]
# start a Dask cluster, local by default, use a configuration file for Slurm
if use_slurm:
client = slurm_cluster(
n_workers=n_workers,
cores_per_worker=cores_per_worker,
mem_per_worker=mem_per_worker,
walltime=walltime,
dask_folder=dask_folder,
)
else:
client = Client(
n_workers=n_workers,
threads_per_worker=cores_per_worker,
local_directory=dask_folder,
)
client.wait_for_workers(1)
model = model_func(X, y, n_iter=n_iter)
with output_file.open("wb") as fd:
pickle.dump(model, fd)
def predict(
dset: T.Union[pd.DataFrame, Path],
model: T.Union[BaseEstimator, Path],
*,
output_file: T.Optional[Path] = None,
) -> pd.Series:
"""Make predictions from a fitted model
:param dset: CAS dataset
:param model: trained model
:param output_file: output .csv file
:returns: predictions
"""
if isinstance(dset, Path):
dset = pd.read_csv(dset)
if isinstance(model, Path):
with model.open("rb") as fd:
model = pickle.load(fd)
X = dset.drop(columns=["injuryCrash", "fold"])
y_prob = model.predict_proba(X)
y_prob = | pd.Series(y_prob[:, 1], name="crashInjuryProb") | pandas.Series |
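A minimal end-to-end sketch of how the two entry points above might be chained (the file names, model type, and search budget are assumptions, not values taken from this project):

from pathlib import Path

# fit a gradient boosted model on a small local Dask cluster, then score the same dataset
fit(Path("cas_dataset.csv"), Path("gbdt_model.pickle"),
    model_type=ModelType.gbdt, n_iter=25, n_workers=2, cores_per_worker=4)
predictions = predict(Path("cas_dataset.csv"), Path("gbdt_model.pickle"),
                      output_file=Path("predictions.csv"))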
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series(pd.date_range("20130101", periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp("20120101")
result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
tm.assert_series_equal(result, expected)
def test_replace_nat_with_tz(self):
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp("2015/01/01", tz="UTC")
s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_timedelta_td64(self):
tdi = pd.timedelta_range(0, periods=5)
ser = pd.Series(tdi)
# Using a single dict argument means we go through replace_list
result = ser.replace({ser[1]: ser[3]})
expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
return_value = s.replace([1, 2, 3], inplace=True)
assert return_value is None
tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
msg = (
r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
r"\(bfill\)\. Got crash_cymbal"
)
with pytest.raises(ValueError, match=msg):
return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
assert return_value is None
tm.assert_series_equal(s, ser)
def test_replace_mixed_types(self):
ser = pd.Series(np.arange(5), dtype="int64")
def check_replace(to_rep, val, expected):
sc = ser.copy()
result = ser.replace(to_rep, val)
return_value = sc.replace(to_rep, val, inplace=True)
assert return_value is None
tm.assert_series_equal(expected, result)
tm.assert_series_equal(expected, sc)
# 3.0 can still be held in our int64 series, so we do not upcast GH#44940
tr, v = [3], [3.0]
check_replace(tr, v, ser)
# Note this matches what we get with the scalars 3 and 3.0
check_replace(tr[0], v[0], ser)
# MUST upcast to float
e = pd.Series([0, 1, 2, 3.5, 4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, "a"])
tr, v = [3, 4], [3.5, "a"]
check_replace(tr, v, e)
# again casts to object
e = pd.Series([0, 1, 2, 3.5, pd.Timestamp("20130101")])
tr, v = [3, 4], [3.5, pd.Timestamp("20130101")]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, True], dtype="object")
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D"))
result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"])
expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = pd.Series([True, False, True])
result = s.replace("fun", "in-the-sun")
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = pd.Series([True, False, True])
result = s.replace(True, "2u")
expected = pd.Series(["2u", False, "2u"])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = pd.Series([True, False, True])
result = s.replace(True, False)
expected = pd.Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = pd.Series([True, False, True])
result = s.replace({"asdf": "asdb", True: "yes"})
expected = pd.Series(["yes", False, "yes"])
tm.assert_series_equal(result, expected)
def test_replace_Int_with_na(self, any_int_ea_dtype):
# GH 38267
result = pd.Series([0, None], dtype=any_int_ea_dtype).replace(0, pd.NA)
expected = pd.Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)
tm.assert_series_equal(result, expected)
result = pd.Series([0, 1], dtype=any_int_ea_dtype).replace(0, pd.NA)
result.replace(1, pd.NA, inplace=True)
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
from statistics import stdev, mean
import pandas as pd
import pickle
from .visualisations import stability_visualizer
import re
def pickle_reader(filename):
accuracies_ = pickle.load(open(filename, 'rb'))
return accuracies_
class ResultAnalysis():
def __init__(self, filename, seq_len):
self.pkl_file = filename + '_logs.pkl'
self.txt_file = filename + '.txt'
self.seq_len = seq_len
self.data = pickle_reader(self.pkl_file)
self.size_to_reports_dict = self.data['reports']
self.size_to_detailed_acc = self.data['detailed_acc']
def compute_avg_report_by_sizes(self):
"""
        Function to compute the average over a list of scikit-learn classification_report results (as dicts).
:param size_to_reports_dict: Dictionary with keys = labels indicating tp/holdout sizes,
values = list of sklearn's classification_report objects
:return: dict with keys = keys of size_to_reports_dict, values = metrics averaged over the respective sequence of
values of size_to_reports_dict
"""
# del self.size_to_reports_dict[4]
# del self.size_to_reports_dict[6]
# del self.size_to_reports_dict[8]
# del self.size_to_reports_dict[10]
# del self.size_to_reports_dict[15]
accuracy_by_person = {}
precision = 3 # round off limit
for size_key, _list in self.size_to_reports_dict.items():
print(len(_list), size_key)
# assert len(_list) == self.seq_len, f"Sequence lengths should be exactly {self.seq_len}."
# calculate avg over this tp
by_tp = {key: {'precision': [], 'recall': [], 'f1-score': [], 'support': []} if key != 'accuracy' else []
for key, val in _list[0].items()}
for sequence in _list:
for key, val in sequence.items():
if key != 'accuracy':
for metric, score in val.items():
by_tp[key][metric].append(score)
else:
by_tp[key].append(val)
averaged_dict = {key: {metric: f"{mean(scores) * 100:.{precision}f} +/- {stdev(scores) * 100:.{precision}f}" for
metric, scores in
metric_dict.items()} if key != 'accuracy' else
f"{mean(metric_dict) * 100:.{precision}f} +/- {stdev(metric_dict) * 100:.{precision}f}" for key, metric_dict in
by_tp.items()}
accuracy_by_person[size_key] = averaged_dict
for key in self.size_to_reports_dict.keys():
print(" =================== " * 10)
print(f"\n\nAveraged classification report for tp/holdout: {key}")
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print( | pd.DataFrame(accuracy_by_person[key]) | pandas.DataFrame |
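A toy sketch of the averaging idea implemented by compute_avg_report_by_sizes above (the numbers and the output_dict=True report shape are illustrative assumptions):

from statistics import mean, stdev

# two toy classification reports for the same tp/holdout size
reports = [{"accuracy": 0.80, "macro avg": {"f1-score": 0.78}},
           {"accuracy": 0.84, "macro avg": {"f1-score": 0.81}}]
f1_scores = [r["macro avg"]["f1-score"] for r in reports]
print(f"{mean(f1_scores) * 100:.3f} +/- {stdev(f1_scores) * 100:.3f}")   # 79.500 +/- 2.121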
import os
import warnings
from typing import List
import joblib
import mlflow
import pandas as pd
from fastapi import FastAPI
from pydantic import BaseModel
pokemon_app = FastAPI()
class Pokemon(BaseModel):
hp: int
attack: int
defence: int
special_attack: int
special_defense: int
speed: int
@pokemon_app.get("/")
def show_welcome_page():
model_name: str = os.getenv("MLFLOW_MODEL_NAME")
model_version: str = os.getenv("MLFLOW_MODEL_VERSION")
return {"Message": "Welcome to pokemon api",
"Model_name": f"{model_name}",
"Model_version": f"{model_version}"}
@pokemon_app.get("/pokemon-type")
def get_pokemon_type(hp: int, attack: int, defence: int, special_attack: int, special_defense: int, speed: int):
df = pd.DataFrame(columns=["hp", "attack", "defence", "special_attack", "special_defense", "speed"])
df.loc[0] = pd.Series({'hp': hp, 'attack': attack, 'defence': defence, 'special_attack': special_attack,
'special_defense': special_defense, 'speed': speed})
model = get_model()
res = bool(model.predict(df)[0])
return {"is_legendary": res}
@pokemon_app.post("/pokemon-type")
def post_pokemon_type(pokemon: Pokemon):
df = | pd.DataFrame(columns=["hp", "attack", "defence", "special_attack", "special_defense", "speed"]) | pandas.DataFrame |