prompt | completion | api
---|---|---
stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90
import pandas as pd
import numpy as np
import csv
import json
from math import radians, cos, sin, asin, sqrt
import os
class Peaje:
def __init__(self, name, code):
self.code = code
self.name = name
self.trafico = {}
self.recaudo = {}
print(os.path.dirname(__file__))
home = os.getcwd()
file = "trafico.csv"
result = os.path.join(home, file)
with open(result, 'r') as f:
trafico_file = pd.read_csv(f, sep=",")
file = "recaudo.csv"
result = os.path.join(home, file)
with open(result, 'r') as f:
recaudo_file = pd.read_csv(f, sep=",")  # api: pandas.read_csv
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)  # api: pandas.util.testing.box_expected
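# ------------------------------------------------------------------
# Illustrative sketch (not one of the parametrized tests above): the fixtures
# above encode the expectation that the different "box" classes behave
# identically under arithmetic with a Timedelta scalar.  A minimal standalone
# check of that claim, using the same testing helpers, might look like the
# following; the function name is made up for illustration.
def _demo_box_equivalence():
    expected = TimedeltaIndex(['1 days', '2 days', '3 days'])
    tm.assert_equal(pd.Int64Index(range(1, 4)) * Timedelta(days=1), expected)
    tm.assert_equal(Series(range(1, 4)) * Timedelta(days=1), Series(expected))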
import numpy, pandas
from sklearn.base import TransformerMixin
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression, mutual_info_regression
class SelectNAndKBest(
TransformerMixin,
):
"""
Selects the first N features plus the K best other features.
"""
def __init__(self, n, k, func=None):
self._n = n
self._k = k
self._func = mutual_info_regression if func is None else func
def fit(self, X, y):
if self._k > X.shape[1]-self._n:
use_k = 'all'
else:
use_k = self._k
if X.shape[1]-self._n <=0:
self._feature_selector = None
elif isinstance(X, pandas.DataFrame):
self._feature_selector = SelectKBest(self._func, k=use_k).fit(X.iloc[:,self._n:], y)
else:
self._feature_selector = SelectKBest(self._func, k=use_k).fit(X[:,self._n:], y)
return self
def transform(self, X):
if isinstance(X, pandas.DataFrame):
X_outside = X.iloc[:,self._n:]
if self._feature_selector is None:
X2 = X_outside
else:
X2 = self._feature_selector.transform(X_outside)
X2 = pandas.DataFrame(X2, index=X.index, columns=X_outside.columns[self._feature_selector.get_support()])
return pandas.concat([X.iloc[:,:self._n], X2], axis=1)  # api: pandas.concat
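# Illustrative usage sketch for SelectNAndKBest (not part of the class above).
# The data, column names, and parameter values below are made up: the first
# two columns are always kept, and the two best-scoring of the remaining four
# columns are selected with f_regression.
def _demo_select_n_and_k_best():
    rng = numpy.random.RandomState(0)
    X = pandas.DataFrame(rng.rand(100, 6),
                         columns=['x%d' % i for i in range(6)])
    y = X['x0'] + 0.1 * rng.rand(100)
    selector = SelectNAndKBest(n=2, k=2, func=f_regression)
    X_reduced = selector.fit(X, y).transform(X)
    return X_reduced  # 4 columns: x0, x1, plus the two best of x2..x5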
from stix_shifter.stix_transmission.src.modules.cloudsql import cloudsql_connector
from stix_shifter.stix_transmission.src.modules.base.base_status_connector import Status
import pandas as pd
from unittest.mock import patch
import json
import unittest
@patch('ibmcloudsql.SQLQuery.__init__', autospec=True)
@patch('ibmcloudsql.SQLQuery.logon', autospec=True)
class TestCloudSQLConnection(unittest.TestCase, object):
def test_is_async(self, mock_api_client_logon, mock_api_client):
mock_api_client_logon.return_value = None
mock_api_client.return_value = None
module = cloudsql_connector
config = {
"auth": {
"bxapikey": "placeholder"
},
"client_info": "placeholder"
}
connection = {
"instance_crn": "placeholder",
"target_cos": "placeholder"
}
check_async = module.Connector(connection, config).is_async
assert check_async
@patch('ibmcloudsql.SQLQuery.get_jobs')
def test_ping_endpoint(self, mock_ping_response, mock_api_client_logon,
mock_api_client):
mock_api_client_logon.return_value = None
mock_api_client.return_value = None
mocked_return_value = '[{"job_id": "placeholder", "status": "placeholder",\
"user_id": "placeholder", "statement": "placeholder",\
"resultset_location": "placeholder", "submit_time": "placeholder",\
"end_time": "placeholder", "error": "placeholder", error_message": "placeholder"}]'
mock_ping_response.return_value = mocked_return_value
module = cloudsql_connector
config = {
"auth": {
"bxapikey": "placeholder"
},
"client_info": "placeholder"
}
connection = {
"instance_crn": "placeholder",
"target_cos": "placeholder"
}
ping_response = module.Connector(connection, config).ping()
assert ping_response is not None
assert ping_response['success']
@patch('ibmcloudsql.SQLQuery.submit_sql')
def test_query_response(self, mock_query_response, mock_api_client_logon,
mock_api_client):
mock_api_client_logon.return_value = None
mock_api_client.return_value = None
mocked_return_value = '108cb8b0-0744-4dd9-8e35-ea8311cd6211'
mock_query_response.return_value = mocked_return_value
module = cloudsql_connector
config = {
"auth": {
"bxapikey": "placeholder"
},
"client_info": "placeholder"
}
connection = {
"instance_crn": "placeholder",
"target_cos": "placeholder"
}
query = '{"query":"SELECT target.id from cos://us-geo/at-data/rest.1*.json STORED AS JSON c"}'
query_response = module.Connector(connection, config).create_query_connection(query)
assert query_response is not None
assert 'search_id' in query_response
assert query_response['search_id'] == "108cb8b0-0744-4dd9-8e35-ea8311cd6211"
@patch('ibmcloudsql.SQLQuery.get_job', autospec=True)
def test_status_response(self, mock_status_response,
mock_api_client_logon, mock_api_client):
mock_api_client_logon.return_value = None
mock_api_client.return_value = None
mocked_return_value = json.loads('{"status": "completed", "end_time": "2018-08-28T15:51:24.899Z", "submit_time": "2018-08-28T15:51:19.899Z"}')
mock_status_response.return_value = mocked_return_value
module = cloudsql_connector
config = {
"auth": {
"bxapikey": "placeholder"
},
"client_info": "placeholder"
}
connection = {
"instance_crn": "placeholder",
"target_cos": "placeholder"
}
search_id = "108cb8b0-0744-4dd9-8e35-ea8311cd6211"
status_response = module.Connector(connection, config).create_status_connection(search_id)
assert status_response is not None
assert status_response['success']
assert 'status' in status_response
assert status_response['status'] == Status.COMPLETED.value
@patch('stix_shifter.stix_transmission.src.modules.cloudsql.cloudsql_results_connector.CloudSQLResultsConnector.records', autospec=True)
def test_results_response(self, mock_results_response,
mock_api_client_logon, mock_api_client):
mock_api_client_logon.return_value = None
mock_api_client.return_value = None
mocked_return_value = pd.DataFrame(columns=['id'])
mocked_return_value = mocked_return_value.append([{'id': 'crn:v1:bluemix:public:iam-identity::a/::apikey:1234'}], ignore_index=True)
mock_results_response.return_value = mocked_return_value
module = cloudsql_connector
config = {
"auth": {
"bxapikey": "placeholder"
},
"client_info": "placeholder"
}
connection = {
"instance_crn": "placeholder",
"target_cos": "placeholder"
}
search_id = "108cb8b0-0744-4dd9-8e35-ea8311cd6211"
offset = 0
length = 1
results_response = module.Connector(connection, config).create_results_connection(search_id, offset, length)
assert results_response is not None
assert results_response['success']
assert 'data' in results_response
assert len(results_response['data']) > 0
@patch('ibmcloudsql.SQLQuery.delete_result', autospec=True)
def test_delete_response(self, mock_delete_response,
mock_api_client_logon, mock_api_client):
mock_api_client_logon.return_value = None
mock_api_client.return_value = None
mocked_return_value = pd.DataFrame(columns=['Deleted Object'])  # api: pandas.DataFrame
"""
Procedures for fitting marginal regression models to dependent data
using Generalized Estimating Equations.
References
----------
K.Y. Liang and S.L. Zeger. "Longitudinal data analysis using
generalized linear models". Biometrika (1986) 73 (1): 13-22.
S.L. Zeger and K.Y. Liang. "Longitudinal Data Analysis for Discrete and
Continuous Outcomes". Biometrics Vol. 42, No. 1 (Mar., 1986),
pp. 121-130
A. Rotnitzky and N.P. Jewell (1990). "Hypothesis testing of regression
parameters in semiparametric generalized linear models for cluster
correlated data", Biometrika, 77, 485-497.
Xu Guo and Wei Pan (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
L.A. Mancl and T.A. DeRouen (2001). A covariance estimator for GEE with
improved small-sample properties. Biometrics. 2001 Mar;57(1):126-34.
"""
from __future__ import division
from statsmodels.compat.python import range, lzip, zip
import numpy as np
from scipy import stats
import pandas as pd
import patsy
from collections import defaultdict
from statsmodels.tools.decorators import cache_readonly
import statsmodels.base.model as base
# used for wrapper:
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import cov_struct as cov_structs
import statsmodels.genmod.families.varfuncs as varfuncs
from statsmodels.genmod.families.links import Link
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
DomainWarning,
IterationLimitWarning,
ValueWarning)
import warnings
from statsmodels.graphics._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
from statsmodels.discrete.discrete_margins import (
_get_margeff_exog, _check_margeff_args, _effects_at, margeff_cov_with_se,
_check_at_is_all, _transform_names, _check_discrete_args,
_get_dummy_index, _get_count_index)
class ParameterConstraint(object):
"""
A class for managing linear equality constraints for a parameter
vector.
"""
def __init__(self, lhs, rhs, exog):
"""
Parameters
----------
lhs : ndarray
A q x p matrix which is the left hand side of the
constraint lhs * param = rhs. The number of constraints is
q >= 1 and p is the dimension of the parameter vector.
rhs : ndarray
A 1-dimensional vector of length q which is the right hand
side of the constraint equation.
exog : ndarray
The n x p exogenous data for the full model.
"""
# In case a row or column vector is passed (patsy linear
# constraints passes a column vector).
rhs = np.atleast_1d(rhs.squeeze())
if rhs.ndim > 1:
raise ValueError("The right hand side of the constraint "
"must be a vector.")
if len(rhs) != lhs.shape[0]:
raise ValueError("The number of rows of the left hand "
"side constraint matrix L must equal "
"the length of the right hand side "
"constraint vector R.")
self.lhs = lhs
self.rhs = rhs
# The columns of lhs0 are an orthogonal basis for the
# orthogonal complement to row(lhs), the columns of lhs1 are
# an orthogonal basis for row(lhs). The columns of lhsf =
# [lhs0, lhs1] are mutually orthogonal.
lhs_u, lhs_s, lhs_vt = np.linalg.svd(lhs.T, full_matrices=1)
self.lhs0 = lhs_u[:, len(lhs_s):]
self.lhs1 = lhs_u[:, 0:len(lhs_s)]
self.lhsf = np.hstack((self.lhs0, self.lhs1))
# param0 is one solution to the underdetermined system
# L * param = R.
self.param0 = np.dot(self.lhs1, np.dot(lhs_vt, self.rhs) /
lhs_s)
self._offset_increment = np.dot(exog, self.param0)
self.orig_exog = exog
self.exog_fulltrans = np.dot(exog, self.lhsf)
def offset_increment(self):
"""
Returns a vector that should be added to the offset vector to
accommodate the constraint.
Parameters
----------
exog : array-like
The exogeneous data for the model.
"""
return self._offset_increment
def reduced_exog(self):
"""
Returns a linearly transformed exog matrix whose columns span
the constrained model space.
Parameters
----------
exog : array-like
The exogeneous data for the model.
"""
return self.exog_fulltrans[:, 0:self.lhs0.shape[1]]
def restore_exog(self):
"""
Returns the full exog matrix before it was reduced to
satisfy the constraint.
"""
return self.orig_exog
def unpack_param(self, params):
"""
Converts the parameter vector `params` from reduced to full
coordinates.
"""
return self.param0 + np.dot(self.lhs0, params)
def unpack_cov(self, bcov):
"""
Converts the covariance matrix `bcov` from reduced to full
coordinates.
"""
return np.dot(self.lhs0, np.dot(bcov, self.lhs0.T))
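# Illustrative sketch (not part of the class above): constraining the second
# of three coefficients to zero corresponds to lhs = [[0, 1, 0]] and
# rhs = [0].  The reduced design then has two columns, and `unpack_param`
# maps estimates from the reduced coordinates back to the full model.  The
# function name and data are made up for illustration.
def _example_parameter_constraint():
    exog = np.random.RandomState(0).normal(size=(10, 3))
    pc = ParameterConstraint(np.array([[0., 1., 0.]]), np.array([0.]), exog)
    reduced = pc.reduced_exog()                   # shape (10, 2)
    full = pc.unpack_param(np.array([1.0, 2.0]))  # length-3 parameter vector
    # full[1] == 0, so the constrained coefficient is zero as required.
    return reduced, full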
_gee_init_doc = """
Marginal regression model fit using Generalized Estimating Equations.
GEE can be used to fit Generalized Linear Models (GLMs) when the
data have a grouped structure, and the observations are possibly
correlated within groups but not between groups.
Parameters
----------
endog : array-like
1d array of endogenous values (i.e. responses, outcomes,
dependent variables, or 'Y' values).
exog : array-like
2d array of exogeneous values (i.e. covariates, predictors,
independent variables, regressors, or 'X' values). A `nobs x
k` array where `nobs` is the number of observations and `k` is
the number of regressors. An intercept is not included by
default and should be added by the user. See
`statsmodels.tools.add_constant`.
groups : array-like
A 1d array of length `nobs` containing the group labels.
time : array-like
A 2d array of time (or other index) values, used by some
dependence structures to define similarity relationships among
observations within a cluster.
family : family class instance
%(family_doc)s
cov_struct : CovStruct class instance
The default is Independence. To specify an exchangeable
structure use cov_struct = Exchangeable(). See
statsmodels.genmod.cov_struct.CovStruct for more
information.
offset : array-like
An offset to be included in the fit. If provided, must be
an array whose length is the number of rows in exog.
dep_data : array-like
Additional data passed to the dependence structure.
constraint : (ndarray, ndarray)
If provided, the constraint is a tuple (L, R) such that the
model parameters are estimated under the constraint L *
param = R, where L is a q x p matrix and R is a
q-dimensional vector. If constraint is provided, a score
test is performed to compare the constrained model to the
unconstrained model.
update_dep : bool
If true, the dependence parameters are optimized, otherwise
they are held fixed at their starting values.
weights : array-like
An array of weights to use in the analysis. The weights must
be constant within each group. These correspond to
probability weights (pweights) in Stata.
%(extra_params)s
See Also
--------
statsmodels.genmod.families.family
:ref:`families`
:ref:`links`
Notes
-----
Only the following combinations make sense for family and link ::
             + ident log logit probit cloglog pow opow nbinom loglog logc
Gaussian     |   x    x                        x
inv Gaussian |   x    x                        x
binomial     |   x    x    x     x       x     x   x            x     x
Poisson      |   x    x                        x
neg binomial |   x    x                        x         x
gamma        |   x    x                        x
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer
to are already arrays and these arrays are changed, endog and
exog will change.
The "robust" covariance type is the standard "sandwich estimator"
(e.g. Liang and Zeger (1986)). It is the default here and in most
other packages. The "naive" estimator gives smaller standard
errors, but is only correct if the working correlation structure
is correctly specified. The "bias reduced" estimator of Mancl and
DeRouen (Biometrics, 2001) reduces the downward bias of the robust
estimator.
The robust covariance provided here follows Liang and Zeger (1986)
and agrees with R's gee implementation. To obtain the robust
standard errors reported in Stata, multiply by sqrt(N / (N - g)),
where N is the total sample size, and g is the average group size.
Examples
--------
%(example)s
"""
_gee_family_doc = """\
The default is Gaussian. To specify the binomial
distribution use `family=sm.families.Binomial()`. Each family
can take a link instance as an argument. See
statsmodels.genmod.families.family for more information."""
_gee_ordinal_family_doc = """\
The only family supported is `Binomial`. The default `Logit`
link may be replaced with `probit` if desired."""
_gee_nominal_family_doc = """\
The default value `None` uses a multinomial logit family
specifically designed for use with GEE. Setting this
argument to a non-default value is not currently supported."""
_gee_fit_doc = """
Fits a marginal regression model using generalized estimating
equations (GEE).
Parameters
----------
maxiter : integer
The maximum number of iterations
ctol : float
The convergence criterion for stopping the Gauss-Seidel
iterations
start_params : array-like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
params_niter : integer
The number of Gauss-Seidel updates of the mean structure
parameters that take place prior to each update of the
dependence structure.
first_dep_update : integer
No dependence structure updates occur before this
iteration number.
cov_type : string
One of "robust", "naive", or "bias_reduced".
ddof_scale : scalar or None
The scale parameter is estimated as the sum of squared
Pearson residuals divided by `N - ddof_scale`, where N
is the total sample size. If `ddof_scale` is None, the
number of covariates (including an intercept if present)
is used.
scaling_factor : scalar
The estimated covariance of the parameter estimates is
scaled by this value. Default is 1, Stata uses N / (N - g),
where N is the total sample size and g is the average group
size.
Returns
-------
An instance of the GEEResults class or subclass
Notes
-----
If convergence difficulties occur, increase the values of
`first_dep_update` and/or `params_niter`. Setting
`first_dep_update` to a greater value (e.g. ~10-20) causes the
algorithm to move close to the GLM solution before attempting
to identify the dependence structure.
For the Gaussian family, there is no benefit to setting
`params_niter` to a value greater than 1, since the mean
structure parameters converge in one step.
"""
_gee_results_doc = """
Attributes
----------
cov_params_default : ndarray
default covariance of the parameter estimates. Is chosen among one
of the following three based on `cov_type`
cov_robust : ndarray
covariance of the parameter estimates that is robust
cov_naive : ndarray
covariance of the parameter estimates that is not robust to
correlation or variance misspecification
cov_robust_bc : ndarray
covariance of the parameter estimates that is robust and bias
reduced
converged : bool
indicator for convergence of the optimization.
True if the norm of the score is smaller than a threshold
cov_type : string
string indicating whether a "robust", "naive" or "bias_reduced"
covariance is used as default
fit_history : dict
Contains information about the iterations.
fittedvalues : array
Linear predicted values for the fitted model.
dot(exog, params)
model : class instance
Pointer to GEE model instance that called `fit`.
normalized_cov_params : array
See GEE docstring
params : array
The coefficients of the fitted model. Note that
interpretation of the coefficients often depends on the
distribution family and the data.
scale : float
The estimate of the scale / dispersion for the model fit.
See GEE.fit for more information.
score_norm : float
norm of the score at the end of the iterative estimation.
bse : array
The standard errors of the fitted GEE parameters.
"""
_gee_example = """
Logistic regression with autoregressive working dependence:
>>> import statsmodels.api as sm
>>> family = sm.families.Binomial()
>>> va = sm.cov_struct.Autoregressive()
>>> model = sm.GEE(endog, exog, group, family=family, cov_struct=va)
>>> result = model.fit()
>>> print(result.summary())
Use formulas to fit a Poisson GLM with independent working
dependence:
>>> import statsmodels.api as sm
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = sm.GEE.from_formula("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
Equivalent, using the formula API:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = smf.gee("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_ordinal_example = """
Fit an ordinal regression model using GEE, with "global
odds ratio" dependence:
>>> import statsmodels.api as sm
>>> gor = sm.cov_struct.GlobalOddsRatio("ordinal")
>>> model = sm.OrdinalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.formula.api as smf
>>> model = smf.ordinal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_nominal_example = """
Fit a nominal regression model using GEE:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> gor = sm.cov_struct.GlobalOddsRatio("nominal")
>>> model = sm.NominalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.api as sm
>>> model = sm.NominalGEE.from_formula("y ~ x1 + x2", groups,
data, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using the formula API:
>>> import statsmodels.formula.api as smf
>>> model = smf.nominal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
def _check_args(endog, exog, groups, time, offset, exposure):
if endog.size != exog.shape[0]:
raise ValueError("Leading dimension of 'exog' should match "
"length of 'endog'")
if groups.size != endog.size:
raise ValueError("'groups' and 'endog' should have the same size")
if time is not None and (time.size != endog.size):
raise ValueError("'time' and 'endog' should have the same size")
if offset is not None and (offset.size != endog.size):
raise ValueError("'offset and 'endog' should have the same size")
if exposure is not None and (exposure.size != endog.size):
raise ValueError("'exposure' and 'endog' should have the same size")
class GEE(base.Model):
__doc__ = (
" Estimation of marginal regression models using Generalized\n"
" Estimating Equations (GEE).\n" + _gee_init_doc %
{'extra_params': base._missing_param_doc,
'family_doc': _gee_family_doc,
'example': _gee_example})
cached_means = None
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
exposure=None, dep_data=None, constraint=None,
update_dep=True, weights=None, **kwargs):
if family is not None:
if not isinstance(family.link, tuple(family.safe_links)):
import warnings
msg = ("The {0} link function does not respect the "
"domain of the {1} family.")
warnings.warn(msg.format(family.link.__class__.__name__,
family.__class__.__name__),
DomainWarning)
groups = np.asarray(groups) # in case groups is pandas
if "missing_idx" in kwargs and kwargs["missing_idx"] is not None:
# If here, we are entering from super.from_formula; missing
# has already been dropped from endog and exog, but not from
# the other variables.
ii = ~kwargs["missing_idx"]
groups = groups[ii]
if time is not None:
time = time[ii]
if offset is not None:
offset = offset[ii]
if exposure is not None:
exposure = exposure[ii]
del kwargs["missing_idx"]
_check_args(endog, exog, groups, time, offset, exposure)
self.missing = missing
self.dep_data = dep_data
self.constraint = constraint
self.update_dep = update_dep
self._fit_history = defaultdict(list)
# Pass groups, time, offset, and dep_data so they are
# processed for missing data along with endog and exog.
# Calling super creates self.exog, self.endog, etc. as
# ndarrays and the original exog, endog, etc. are
# self.data.endog, etc.
super(GEE, self).__init__(endog, exog, groups=groups,
time=time, offset=offset,
exposure=exposure, weights=weights,
dep_data=dep_data, missing=missing,
**kwargs)
self._init_keys.extend(["update_dep", "constraint", "family",
"cov_struct"])
# Handle the family argument
if family is None:
family = families.Gaussian()
else:
if not issubclass(family.__class__, families.Family):
raise ValueError("GEE: `family` must be a genmod "
"family instance")
self.family = family
# Handle the cov_struct argument
if cov_struct is None:
cov_struct = cov_structs.Independence()
else:
if not issubclass(cov_struct.__class__, cov_structs.CovStruct):
raise ValueError("GEE: `cov_struct` must be a genmod "
"cov_struct instance")
self.cov_struct = cov_struct
# Handle the offset and exposure
self._offset_exposure = None
if offset is not None:
self._offset_exposure = self.offset.copy()
self.offset = offset
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError(
"exposure can only be used with the log link function")
if self._offset_exposure is not None:
self._offset_exposure += np.log(exposure)
else:
self._offset_exposure = np.log(exposure)
self.exposure = exposure
# Handle the constraint
self.constraint = None
if constraint is not None:
if len(constraint) != 2:
raise ValueError("GEE: `constraint` must be a 2-tuple.")
if constraint[0].shape[1] != self.exog.shape[1]:
raise ValueError(
"GEE: the left hand side of the constraint must have "
"the same number of columns as the exog matrix.")
self.constraint = ParameterConstraint(constraint[0],
constraint[1],
self.exog)
if self._offset_exposure is not None:
self._offset_exposure += self.constraint.offset_increment()
else:
self._offset_exposure = (
self.constraint.offset_increment().copy())
self.exog = self.constraint.reduced_exog()
# Create list of row indices for each group
group_labels, ix = np.unique(self.groups, return_inverse=True)
se = pd.Series(index=np.arange(len(ix)))
gb = se.groupby(ix).groups
dk = [(lb, np.asarray(gb[k])) for k, lb in enumerate(group_labels)]
self.group_indices = dict(dk)
self.group_labels = group_labels
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
self.endog_li = self.cluster_list(self.endog)
self.exog_li = self.cluster_list(self.exog)
if self.weights is not None:
self.weights_li = self.cluster_list(self.weights)
self.weights_li = [x[0] for x in self.weights_li]
self.weights_li = np.asarray(self.weights_li)
self.num_group = len(self.endog_li)
# Time defaults to a 1d grid with equal spacing
if self.time is not None:
self.time = np.asarray(self.time, np.float64)
if self.time.ndim == 1:
self.time = self.time[:, None]
self.time_li = self.cluster_list(self.time)
else:
self.time_li = \
[np.arange(len(y), dtype=np.float64)[:, None]
for y in self.endog_li]
self.time = np.concatenate(self.time_li)
if self._offset_exposure is not None:
self.offset_li = self.cluster_list(self._offset_exposure)
else:
self.offset_li = None
if constraint is not None:
self.constraint.exog_fulltrans_li = \
self.cluster_list(self.constraint.exog_fulltrans)
self.family = family
self.cov_struct.initialize(self)
# Total sample size
group_ns = [len(y) for y in self.endog_li]
self.nobs = sum(group_ns)
# The following are column based, not on rank see #1928
self.df_model = self.exog.shape[1] - 1 # assumes constant
self.df_resid = self.nobs - self.exog.shape[1]
# Skip the covariance updates if all groups have a single
# observation (reduces to fitting a GLM).
maxgroup = max([len(x) for x in self.endog_li])
if maxgroup == 1:
self.update_dep = False
# Override to allow groups and time to be passed as variable
# names.
@classmethod
def from_formula(cls, formula, groups, data, subset=None,
time=None, offset=None, exposure=None,
*args, **kwargs):
"""
Create a GEE model instance from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
groups : array-like or string
Array of grouping labels. If a string, this is the name
of a variable in `data` that contains the grouping labels.
data : array-like
The data for the model.
subset : array-like
An array-like object of booleans, integers, or index
values that indicate the subset of the data to use when
fitting the model.
time : array-like or string
The time values, used for dependence structures involving
distances between observations. If a string, this is the
name of a variable in `data` that contains the time
values.
offset : array-like or string
The offset values, added to the linear predictor. If a
string, this is the name of a variable in `data` that
contains the offset values.
exposure : array-like or string
The exposure values, only used if the link function is the
logarithm function, in which case the log of `exposure`
is added to the offset (if any). If a string, this is the
name of a variable in `data` that contains the offset
values.
%(missing_param_doc)s
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with two exceptions. `dep_data`
is processed as described below. The ``eval_env`` keyword is
passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace.
If you wish to use a "clean" environment set ``eval_env=-1``.
Optional arguments
------------------
dep_data : string or array-like
Data used for estimating the dependence structure. See
specific dependence structure classes (e.g. Nested) for
details. If `dep_data` is a string, it is interpreted as
a formula that is applied to `data`. If it is an array, it
must be an array of strings corresponding to column names in
`data`. Otherwise it must be an array-like with the same
number of rows as data.
Returns
-------
model : GEE model instance
Notes
-----
`data` must define __getitem__ with the keys in the formula
terms (e.g., a numpy structured or rec array, a dictionary, or
a pandas DataFrame). args and kwargs are passed on to the
model instantiation.
""" % {'missing_param_doc': base._missing_param_doc}
groups_name = "Groups"
if isinstance(groups, str):
groups_name = groups
groups = data[groups]
if isinstance(time, str):
time = data[time]
if isinstance(offset, str):
offset = data[offset]
if isinstance(exposure, str):
exposure = data[exposure]
dep_data = kwargs.get("dep_data")
dep_data_names = None
if dep_data is not None:
if isinstance(dep_data, str):
dep_data = patsy.dmatrix(dep_data, data,
return_type='dataframe')
dep_data_names = dep_data.columns.tolist()
else:
dep_data_names = list(dep_data)
dep_data = data[dep_data]
kwargs["dep_data"] = np.asarray(dep_data)
model = super(GEE, cls).from_formula(formula, data=data, subset=subset,
groups=groups, time=time,
offset=offset,
exposure=exposure,
*args, **kwargs)
if dep_data_names is not None:
model._dep_data_names = dep_data_names
model._groups_name = groups_name
return model
def cluster_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
cluster structure.
"""
if array.ndim == 1:
return [np.array(array[self.group_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.group_indices[k], :])
for k in self.group_labels]
def compare_score_test(self, submodel):
"""
Perform a score test for the given submodel against this model.
Parameters
----------
submodel : GEEResults instance
A fitted GEE model that is a submodel of this model.
Returns
-------
A dictionary with keys "statistic", "p-value", and "df",
containing the score test statistic, its chi^2 p-value,
and the degrees of freedom used to compute the p-value.
Notes
-----
The score test can be performed without calling 'fit' on the
larger model. The provided submodel must be obtained from a
fitted GEE.
This method performs the same score test as can be obtained by
fitting the GEE with a linear constraint and calling `score_test`
on the results.
References
----------
Xu Guo and Wei Pan (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
# Check consistency between model and submodel (not a comprehensive
# check)
submod = submodel.model
if self.exog.shape[0] != submod.exog.shape[0]:
msg = "Model and submodel have different numbers of cases."
raise ValueError(msg)
if self.exog.shape[1] == submod.exog.shape[1]:
msg = "Model and submodel have the same number of variables"
warnings.warn(msg)
if not isinstance(self.family, type(submod.family)):
msg = "Model and submodel have different GLM families."
warnings.warn(msg)
if not isinstance(self.cov_struct, type(submod.cov_struct)):
warnings.warn("Model and submodel have different GEE covariance "
"structures.")
if not np.equal(self.weights, submod.weights).all():
msg = "Model and submodel should have the same weights."
warnings.warn(msg)
# Get the positions of the submodel variables in the
# parent model
qm, qc = _score_test_submodel(self, submodel.model)
if qm is None:
msg = "The provided model is not a submodel."
raise ValueError(msg)
# Embed the submodel params into a params vector for the
# parent model
params_ex = np.dot(qm, submodel.params)
# Attempt to preserve the state of the parent model
cov_struct_save = self.cov_struct
import copy
cached_means_save = copy.deepcopy(self.cached_means)
# Get the score vector of the submodel params in
# the parent model
self.cov_struct = submodel.cov_struct
self.update_cached_means(params_ex)
_, score = self._update_mean_params()
if score is None:
msg = "Singular matrix encountered in GEE score test"
warnings.warn(msg, ConvergenceWarning)
return None
if not hasattr(self, "ddof_scale"):
self.ddof_scale = self.exog.shape[1]
if not hasattr(self, "scaling_factor"):
self.scaling_factor = 1
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = np.dot(qc.T, score) / scale
amat = np.linalg.inv(ncov1)
bmat_11 = np.dot(qm.T, np.dot(cmat, qm))
bmat_22 = np.dot(qc.T, np.dot(cmat, qc))
bmat_12 = np.dot(qm.T, np.dot(cmat, qc))
amat_11 = np.dot(qm.T, np.dot(amat, qm))
amat_12 = np.dot(qm.T, np.dot(amat, qc))
score_cov = bmat_22 - np.dot(amat_12.T,
np.linalg.solve(amat_11, bmat_12))
score_cov -= np.dot(bmat_12.T,
np.linalg.solve(amat_11, amat_12))
score_cov += np.dot(amat_12.T,
np.dot(np.linalg.solve(amat_11, bmat_11),
np.linalg.solve(amat_11, amat_12)))
# Attempt to restore state
self.cov_struct = cov_struct_save
self.cached_means = cached_means_save
from scipy.stats.distributions import chi2
score_statistic = np.dot(score2,
np.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
return {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
def estimate_scale(self):
"""
Estimate the dispersion/scale.
The scale parameter for binomial, Poisson, and multinomial
families is fixed at 1, otherwise it is estimated from
the data.
"""
if isinstance(self.family, (families.Binomial, families.Poisson,
_Multinomial)):
return 1.
endog = self.endog_li
cached_means = self.cached_means
nobs = self.nobs
varfunc = self.family.variance
scale = 0.
fsum = 0.
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
expval, _ = cached_means[i]
f = self.weights_li[i] if self.weights is not None else 1.
sdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / sdev
scale += f * np.sum(resid ** 2)
fsum += f * len(endog[i])
scale /= (fsum * (nobs - self.ddof_scale) / float(nobs))
return scale
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array-like
The exogeneous data at which the derivative is computed.
lin_pred : array-like
The values of the linear predictor.
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
Notes
-----
If there is an offset or exposure, it should be added to
`lin_pred` prior to calling this function.
"""
idl = self.family.link.inverse_deriv(lin_pred)
dmat = exog * idl[:, None]
return dmat
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog.
Parameters
----------
exog : array-like
Values of the independent variables at which the derivative
is calculated.
params : array-like
Parameter values at which the derivative is calculated.
offset_exposure : array-like, optional
Combined offset and exposure.
Returns
-------
The derivative of the expected endog with respect to exog.
"""
lin_pred = np.dot(exog, params)
if offset_exposure is not None:
lin_pred += offset_exposure
idl = self.family.link.inverse_deriv(lin_pred)
dmat = np.outer(idl, params)
return dmat
def _update_mean_params(self):
"""
Returns
-------
update : array-like
The update vector such that params + update is the next
iterate when solving the score equations.
score : array-like
The current value of the score equations, not
incorporating the scale parameter. If desired,
multiply this vector by the scale parameter to
incorporate the scale.
"""
endog = self.endog_li
exog = self.exog_li
cached_means = self.cached_means
varfunc = self.family.variance
bmat, score = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(expval, i,
sdev, (dmat, resid))
if rslt is None:
return None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
score += f * np.dot(dmat.T, vinv_resid)
update = np.linalg.solve(bmat, score)
self._fit_history["cov_adjust"].append(
self.cov_struct.cov_adjust)
return update, score
def update_cached_means(self, mean_params):
"""
cached_means should always contain the most recent calculation
of the group-wise mean vectors. This function should be
called every time the regression parameters are changed, to
keep the cached means up to date.
"""
endog = self.endog_li
exog = self.exog_li
offset = self.offset_li
linkinv = self.family.link.inverse
self.cached_means = []
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
lpr = np.dot(exog[i], mean_params)
if offset is not None:
lpr += offset[i]
expval = linkinv(lpr)
self.cached_means.append((expval, lpr))
def _covmat(self):
"""
Returns the sampling covariance matrix of the regression
parameters and related quantities.
Returns
-------
cov_robust : array-like
The robust, or sandwich estimate of the covariance, which
is meaningful even if the working covariance structure is
incorrectly specified.
cov_naive : array-like
The model-based estimate of the covariance, which is
meaningful if the covariance structure is correctly
specified.
cmat : array-like
The center matrix of the sandwich expression, used in
obtaining score test results.
"""
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
# Calculate the naive (model-based) and robust (sandwich)
# covariances.
bmat, cmat = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat, resid))
if rslt is None:
return None, None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
dvinv_resid = f * np.dot(dmat.T, vinv_resid)
cmat += np.outer(dvinv_resid, dvinv_resid)
scale = self.estimate_scale()
bmati = np.linalg.inv(bmat)
cov_naive = bmati * scale
cov_robust = np.dot(bmati, np.dot(cmat, bmati))
cov_naive *= self.scaling_factor
cov_robust *= self.scaling_factor
return cov_robust, cov_naive, cmat
# Calculate the bias-corrected sandwich estimate of Mancl and
# DeRouen.
def _bc_covmat(self, cov_naive):
cov_naive = cov_naive / self.scaling_factor
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
scale = self.estimate_scale()
bcm = 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat,))
if rslt is None:
return None
vinv_d = rslt[0]
vinv_d /= scale
hmat = np.dot(vinv_d, cov_naive)
hmat = np.dot(hmat, dmat.T).T
f = self.weights_li[i] if self.weights is not None else 1.
aresid = np.linalg.solve(np.eye(len(resid)) - hmat, resid)
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (aresid,))
if rslt is None:
return None
srt = rslt[0]
srt = f * np.dot(dmat.T, srt) / scale
bcm += np.outer(srt, srt)
cov_robust_bc = np.dot(cov_naive, np.dot(bcm, cov_naive))
cov_robust_bc *= self.scaling_factor
return cov_robust_bc
def predict(self, params, exog=None, offset=None,
exposure=None, linear=False):
"""
Return predicted values for a marginal regression model fit
using GEE.
Parameters
----------
params : array-like
Parameters / coefficients of a marginal regression model.
exog : array-like, optional
Design / exogenous data. If exog is None, model exog is
used.
offset : array-like, optional
Offset for exog if provided. If offset is None, model
offset is used.
exposure : array-like, optional
Exposure for exog, if exposure is None, model exposure is
used. Only allowed if link function is the logarithm.
linear : bool
If True, returns the linear predicted values. If False,
returns the value of the inverse of the model's link
function at the linear predicted values.
Returns
-------
An array of fitted values
Notes
-----
Using log(V) as the offset is equivalent to using V as the
exposure. If exposure U and offset V are both provided, then
log(U) + V is added to the linear predictor.
"""
# TODO: many paths through this, not well covered in tests
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError(
"exposure can only be used with the log link function")
# This is the combined offset and exposure
_offset = 0.
# Using model exog
if exog is None:
exog = self.exog
if not isinstance(self.family.link, families.links.Log):
# Don't need to worry about exposure
if offset is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure.copy()
else:
_offset = offset
else:
if offset is None and exposure is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure
elif offset is None and exposure is not None:
_offset = np.log(exposure)
if hasattr(self, "offset"):
_offset = _offset + self.offset
elif offset is not None and exposure is None:
_offset = offset
if hasattr(self, "exposure"):
_offset = offset + np.log(self.exposure)
else:
_offset = offset + np.log(exposure)
# exog is provided: this is simpler than above because we
# never use model exog or exposure if exog is provided.
else:
if offset is not None:
_offset = _offset + offset
if exposure is not None:
_offset += np.log(exposure)
lin_pred = _offset + np.dot(exog, params)
if not linear:
return self.family.link.inverse(lin_pred)
return lin_pred
def _starting_params(self):
model = GLM(self.endog, self.exog, family=self.family,
offset=self._offset_exposure,
freq_weights=self.weights)
result = model.fit()
return result.params
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust', ddof_scale=None, scaling_factor=1.):
# Docstring attached below
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
self.scaling_factor = scaling_factor
self._fit_history = defaultdict(list)
if self.weights is not None and cov_type == 'naive':
raise ValueError("when using weights, cov_type may not be naive")
if start_params is None:
mean_params = self._starting_params()
else:
start_params = np.asarray(start_params)
mean_params = start_params.copy()
self.update_cached_means(mean_params)
del_params = -1.
num_assoc_updates = 0
for itr in range(maxiter):
update, score = self._update_mean_params()
if update is None:
warnings.warn("Singular matrix encountered in GEE update",
ConvergenceWarning)
break
mean_params += update
self.update_cached_means(mean_params)
# L2 norm of the change in mean structure parameters at
# this iteration.
del_params = np.sqrt(np.sum(score ** 2))
self._fit_history['params'].append(mean_params.copy())
self._fit_history['score'].append(score)
self._fit_history['dep_params'].append(
self.cov_struct.dep_params)
# Don't exit until the association parameters have been
# updated at least once.
if (del_params < ctol and
(num_assoc_updates > 0 or self.update_dep is False)):
break
# Update the dependence structure
if (self.update_dep and (itr % params_niter) == 0
and (itr >= first_dep_update)):
self._update_assoc(mean_params)
num_assoc_updates += 1
if del_params >= ctol:
warnings.warn("Iteration limit reached prior to convergence",
IterationLimitWarning)
if mean_params is None:
warnings.warn("Unable to estimate GEE parameters.",
ConvergenceWarning)
return None
bcov, ncov, _ = self._covmat()
if bcov is None:
warnings.warn("Estimated covariance structure for GEE "
"estimates is singular", ConvergenceWarning)
return None
bc_cov = None
if cov_type == "bias_reduced":
bc_cov = self._bc_covmat(ncov)
if self.constraint is not None:
x = mean_params.copy()
mean_params, bcov = self._handle_constraint(mean_params, bcov)
if mean_params is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
y, ncov = self._handle_constraint(x, ncov)
if y is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
if bc_cov is not None:
y, bc_cov = self._handle_constraint(x, bc_cov)
if x is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
scale = self.estimate_scale()
# kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type=cov_type,
cov_robust=bcov,
cov_naive=ncov,
cov_robust_bc=bc_cov)
# The superclass constructor will multiply the covariance
# matrix argument bcov by scale, which we don't want, so we
# divide bcov by the scale parameter here
results = GEEResults(self, mean_params, bcov / scale, scale,
cov_type=cov_type, use_t=False,
attr_kwds=res_kwds)
# attributes not needed during results__init__
results.fit_history = self._fit_history
self.fit_history = defaultdict(list)
results.score_norm = del_params
results.converged = (del_params < ctol)
results.cov_struct = self.cov_struct
results.params_niter = params_niter
results.first_dep_update = first_dep_update
results.ctol = ctol
results.maxiter = maxiter
# These will be copied over to subclasses when upgrading.
results._props = ["cov_type", "use_t",
"cov_params_default", "cov_robust",
"cov_naive", "cov_robust_bc",
"fit_history",
"score_norm", "converged", "cov_struct",
"params_niter", "first_dep_update", "ctol",
"maxiter"]
return GEEResultsWrapper(results)
fit.__doc__ = _gee_fit_doc
def _update_regularized(self, params, pen_wt, scad_param, eps):
sn, hm = 0, 0
for i in range(self.num_group):
expval, _ = self.cached_means[i]
resid = self.endog_li[i] - expval
sdev = np.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid, ex))
sn0 = rslt[0]
sn += np.dot(ex.T, sn0)
hm0 = rslt[1]
hm += np.dot(ex.T, hm0)
# Wang et al. divide sn here by num_group, but that
# seems to be incorrect
ap = np.abs(params)
clipped = np.clip(scad_param * pen_wt - ap, 0, np.inf)
en = pen_wt * clipped * (ap > pen_wt)
en /= (scad_param - 1) * pen_wt
en += pen_wt * (ap <= pen_wt)
en /= eps + ap
hm.flat[::hm.shape[0] + 1] += self.num_group * en
hm *= self.estimate_scale()
sn -= self.num_group * en * params
return np.linalg.solve(hm, sn), hm
def _regularized_covmat(self, mean_params):
self.update_cached_means(mean_params)
ma = 0
for i in range(self.num_group):
expval, _ = self.cached_means[i]
resid = self.endog_li[i] - expval
sdev = np.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid,))
ma0 = np.dot(ex.T, rslt[0])
ma += np.outer(ma0, ma0)
return ma
def fit_regularized(self, pen_wt, scad_param=3.7, maxiter=100,
ddof_scale=None, update_assoc=5,
ctol=1e-5, ztol=1e-3, eps=1e-6):
"""
Regularized estimation for GEE.
Parameters
----------
pen_wt : float
The penalty weight (a non-negative scalar).
scad_param : float
Non-negative scalar determining the shape of the Scad
penalty.
maxiter : integer
The maximum number of iterations.
ddof_scale : integer
Value to subtract from `nobs` when calculating the
denominator degrees of freedom for t-statistics, defaults
to the number of columns in `exog`.
update_assoc : integer
The dependence parameters are updated every `update_assoc`
iterations of the mean structure parameter updates.
ctol : float
Convergence criterion, default is one order of magnitude
smaller than proposed in section 3.1 of Wang et al.
ztol : float
Coefficients smaller than this value are treated as
being zero, default is based on section 5 of Wang et al.
eps : non-negative scalar
Numerical constant, see section 3.2 of Wang et al.
Returns
-------
GEEResults instance. Note that not all methods of the results
class make sense when the model has been fit with regularization.
Notes
-----
This implementation assumes that the link is canonical.
References
----------
L. Wang, J. Zhou, A. Qu (2012). Penalized generalized estimating
equations for high-dimensional longitudinal data analysis.
Biometrics. 2012 Jun;68(2):353-60.
doi: 10.1111/j.1541-0420.2011.01678.x.
https://www.ncbi.nlm.nih.gov/pubmed/21955051
http://users.stat.umn.edu/~wangx346/research/GEE_selection.pdf
"""
mean_params = np.zeros(self.exog.shape[1])
self.update_cached_means(mean_params)
converged = False
fit_history = defaultdict(list)
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
for itr in range(maxiter):
update, hm = self._update_regularized(
mean_params, pen_wt, scad_param, eps)
if update is None:
msg = "Singular matrix encountered in regularized GEE update",
warnings.warn(msg, ConvergenceWarning)
break
if np.sqrt(np.sum(update**2)) < ctol:
converged = True
break
mean_params += update
fit_history['params'].append(mean_params.copy())
self.update_cached_means(mean_params)
if itr != 0 and (itr % update_assoc == 0):
self._update_assoc(mean_params)
if not converged:
msg = "GEE.fit_regularized did not converge"
warnings.warn(msg)
mean_params[np.abs(mean_params) < ztol] = 0
self._update_assoc(mean_params)
ma = self._regularized_covmat(mean_params)
cov = np.linalg.solve(hm, ma)
cov = np.linalg.solve(hm, cov.T)
# kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type="robust", cov_robust=cov)
scale = self.estimate_scale()
rslt = GEEResults(self, mean_params, cov, scale,
regularized=True, attr_kwds=res_kwds)
rslt.fit_history = fit_history
return GEEResultsWrapper(rslt)
def _handle_constraint(self, mean_params, bcov):
"""
Expand the parameter estimate `mean_params` and covariance matrix
`bcov` to the coordinate system of the unconstrained model.
Parameters
----------
mean_params : array-like
A parameter vector estimate for the reduced model.
bcov : array-like
The covariance matrix of mean_params.
Returns
-------
mean_params : array-like
The input parameter vector mean_params, expanded to the
coordinate system of the full model
bcov : array-like
The input covariance matrix bcov, expanded to the
coordinate system of the full model
"""
# The number of variables in the full model
red_p = len(mean_params)
full_p = self.constraint.lhs.shape[1]
mean_params0 = np.r_[mean_params, np.zeros(full_p - red_p)]
# Get the score vector under the full model.
save_exog_li = self.exog_li
self.exog_li = self.constraint.exog_fulltrans_li
import copy
save_cached_means = copy.deepcopy(self.cached_means)
self.update_cached_means(mean_params0)
_, score = self._update_mean_params()
if score is None:
warnings.warn("Singular matrix encountered in GEE score test",
ConvergenceWarning)
return None, None
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = score[red_p:] / scale
amat = np.linalg.inv(ncov1)
bmat_11 = cmat[0:red_p, 0:red_p]
bmat_22 = cmat[red_p:, red_p:]
bmat_12 = cmat[0:red_p, red_p:]
amat_11 = amat[0:red_p, 0:red_p]
amat_12 = amat[0:red_p, red_p:]
score_cov = bmat_22 - np.dot(amat_12.T,
np.linalg.solve(amat_11, bmat_12))
score_cov -= np.dot(bmat_12.T,
np.linalg.solve(amat_11, amat_12))
score_cov += np.dot(amat_12.T,
np.dot(np.linalg.solve(amat_11, bmat_11),
np.linalg.solve(amat_11, amat_12)))
from scipy.stats.distributions import chi2
score_statistic = np.dot(score2,
np.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
self.score_test_results = {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
mean_params = self.constraint.unpack_param(mean_params)
bcov = self.constraint.unpack_cov(bcov)
self.exog_li = save_exog_li
self.cached_means = save_cached_means
self.exog = self.constraint.restore_exog()
return mean_params, bcov
def _update_assoc(self, params):
"""
Update the association parameters
"""
self.cov_struct.update(params)
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects, returns dF(XB) / dX where F(.)
is the fitted mean.
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
# This form should be appropriate for group 1 probit, logit,
# logistic, cloglog, heckprob, xtprobit.
offset_exposure = None
if exog is None:
exog = self.exog
offset_exposure = self._offset_exposure
margeff = self.mean_deriv_exog(exog, params, offset_exposure)
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:, None]
if count_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_count_effects)
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_dummy_effects)
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
def qic(self, params, scale, cov_params):
"""
Returns quasi-information criteria and quasi-likelihood values.
Parameters
----------
params : array-like
The GEE estimates of the regression parameters.
scale : scalar
Estimated scale parameter
cov_params : array-like
An estimate of the covariance matrix for the
model parameters. Conventionally this is the robust
covariance matrix.
Returns
-------
ql : scalar
The quasi-likelihood value
qic : scalar
A QIC that can be used to compare the mean and covariance
structures of the model.
qicu : scalar
A simplified QIC that can be used to compare mean structures
but not covariance structures
Notes
-----
The quasi-likelihood used here is obtained by numerically evaluating
Wedderburn's integral representation of the quasi-likelihood function.
This approach is valid for all families and links. Many other
packages use analytical expressions for quasi-likelihoods that are
valid in special cases where the link function is canonical. These
analytical expressions may omit additive constants that only depend
on the data. Therefore, the numerical values of our QL and QIC values
will differ from the values reported by other packages. However only
the differences between two QIC values calculated for different models
using the same data are meaningful. Our QIC should produce the same
QIC differences as other software.
When using the QIC for models with unknown scale parameter, use a
common estimate of the scale parameter for all models being compared.
References
----------
.. [*] <NAME> (2001). Akaike's information criterion in generalized
estimating equations. Biometrics (57) 1.
"""
varfunc = self.family.variance
means = []
omega = 0.0
# omega^-1 is the model-based covariance assuming independence
for i in range(self.num_group):
expval, lpr = self.cached_means[i]
means.append(expval)
dmat = self.mean_deriv(self.exog_li[i], lpr)
omega += np.dot(dmat.T, dmat) / scale
means = np.concatenate(means)
# The quasi-likelihood, use change of variables so the integration is
# from -1 to 1.
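# Sketch of the substitution (added for clarity): the quasi-likelihood is
#   QL = sum_i int_{y_i}^{mu_i} (y_i - u) / (scale * V(u)) du.
# Setting u = y + (g + 1) * (mu - y) / 2 maps the limits to g in [-1, 1],
# with du = (mu - y) / 2 dg, which gives the integrand accumulated in qv
# below (the common factor 1 / (4 * scale) is applied afterwards).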
du = means - self.endog
nstep = 10000
qv = np.empty(nstep)
xv = np.linspace(-0.99999, 1, nstep)
for i, g in enumerate(xv):
u = self.endog + (g + 1) * du / 2.0
vu = varfunc(u)
qv[i] = -np.sum(du**2 * (g + 1) / vu)
qv /= (4 * scale)
from scipy.integrate import trapz
ql = trapz(qv, dx=xv[1] - xv[0])
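# QIC penalizes -2 * QL by 2 * trace(omega * cov_params); QICu replaces the
# trace term with 2 * p, where p is the number of mean parameters, and is
# only suitable for comparing mean structures (see the docstring notes).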
qicu = -2 * ql + 2 * self.exog.shape[1]
qic = -2 * ql + 2 * np.trace(np.dot(omega, cov_params))
return ql, qic, qicu
class GEEResults(base.LikelihoodModelResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model "
"using GEE.\n" + _gee_results_doc)
def __init__(self, model, params, cov_params, scale,
cov_type='robust', use_t=False, regularized=False,
**kwds):
super(GEEResults, self).__init__(
model, params, normalized_cov_params=cov_params,
scale=scale)
# not added by super
self.df_resid = model.df_resid
self.df_model = model.df_model
self.family = model.family
attr_kwds = kwds.pop('attr_kwds', {})
self.__dict__.update(attr_kwds)
# we don't do this if the cov_type has already been set
# subclasses can set it through attr_kwds
if not (hasattr(self, 'cov_type') and
hasattr(self, 'cov_params_default')):
self.cov_type = cov_type # keep alias
covariance_type = self.cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = ("GEE: `cov_type` must be one of " +
", ".join(allowed_covariances))
raise ValueError(msg)
if cov_type == "robust":
cov = self.cov_robust
elif cov_type == "naive":
cov = self.cov_naive
elif cov_type == "bias_reduced":
cov = self.cov_robust_bc
self.cov_params_default = cov
else:
if self.cov_type != cov_type:
raise ValueError('cov_type in argument is different from '
'already attached cov_type')
def standard_errors(self, cov_type="robust"):
"""
This is a convenience function that returns the standard
errors for any covariance type. The value of `bse` is the
standard errors for whichever covariance type is specified as
an argument to `fit` (defaults to "robust").
Parameters
----------
cov_type : string
One of "robust", "naive", or "bias_reduced". Determines
the covariance used to compute standard errors. Defaults
to "robust".
"""
# Check covariance_type
covariance_type = cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = ("GEE: `covariance_type` must be one of " +
", ".join(allowed_covariances))
raise ValueError(msg)
if covariance_type == "robust":
return np.sqrt(np.diag(self.cov_robust))
elif covariance_type == "naive":
return np.sqrt(np.diag(self.cov_naive))
elif covariance_type == "bias_reduced":
if self.cov_robust_bc is None:
raise ValueError(
"GEE: `bias_reduced` covariance not available")
return np.sqrt(np.diag(self.cov_robust_bc))
# Need to override to allow for different covariance types.
@cache_readonly
def bse(self):
return self.standard_errors(self.cov_type)
@cache_readonly
def resid(self):
"""
Returns the residuals, the endogenous data minus the fitted
values from the model.
"""
return self.model.endog - self.fittedvalues
def score_test(self):
"""
Return the results of a score test for a linear constraint.
Returns
-------
A dictionary containing the p-value, the test statistic,
and the degrees of freedom for the score test.
Notes
-----
See also GEE.compare_score_test for an alternative way to perform
a score test. GEEResults.score_test is more general, in that it
supports testing arbitrary linear equality constraints. However
GEE.compare_score_test might be easier to use when comparing
two explicit models.
References
----------
<NAME> and <NAME> (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
if not hasattr(self.model, "score_test_results"):
msg = "score_test on results instance only available when "
msg += " model was fit with constraints"
raise ValueError(msg)
return self.model.score_test_results
@cache_readonly
def resid_split(self):
"""
Returns the residuals, the endogenous data minus the fitted
values from the model. The residuals are returned as a list
of arrays containing the residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.resid[ii])
return sresid
@cache_readonly
def resid_centered(self):
"""
Returns the residuals centered within each group.
"""
cresid = self.resid.copy()
for v in self.model.group_labels:
ii = self.model.group_indices[v]
cresid[ii] -= cresid[ii].mean()
return cresid
@cache_readonly
def resid_centered_split(self):
"""
Returns the residuals centered within each group. The
residuals are returned as a list of arrays containing the
centered residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.centered_resid[ii])
return sresid
def qic(self, scale=None):
"""
Returns the QIC and QICu information criteria.
For families with a scale parameter (e.g. Gaussian), provide
as the scale argument the estimated scale from the largest
model under consideration.
If the scale parameter is not provided, the estimated scale
parameter is used. Doing this does not allow comparisons of
QIC values between models.
"""
# It is easy to forget to set the scale parameter. Sometimes
# this is intentional, so we warn.
if scale is None:
    warnings.warn("QIC values obtained using scale=None are not "
                  "appropriate for comparing models")
    scale = self.scale
_, qic, qicu = self.model.qic(self.params, scale,
self.cov_params())
return qic, qicu
# FIXME: alias to be removed, temporary backwards compatibility
split_resid = resid_split
centered_resid = resid_centered
split_centered_resid = resid_centered_split
@cache_readonly
def resid_response(self):
return self.model.endog - self.fittedvalues
@cache_readonly
def resid_pearson(self):
val = self.model.endog - self.fittedvalues
val = val / np.sqrt(self.family.variance(self.fittedvalues))
return val
@cache_readonly
def resid_working(self):
val = self.resid_response
val = val * self.family.link.deriv(self.fittedvalues)
return val
@cache_readonly
def resid_anscombe(self):
return self.family.resid_anscombe(self.model.endog, self.fittedvalues)
@cache_readonly
def resid_deviance(self):
return self.family.resid_dev(self.model.endog, self.fittedvalues)
@cache_readonly
def fittedvalues(self):
"""
Returns the fitted values from the model.
"""
return self.model.family.link.inverse(np.dot(self.model.exog,
self.params))
def plot_added_variable(self, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None,
ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_added_variable
fig = plot_added_variable(self, focus_exog,
resid_type=resid_type,
use_glm_weights=use_glm_weights,
fit_kwargs=fit_kwargs, ax=ax)
return fig
plot_added_variable.__doc__ = _plot_added_variable_doc % {
'extra_params_doc': ''}
def plot_partial_residuals(self, focus_exog, ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_partial_residuals
return plot_partial_residuals(self, focus_exog, ax=ax)
plot_partial_residuals.__doc__ = _plot_partial_residuals_doc % {
'extra_params_doc': ''}
def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,
ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_ceres_residuals
return plot_ceres_residuals(self, focus_exog, frac,
cond_means=cond_means, ax=ax)
plot_ceres_residuals.__doc__ = _plot_ceres_residuals_doc % {
'extra_params_doc': ''}
def conf_int(self, alpha=.05, cols=None, cov_type=None):
"""
Returns confidence intervals for the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval. i.e., The
default `alpha` = .05 returns a 95% confidence interval.
cols : array-like, optional
`cols` specifies which confidence intervals to return
cov_type : string
The covariance type used for computing standard errors;
must be one of 'robust', 'naive', and 'bias reduced'.
See `GEE` for details.
Notes
-----
The confidence interval is based on the Gaussian distribution.
"""
# super doesn't allow to specify cov_type and method is not
# implemented,
# FIXME: remove this method here
if cov_type is None:
bse = self.bse
else:
bse = self.standard_errors(cov_type=cov_type)
params = self.params
dist = stats.norm
q = dist.ppf(1 - alpha / 2)
if cols is None:
lower = self.params - q * bse
upper = self.params + q * bse
else:
cols = np.asarray(cols)
lower = params[cols] - q * bse[cols]
upper = params[cols] + q * bse[cols]
return np.asarray(lzip(lower, upper))
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the GEE regression results
Parameters
----------
yname : string, optional
Default is `y`
xname : list of strings, optional
Default is `var_##` for ## in range(p), where p is the number of regressors
title : string, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Generalized']),
('', ['Estimating Equations']),
('Family:', [self.model.family.__class__.__name__]),
('Dependence structure:',
[self.model.cov_struct.__class__.__name__]),
('Date:', None),
('Covariance type: ', [self.cov_type, ])
]
NY = [len(y) for y in self.model.endog_li]
top_right = [('No. Observations:', [sum(NY)]),
('No. clusters:', [len(self.model.endog_li)]),
('Min. cluster size:', [min(NY)]),
('Max. cluster size:', [max(NY)]),
('Mean cluster size:', ["%.1f" % np.mean(NY)]),
('Num. iterations:', ['%d' %
len(self.fit_history['params'])]),
('Scale:', ["%.3f" % self.scale]),
('Time:', None),
]
# The skew of the residuals
skew1 = stats.skew(self.resid)
kurt1 = stats.kurtosis(self.resid)
skew2 = stats.skew(self.centered_resid)
kurt2 = stats.kurtosis(self.centered_resid)
diagn_left = [('Skew:', ["%12.4f" % skew1]),
('Centered skew:', ["%12.4f" % skew2])]
diagn_right = [('Kurtosis:', ["%12.4f" % kurt1]),
('Centered kurtosis:', ["%12.4f" % kurt2])
]
if title is None:
title = self.model.__class__.__name__ + ' ' +\
"Regression Results"
# Override the exog variable names if xname is provided as an
# argument.
if xname is None:
xname = self.model.exog_names
if yname is None:
yname = self.model.endog_names
# Create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname,
title=title)
smry.add_table_params(self, yname=yname, xname=xname,
alpha=alpha, use_t=False)
smry.add_table_2cols(self, gleft=diagn_left,
gright=diagn_right, yname=yname,
xname=xname, title="")
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is 'all'
only margeff will be available.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semielasticity -- dy/d(lnx)
- 'eydx' - estimate semielasticity -- d(lny)/dx
Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
atexog : array-like, optional
Optionally, you can provide the exogenous variables over which to
get the marginal effects. This should be a dictionary with the
zero-indexed column number as the key and the value at which that
variable is held fixed as the dictionary value. Default is None for
all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
effects : ndarray
the marginal effect corresponding to the input options
Notes
-----
When using after Poisson, returns the expected number of events
per period, assuming that the model is loglinear.
"""
if self.model.constraint is not None:
warnings.warn("marginal effects ignore constraints",
ValueWarning)
return GEEMargins(self, (at, method, atexog, dummy, count))
def plot_isotropic_dependence(self, ax=None, xpoints=10,
min_n=50):
"""
Create a plot of the pairwise products of within-group
residuals against the corresponding time differences. This
plot can be used to assess the possible form of an isotropic
covariance structure.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
xpoints : scalar or array-like
If scalar, the number of points equally spaced points on
the time difference axis used to define bins for
calculating local means. If an array, the specific points
that define the bins.
min_n : integer
The minimum sample size in a bin for the mean residual
product to be included on the plot.
"""
from statsmodels.graphics import utils as gutils
resid = self.model.cluster_list(self.resid)
time = self.model.cluster_list(self.model.time)
# All within-group pairwise time distances (xdt) and the
# corresponding products of scaled residuals (xre).
xre, xdt = [], []
for re, ti in zip(resid, time):
ix = np.tril_indices(re.shape[0], 0)
re = re[ix[0]] * re[ix[1]] / self.scale ** 2
xre.append(re)
dists = np.sqrt(((ti[ix[0], :] - ti[ix[1], :]) ** 2).sum(1))
xdt.append(dists)
xre = np.concatenate(xre)
xdt = np.concatenate(xdt)
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# Convert to a correlation
ii = np.flatnonzero(xdt == 0)
v0 = np.mean(xre[ii])
xre /= v0
# Use the simple average to smooth, since fancier smoothers
# that trim and downweight outliers give biased results (we
# need the actual mean of a skewed distribution).
if np.isscalar(xpoints):
xpoints = np.linspace(0, max(xdt), xpoints)
dg = np.digitize(xdt, xpoints)
dgu = np.unique(dg)
hist = np.asarray([np.sum(dg == k) for k in dgu])
ii = np.flatnonzero(hist >= min_n)
dgu = dgu[ii]
dgy = np.asarray([np.mean(xre[dg == k]) for k in dgu])
dgx = np.asarray([np.mean(xdt[dg == k]) for k in dgu])
ax.plot(dgx, dgy, '-', color='orange', lw=5)
ax.set_xlabel("Time difference")
ax.set_ylabel("Product of scaled residuals")
return fig
def sensitivity_params(self, dep_params_first,
dep_params_last, num_steps):
"""
Refits the GEE model using a sequence of values for the
dependence parameters.
Parameters
----------
dep_params_first : array-like
The first dep_params in the sequence
dep_params_last : array-like
The last dep_params in the sequence
num_steps : int
The number of dep_params in the sequence
Returns
-------
results : array-like
The GEEResults objects resulting from the fits.
"""
model = self.model
import copy
cov_struct = copy.deepcopy(self.model.cov_struct)
# We are fixing the dependence structure in each run.
update_dep = model.update_dep
model.update_dep = False
dep_params = []
results = []
for x in np.linspace(0, 1, num_steps):
dp = x * dep_params_last + (1 - x) * dep_params_first
dep_params.append(dp)
model.cov_struct = copy.deepcopy(cov_struct)
model.cov_struct.dep_params = dp
rslt = model.fit(start_params=self.params,
ctol=self.ctol,
params_niter=self.params_niter,
first_dep_update=self.first_dep_update,
cov_type=self.cov_type)
results.append(rslt)
model.update_dep = update_dep
return results
# FIXME: alias to be removed, temporary backwards compatibility
params_sensitivity = sensitivity_params
class GEEResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {
'centered_resid': 'rows',
}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(GEEResultsWrapper, GEEResults) # noqa:E305
class OrdinalGEE(GEE):
__doc__ = (
" Estimation of ordinal response marginal regression models\n"
" using Generalized Estimating Equations (GEE).\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_ordinal_family_doc,
'example': _gee_ordinal_example})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
if family is None:
family = families.Binomial()
else:
if not isinstance(family, families.Binomial):
raise ValueError("ordinal GEE must use a Binomial family")
if cov_struct is None:
cov_struct = cov_structs.OrdinalIndependence()
endog, exog, groups, time, offset = self.setup_ordinal(
endog, exog, groups, time, offset)
super(OrdinalGEE, self).__init__(endog, exog, groups, time,
family, cov_struct, missing,
offset, dep_data, constraint)
def setup_ordinal(self, endog, exog, groups, time, offset):
"""
Restructure ordinal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
nrows = ncut * len(endog)
exog_out = np.zeros((nrows, exog.shape[1]),
dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
intercepts = np.zeros((nrows, ncut), dtype=np.float64)
groups_out = np.zeros(nrows, dtype=groups.dtype)
time_out = np.zeros((nrows, time.shape[1]),
dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for (exog_row, endog_value, group_value, time_value,
offset_value) in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
exog_out[jrow, :] = exog_row
endog_out[jrow] = (int(endog_value > thresh))
intercepts[jrow, thresh_ix] = 1
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
exog_out = np.concatenate((intercepts, exog_out), axis=1)
# exog column names, including intercepts
xnames = ["I(y>%.1f)" % v for v in endog_cuts]
if isinstance(self.exog_orig, pd.DataFrame):
xnames.extend(self.exog_orig.columns)
else:
xnames.extend(["x%d" % k for k in range(1, exog.shape[1] + 1)])
exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve the endog name if there is one
if isinstance(self.endog_orig, pd.Series):
endog_out = pd.Series(endog_out, name=self.endog_orig.name)
return endog_out, exog_out, groups_out, time_out, offset_out
def _starting_params(self):
model = GEE(self.endog, self.exog, self.groups,
time=self.time, family=families.Binomial(),
offset=self.offset, exposure=self.exposure)
result = model.fit()
return result.params
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust'):
rslt = super(OrdinalGEE, self).fit(maxiter, ctol, start_params,
params_niter, first_dep_update,
cov_type=cov_type)
rslt = rslt._results # use unwrapped instance
res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))
# Convert the GEEResults to an OrdinalGEEResults
ord_rslt = OrdinalGEEResults(self, rslt.params,
rslt.cov_params() / rslt.scale,
rslt.scale,
cov_type=cov_type,
attr_kwds=res_kwds)
# for k in rslt._props:
# setattr(ord_rslt, k, getattr(rslt, k))
return OrdinalGEEResultsWrapper(ord_rslt)
fit.__doc__ = _gee_fit_doc
class OrdinalGEEResults(GEEResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model"
"for an ordinal response using GEE.\n"
+ _gee_results_doc)
def plot_distribution(self, ax=None, exog_values=None):
"""
Plot the fitted probabilities of endog in an ordinal model,
for specified values of the predictors.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array-like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
Examples
--------
We have a model with covariates 'age' and 'sex', and wish to
plot the probabilities P(endog=y | exog) for males (sex=0) and
for females (sex=1), as separate paths on the plot. Since
'age' is not included below in the map, it is held fixed at
its mean value.
>>> ev = [{"sex": 1}, {"sex": 0}]
>>> rslt.plot_distribution(exog_values=ev)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [{}, ]
exog_means = self.model.exog.mean(0)
ix_icept = [i for i, x in enumerate(self.model.exog_names) if
x.startswith("I(")]
for ev in exog_values:
for k in ev.keys():
if k not in self.model.exog_names:
raise ValueError("%s is not a variable in the model"
% k)
# Get the fitted probability for each level, at the given
# covariate values.
pr = []
for j in ix_icept:
xp = np.zeros_like(self.params)
xp[j] = 1.
for i, vn in enumerate(self.model.exog_names):
if i in ix_icept:
continue
# User-specified value
if vn in ev:
xp[i] = ev[vn]
# Mean value
else:
xp[i] = exog_means[i]
p = 1 / (1 + np.exp(-np.dot(xp, self.params)))
pr.append(p)
pr.insert(0, 1)
pr.append(0)
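# pr is now [1, P(y > c_1), ..., P(y > c_k), 0], so the negated successive
# differences below give P(y = category) for each response level.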
pr = np.asarray(pr)
prd = -np.diff(pr)
ax.plot(self.model.endog_values, prd, 'o-')
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_ylim(0, 1)
return fig
def _score_test_submodel(par, sub):
"""
Return transformation matrices for design matrices.
Parameters
----------
par : instance
The parent model
sub : instance
The sub-model
Returns
-------
qm : array-like
Matrix mapping the design matrix of the parent to the design matrix
for the sub-model.
qc : array-like
Matrix mapping the design matrix of the parent to the orthogonal
complement of the columnspace of the submodel in the columnspace
of the parent.
Notes
-----
Returns None, None if the provided submodel is not actually a submodel.
"""
x1 = par.exog
x2 = sub.exog
u, s, vt = np.linalg.svd(x1, 0)
# Get the orthogonal complement of col(x2) in col(x1).
a, _, _ = np.linalg.svd(x2, 0)
a = u - np.dot(a, np.dot(a.T, u))
x2c, sb, _ = np.linalg.svd(a, 0)
x2c = x2c[:, sb > 1e-12]
# x1 * qm = x2
qm = np.dot(vt.T, np.dot(u.T, x2) / s[:, None])
e = np.max(np.abs(x2 - np.dot(x1, qm)))
if e > 1e-8:
return None, None
# x1 * qc = x2c
qc = np.dot(vt.T, np.dot(u.T, x2c) / s[:, None])
return qm, qc
class OrdinalGEEResultsWrapper(GEEResultsWrapper):
pass
wrap.populate_wrapper(OrdinalGEEResultsWrapper, OrdinalGEEResults) # noqa:E305
class NominalGEE(GEE):
__doc__ = (
" Estimation of nominal response marginal regression models\n"
" using Generalized Estimating Equations (GEE).\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_nominal_family_doc,
'example': _gee_nominal_example})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
endog, exog, groups, time, offset = self.setup_nominal(
endog, exog, groups, time, offset)
if family is None:
family = _Multinomial(self.ncut + 1)
if cov_struct is None:
cov_struct = cov_structs.NominalIndependence()
super(NominalGEE, self).__init__(
endog, exog, groups, time, family, cov_struct, missing,
offset, dep_data, constraint)
def _starting_params(self):
model = GEE(self.endog, self.exog, self.groups,
time=self.time, family=families.Binomial(),
offset=self.offset, exposure=self.exposure)
result = model.fit()
return result.params
def setup_nominal(self, endog, exog, groups, time, offset):
"""
Restructure nominal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
self.ncut = ncut
nrows = len(endog_cuts) * exog.shape[0]
ncols = len(endog_cuts) * exog.shape[1]
exog_out = np.zeros((nrows, ncols), dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
groups_out = np.zeros(nrows, dtype=np.float64)
time_out = np.zeros((nrows, time.shape[1]),
dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for (exog_row, endog_value, group_value, time_value,
offset_value) in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
u = np.zeros(len(endog_cuts), dtype=np.float64)
u[thresh_ix] = 1
exog_out[jrow, :] = np.kron(u, exog_row)
endog_out[jrow] = (int(endog_value == thresh))
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
# exog names
if isinstance(self.exog_orig, pd.DataFrame):
xnames_in = self.exog_orig.columns
else:
xnames_in = ["x%d" % k for k in range(1, exog.shape[1] + 1)]
xnames = []
for tr in endog_cuts:
xnames.extend(["%s[%.1f]" % (v, tr) for v in xnames_in])
exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve endog name if there is one
if isinstance(self.endog_orig, pd.Series):
endog_out = pd.Series(endog_out, name=self.endog_orig.name)
return endog_out, exog_out, groups_out, time_out, offset_out
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array-like
The exogenous data at which the derivative is computed,
number of rows must be a multiple of `ncut`.
lin_pred : array-like
The values of the linear predictor, length must be multiple
of `ncut`.
Returns
-------
The derivative of the expected endog with respect to the
parameters.
"""
expval = np.exp(lin_pred)
# Reshape so that each row contains all the indicators
# corresponding to one multinomial observation.
expval_m = np.reshape(expval, (len(expval) // self.ncut,
self.ncut))
# The normalizing constant for the multinomial probabilities.
denom = 1 + expval_m.sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
# The multinomial probabilities
mprob = expval / denom
# First term of the derivative: denom * expval' / denom^2 =
# expval' / denom.
dmat = mprob[:, None] * exog
# Second term of the derivative: -expval * denom' / denom^2
ddenom = expval[:, None] * exog
dmat -= mprob[:, None] * ddenom / denom[:, None]
return dmat
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog for the
multinomial model, used in analyzing marginal effects.
Parameters
----------
exog : array-like
The exogenous data at which the derivative is computed,
number of rows must be a multiple of `ncut`.
params : array-like
The model parameters; the length must be a multiple of `ncut`.
Returns
-------
The value of the derivative of the expected endog with respect
to exog.
Notes
-----
offset_exposure must be set to None for the multinomial family.
"""
if offset_exposure is not None:
warnings.warn("Offset/exposure ignored for the multinomial family",
ValueWarning)
lpr = np.dot(exog, params)
expval = np.exp(lpr)
expval_m = np.reshape(expval, (len(expval) // self.ncut,
self.ncut))
denom = 1 + expval_m.sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
bmat0 = np.outer(np.ones(exog.shape[0]), params)
# Masking matrix
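# (each stacked row corresponds to one non-reference response level; the
# mask zeroes out the coefficient blocks of the other levels so that only
# that level's slice of `params` enters the derivative for its rows)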
qmat = []
for j in range(self.ncut):
ee = np.zeros(self.ncut, dtype=np.float64)
ee[j] = 1
qmat.append(np.kron(ee, np.ones(len(params) // self.ncut)))
qmat = np.array(qmat)
qmat = np.kron(np.ones((exog.shape[0] // self.ncut, 1)), qmat)
bmat = bmat0 * qmat
dmat = expval[:, None] * bmat / denom[:, None]
expval_mb = np.kron(expval_m, np.ones((self.ncut, 1)))
expval_mb = np.kron(expval_mb, np.ones((1, self.ncut)))
dmat -= expval[:, None] * (bmat * expval_mb) / denom[:, None] ** 2
return dmat
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust'):
rslt = super(NominalGEE, self).fit(maxiter, ctol, start_params,
params_niter, first_dep_update,
cov_type=cov_type)
if rslt is None:
warnings.warn("GEE updates did not converge",
ConvergenceWarning)
return None
rslt = rslt._results # use unwrapped instance
res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))
# Convert the GEEResults to a NominalGEEResults
nom_rslt = NominalGEEResults(self, rslt.params,
rslt.cov_params() / rslt.scale,
rslt.scale,
cov_type=cov_type,
attr_kwds=res_kwds)
# for k in rslt._props:
# setattr(nom_rslt, k, getattr(rslt, k))
return NominalGEEResultsWrapper(nom_rslt)
fit.__doc__ = _gee_fit_doc
class NominalGEEResults(GEEResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model"
"for a nominal response using GEE.\n"
+ _gee_results_doc)
def plot_distribution(self, ax=None, exog_values=None):
"""
Plot the fitted probabilities of endog in a nominal model,
for specified values of the predictors.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array-like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
Examples
--------
We have a model with covariates 'age' and 'sex', and wish to
plot the probabilities P(endog=y | exog) for males (sex=0) and
for females (sex=1), as separate paths on the plot. Since
'age' is not included below in the map, it is held fixed at
its mean value.
>>> ex = [{"sex": 1}, {"sex": 0}]
>>> rslt.plot_distribution(exog_values=ex)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [{}, ]
link = self.model.family.link.inverse
ncut = self.model.family.ncut
k = int(self.model.exog.shape[1] / ncut)
exog_means = self.model.exog.mean(0)[0:k]
exog_names = self.model.exog_names[0:k]
exog_names = [x.split("[")[0] for x in exog_names]
params = np.reshape(self.params,
(ncut, len(self.params) // ncut))
for ev in exog_values:
exog = exog_means.copy()
for k in ev.keys():
if k not in exog_names:
raise ValueError("%s is not a variable in the model"
% k)
ii = exog_names.index(k)
exog[ii] = ev[k]
lpr = np.dot(params, exog)
pr = link(lpr)
pr = np.r_[pr, 1 - pr.sum()]
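# Append the reference (largest) category so the plotted probabilities
# sum to one across all response values.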
ax.plot(self.model.endog_values, pr, 'o-')
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_xticks(self.model.endog_values)
ax.set_xticklabels(self.model.endog_values)
ax.set_ylim(0, 1)
return fig
class NominalGEEResultsWrapper(GEEResultsWrapper):
pass
wrap.populate_wrapper(NominalGEEResultsWrapper, NominalGEEResults) # noqa:E305
class _MultinomialLogit(Link):
"""
The multinomial logit transform, only for use with GEE.
Notes
-----
The data are assumed coded as binary indicators, where each
observed multinomial value y is coded as I(y == S[0]), ..., I(y ==
S[-1]), where S is the set of possible response labels, excluding
the largest one. Therefore functions in this class should only
be called using a vector argument whose length is a multiple of |S|
= ncut, which is an argument to be provided when initializing the
class.
call and derivative use a private method _clean to trim p by 1e-10
so that p is in (0, 1)
"""
def __init__(self, ncut):
self.ncut = ncut
def inverse(self, lpr):
"""
Inverse of the multinomial logit transform, which gives the
expected values of the data as a function of the linear
predictors.
Parameters
----------
lpr : array-like (length must be divisible by `ncut`)
The linear predictors
Returns
-------
prob : array
Probabilities, or expected values
"""
expval = np.exp(lpr)
denom = 1 + np.reshape(expval, (len(expval) // self.ncut,
self.ncut)).sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
prob = expval / denom
return prob
class _Multinomial(families.Family):
"""
Pseudo-link function for fitting nominal multinomial models with
GEE. Not for use outside the GEE class.
"""
links = [_MultinomialLogit, ]
variance = varfuncs.binary
safe_links = [_MultinomialLogit, ]
def __init__(self, nlevels):
"""
Parameters
----------
nlevels : integer
The number of distinct categories for the multinomial
distribution.
"""
self.initialize(nlevels)
def initialize(self, nlevels):
self.ncut = nlevels - 1
self.link = _MultinomialLogit(self.ncut)
class GEEMargins(object):
"""
Estimated marginal effects for a regression model fit with GEE.
Parameters
----------
results : GEEResults instance
The results instance of a fitted discrete choice model
args : tuple
Args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
kwargs : dict
Keyword args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
"""
def __init__(self, results, args, kwargs={}):
self._cache = {}
self.results = results
self.get_margeff(*args, **kwargs)
def _reset(self):
self._cache = {}
@cache_readonly
def tvalues(self):
_check_at_is_all(self.margeff_options)
return self.margeff / self.margeff_se
def summary_frame(self, alpha=.05):
"""
Returns a DataFrame summarizing the marginal effects.
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
frame : DataFrame
A DataFrame summarizing the marginal effects.
"""
_check_at_is_all(self.margeff_options)
from pandas import DataFrame
names = [_transform_names[self.margeff_options['method']],
'Std. Err.', 'z', 'Pr(>|z|)',
'Conf. Int. Low', 'Conf. Int. Hi.']
ind = self.results.model.exog.var(0) != 0 # True if not a constant
exog_names = self.results.model.exog_names
var_names = [name for i, name in enumerate(exog_names) if ind[i]]
table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,
self.pvalues, self.conf_int(alpha)))
return DataFrame(table, columns=names, index=var_names)
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3 = make_dtnat_arr(chunksize + 5, 0)
with tm.ensure_clean("1.csv") as pth:
df = DataFrame({"a": s1, "b": s2})
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
df, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
# read_csv disambiguates the columns by
# labeling them dupe.1, dupe.2, etc.; monkey-patch the columns
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
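# Index/column type codes used by makeCustomDataframe: 'i' integer,
# 'f' float, 's' string, 'u' unicode, 'dt' datetime, 'p' period; the map
# below gives the dtype both frames are cast to before comparison.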
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[_to_uni(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[Timestamp(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = to_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = df.columns.to_timestamp()
df.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
tm.assert_frame_equal(df, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = tm.makeCustomDataframe(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.map(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_from_csv_w_all_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
df["c3"] = Series([7, 8, 9], dtype="int64")
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
df["test"] = "txt"
assert df.to_csv() == df.to_csv(columns=[0, 1, "test"])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_headers__") as path:
from_df.to_csv(path, header=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reset_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=["A", "B"])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv drops column name
tm.assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
tm.assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
datetime_frame.index = old_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ["first", "second"]
return DataFrame(
np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names
),
dtype="int64",
)
# column & index are multi-index
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(df, result)
# column is mi
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=0)
tm.assert_frame_equal(df, result)
# dup column names?
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1, 2])
tm.assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
tm.assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert com.all_none(*result.columns.names)
result.columns.names = df.columns.names
tm.assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
# invalid options
df = _make_frame(True)
df.to_csv(path)
for i in [6, 7]:
msg = f"len of {i}, but only 5 lines in file"
with pytest.raises(ParserError, match=msg):
read_csv(path, header=list(range(i)), index_col=0)
# write with cols
msg = "cannot specify cols with a MultiIndex"
with pytest.raises(TypeError, match=msg):
df.to_csv(path, columns=["foo", "bar"])
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# empty
tsframe[:0].to_csv(path)
recons = self.read_csv(path)
exp = tsframe[:0]
exp.index = []
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
def test_to_csv_interval_index(self):
# GH 28210
df = DataFrame({"A": list("abc"), "B": range(3)}, index=pd.interval_range(0, 3))
with tm.ensure_clean("__tmp_to_csv_interval_index__.csv") as path:
df.to_csv(path)
result = self.read_csv(path, index_col=0)
# can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
expected = df.copy()
expected.index = expected.index.astype(str)
tm.assert_frame_equal(result, expected)
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with tm.ensure_clean("__tmp_to_csv_float32_nanrep__.csv") as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
assert lines[1].split(",")[2] == "999"
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({"A": [1, 2, 3], "B": ["5,6", "7,8", "9,0"]})
with tm.ensure_clean("__tmp_to_csv_withcommas__.csv") as path:
df.to_csv(path)
df2 = self.read_csv(path)
tm.assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return [f"{name}{i:03d}" for i in range(5)]
df_float = DataFrame(
np.random.randn(100, 5), dtype="float64", columns=create_cols("float")
)
df_int = DataFrame(
np.random.randn(100, 5).astype("int64"),
dtype="int64",
columns=create_cols("int"),
)
df_bool = DataFrame(True, index=df_float.index, columns=create_cols("bool"))
df_object = DataFrame(
"foo", index=df_float.index, columns=create_cols("object")
)
df_dt = DataFrame(
Timestamp("20010101"), index=df_float.index, columns=create_cols("date")
)
# add in some nans
df_float.iloc[30:50, 1:3] = np.nan
# ## this is a bug in read_csv right now ####
# df_dt.loc[30:50,1:3] = np.nan
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
# dtype
dtypes = {}
for n, dtype in [
("float", np.float64),
("int", np.int64),
("bool", np.bool_),
("object", object),
]:
for c in create_cols(n):
dtypes[c] = dtype
with tm.ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(
filename, index_col=0, dtype=dtypes, parse_dates=create_cols("date")
)
tm.assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
df = DataFrame(
np.random.randn(1000, 30),
columns=list(range(15)) + list(range(15)),
dtype="float64",
)
with tm.ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename, index_col=0)
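# read_csv de-duplicates repeated column names on read (e.g. "0", "0.1", ...),
# so the original duplicate columns are reassigned before comparing to the source frame.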
result.columns = df.columns
tm.assert_frame_equal(result, df)
df_float = DataFrame(np.random.randn(1000, 3), dtype="float64")
df_int = DataFrame(np.random.randn(1000, 3)).astype("int64")
df_bool = DataFrame(True, index=df_float.index, columns=range(3))
df_object = DataFrame("foo", index=df_float.index, columns=range(3))
df_dt = DataFrame(Timestamp("20010101"), index=df_float.index, columns=range(3))
df = pd.concat(
[df_float, df_int, df_bool, df_object, df_dt], axis=1, ignore_index=True
)
df.columns = [0, 1, 2] * 5
with tm.ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename, index_col=0)
import os
import sys
from datetime import datetime
from collections import OrderedDict
import pandas as pd
from loguru import logger
DATA_INDEX = 0
TRANSLATION_INDEX = 1
def convert_dataframe_to_matrix(data_frame_list):
converted_data = OrderedDict()
converted_data.setdefault('peer_id', [])
for value in data_frame_list[0]['question_number']:
converted_data.setdefault(int(value) + 1, [])
for data in data_frame_list:
for key, value in data.items():
if key == 'peer_id':
for item in set(value):
converted_data[key].append(item)
for i in range(len(data['answer'])):
converted_data[int(data['question_number'][i])+1].append(data['answer'][i])
return [converted_data]
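# The result is a single-element list holding an OrderedDict laid out like a matrix:
# 'peer_id' maps to the list of peers, and each 1-based question number maps to the
# list of answers collected for that question from the long-format input.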
class ResultWriter:
def __init__(self, exel_file_path):
self.path = exel_file_path
def _create_data_frame(self, data_tuple):
result = OrderedDict()
for data in data_tuple[DATA_INDEX]:
for key, value in data.__dict__.items():
if key == '_sa_instance_state' or key == 'question_answer_flag':
continue
result.setdefault(key, []).append(value) if len(data_tuple) == 1 \
else result.setdefault(
data_tuple[TRANSLATION_INDEX][key], []
).append(value)
return result
def write_to_excel(self, data_list, sheet_name_list):
data_frame_list = []
if os.path.exists(self.path):
os.remove(self.path)
# for data_tuple in data_list:
# data_frame_list.append(self._create_data_frame(data_tuple))
# data_frame_list = convert_dataframe_to_matrix(data_frame_list)
writer = pd.ExcelWriter(self.path, engine='xlsxwriter')
try:
pandas_dataframe = pd.DataFrame(data_list)
import math
import os
import pathlib
from functools import reduce
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from experiment_definitions import ExperimentDefinitions
from data_collectors import MemtierCollector, MiddlewareCollector
class PlottingFunctions:
@staticmethod
def lineplot(dataframe, experiment_title, save_as_filename,
x=None, y=None, hue=None, style=None, ci='sd', err_style='band',
xlabel=None, ylabel=None, huelabel=None, stylelabel=None,
xlim=(0, None), ylim=(0, None),
xticks=None):
# markers = hue if style is None else True
# print(markers)
sns.lineplot(x, y, data=dataframe, legend="full", hue=hue, style=style, markers=True,
ci=ci, err_style='band').set(xlabel=xlabel, ylabel=ylabel,
title=experiment_title,
xlim=xlim, ylim=ylim)
sns.scatterplot(x, y, data=dataframe, legend=False, hue=hue, style=style,
ci=None).set(xlabel=xlabel, ylabel=ylabel,
title=experiment_title,
xlim=xlim, ylim=ylim)
if isinstance(xticks, tuple):
plt.xticks(xticks[0], xticks[1], rotation=45)
else:
if xticks[0] == 6 or xticks[0] == 2:
xticks = np.insert(xticks, 0, 0)
plt.xticks(xticks, rotation=45)
if huelabel is not None or stylelabel is not None:
legend = plt.legend(bbox_to_anchor=(1, 1), loc='upper left')
for txt in legend.get_texts():
if txt.get_text() == hue and huelabel is not None:
txt.set_text(huelabel)
continue
if txt.get_text() == style and stylelabel is not None:
txt.set_text(stylelabel)
continue
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def barplot(dataframe, experiment_title, save_as_filename,
x=None, y=None, hue=None, ci='sd',
xlabel=None, ylabel=None, huelabel=None,
xlim=(None, None), ylim=(0, None),
xticks=None):
sns.barplot(x, y, hue, data=dataframe,
ci=ci, capsize=.1, errwidth=1.5).set(xlabel=xlabel, ylabel=ylabel, title=experiment_title,
xlim=xlim, ylim=ylim)
if isinstance(xticks, tuple):
plt.xticks(xticks[0], xticks[1], rotation=45)
else:
plt.xticks(xticks, rotation=45)
if huelabel is not None:
legend = plt.legend()
for txt in legend.get_texts():
if txt.get_text() == hue and huelabel is not None:
txt.set_text(huelabel)
continue
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def distplot(histogram, experiment_title, save_as_filename,
bins=200, kde=False,
xlabel=None, ylabel=None, xlim=(0, None), ylim=(0, None),
xticks=None):
sns.distplot(histogram, bins=bins, kde=kde, hist=True).set(xlabel=xlabel, ylabel=ylabel,
title=experiment_title,
xlim=xlim, ylim=ylim)
if xticks is not None:
plt.xticks(*xticks)
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def resplot(dataframe, experiment_title, save_as_filename,
x=None, y=None,
xlabel=None, ylabel=None):
sns.residplot(x, y, dataframe).set(xlabel=xlabel, ylabel=ylabel, title=experiment_title)
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def qqplot(dataframe, experiment_title, save_as_filename,
x=None, fit_line=False):
stats.probplot(dataframe[x], dist="norm", fit=fit_line, plot=plt)
plt.title(experiment_title)
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def plot_throughput_by_type(dataframe, experiment_title, save_as_filename,
x='Num_Clients', y='Request_Throughput', hue='RequestType', style='Worker_Threads',
ci='sd',
err_style='bars',
xlabel='Memtier Client Count', ylabel='Throughput (req/s)', huelabel='Request Type',
stylelabel='Worker Threads',
xlim=(0, None), ylim=(0, None),
xticks=None):
if xticks is None:
xticks = dataframe[x].unique()
PlottingFunctions.lineplot(dataframe, experiment_title, save_as_filename, x, y, hue, style, ci, err_style,
xlabel, ylabel, huelabel, stylelabel, xlim, ylim, xticks)
@staticmethod
def plot_throughput_family(dataframe, experiment_title, save_as_filename,
x='Num_Clients', y='Request_Throughput', hue='Worker_Threads', style=None,
ci='sd', err_style='bars',
xlabel='Memtier Client Count', ylabel='Throughput (req/s)', huelabel='Worker Threads',
stylelabel=None,
xlim=(0, None), ylim=(0, None),
xticks=None):
if xticks is None:
xticks = dataframe[x].unique()
PlottingFunctions.lineplot(dataframe, experiment_title, save_as_filename, x, y, hue, style, ci, err_style,
xlabel, ylabel, huelabel, stylelabel, xlim, ylim, xticks)
@staticmethod
def plot_response_time_by_type(dataframe, experiment_title, save_as_filename,
x='Num_Clients', y='Response_Time', hue='RequestType', style='Worker_Threads',
ci='sd',
err_style='bars',
xlabel='Memtier Client Count', ylabel='Response Time (ms)', huelabel='Request Type',
stylelabel='Worker Threads',
xlim=(0, None), ylim=(0, None),
xticks=None):
if xticks is None:
xticks = dataframe[x].unique()
PlottingFunctions.lineplot(dataframe, experiment_title, save_as_filename, x, y, hue, style, ci, err_style,
xlabel, ylabel, huelabel, stylelabel, xlim, ylim, xticks)
@staticmethod
def plot_response_time_family(dataframe, experiment_title, save_as_filename,
x='Num_Clients', y='Response_Time', hue='Worker_Threads', style=None, ci='sd',
err_style='bars',
xlabel='Memtier Client Count', ylabel='Response Time (ms)', huelabel='Worker Threads',
stylelabel=None,
xlim=(0, None), ylim=(0, None),
xticks=None):
if xticks is None:
xticks = dataframe[x].unique()
PlottingFunctions.lineplot(dataframe, experiment_title, save_as_filename, x, y, hue, style, ci, err_style,
xlabel, ylabel, huelabel, stylelabel, xlim, ylim, xticks)
@staticmethod
def plot_histogram(histogram, experiment_title, save_as_filename, bins=200, kde=False,
xlabel='Buckets (ms)', ylabel='Request Count', xlim=(0, 20), ylim=(0, 35000),
xticks=None):
if xticks is None:
xticks = (np.arange(0, (bins / 10) + 0.1, step=2.5), np.linspace(0, bins / 10, 9))
PlottingFunctions.distplot(histogram, experiment_title, save_as_filename, bins, kde,
xlabel, ylabel, xlim, ylim, xticks)
class StatisticsFunctions:
@staticmethod
def get_average_and_std(dataframe, aggregate_on):
return dataframe[aggregate_on].agg(['mean', 'std']).reset_index().rename(index=str,
columns={
"mean": aggregate_on + '_Mean',
"std": aggregate_on + '_Std'})
@staticmethod
def get_sum(dataframe, aggregate_on):
return dataframe[aggregate_on].agg(['sum']).reset_index().rename(index=str, columns={"sum": aggregate_on})
@staticmethod
def get_weighted_average(dataframe, aggregate_on):
return dataframe.apply(lambda x: np.average(x[aggregate_on], weights=x['Request_Throughput'])).reset_index() \
.rename(index=str, columns={0: aggregate_on})
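# Per-request metrics are averaged with each row's Request_Throughput as the weight,
# so repetitions that completed more requests contribute proportionally more.
# Hypothetical example of the weighting:
#   df = pd.DataFrame({'Type': ['SET', 'SET'],
#                      'Request_Throughput': [1000, 3000],
#                      'Response_Time': [2.0, 4.0]})
#   StatisticsFunctions.get_weighted_average(df.groupby('Type'), 'Response_Time')
#   # Response_Time -> (2.0*1000 + 4.0*3000) / 4000 = 3.5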
@staticmethod
def get_arithmetic_mean(dataframe, aggregate_on):
return dataframe[aggregate_on].agg(['mean']).reset_index().rename(index=str, columns={"mean": aggregate_on})
@staticmethod
def get_percentiles(dataframe):
return dataframe.quantile(([.01, .05, .1, .15, .2, .25, .3, .35, .4, .45, .5, .525, .55, .575, .6, .625, .65,
.675, .7, .725, .75, .775, .8, .825, .85, .875, .90, .925, .95, .975, .99, 1])).reset_index().rename(
index=str,
columns={"level_2": 'Percentile'})
@staticmethod
def get_report_percentiles(dataframe):
return dataframe.quantile(([.25, .5, .75, .90, .99])).reset_index().rename(index=str,
columns={"level_2": 'Percentile'})
@staticmethod
def mm1(summary_table, plot=False):
calculations = []
for row in summary_table.itertuples():
lamb = row[4]
muh = row[-1]
measured_response_time = row[5]
measured_queue_waiting_time = row[6]
measured_queue_size = row[8]
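# Standard M/M/1 relations with arrival rate lambda and service rate mu:
#   rho   = lambda / mu                      (traffic intensity)
#   E[N]  = rho / (1 - rho)                  (jobs in the system)
#   E[Nq] = rho * E[N] = rho^2 / (1 - rho)   (jobs waiting in the queue)
#   E[T]  = (1 / mu) / (1 - rho)             (response time)
#   E[W]  = rho * E[T]                       (waiting time)
# Times are converted to ms (* 1000) when appended to the summary below.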
traffic_intensity = lamb / muh
mean_nr_jobs_in_system = traffic_intensity / (1 - traffic_intensity)
mean_nr_jobs_in_queue = traffic_intensity * mean_nr_jobs_in_system
mean_response_time = (1 / muh) / (1 - traffic_intensity)
mean_waiting_time = traffic_intensity * mean_response_time
calculations.append({'Num_Clients': row[1],
'Worker_Threads': row[2],
'Maximum_Service_Rate': muh,
'Arrival_Rate': lamb,
'Traffic_Intensity': traffic_intensity,
'Mean_Number_Jobs_System': mean_nr_jobs_in_system,
'Measured_Response_Time': measured_response_time,
'Estimated_Response_Time': mean_response_time * 1000,
'Measured_Queue_Waiting_Time': measured_queue_waiting_time,
'Estimated_Queue_Waiting_Time': mean_waiting_time * 1000,
'Measured_Queue_Size': measured_queue_size,
'Estimated_Queue_Size': mean_nr_jobs_in_queue})
mm1_analysis = pd.DataFrame(calculations)
mm1_analysis = mm1_analysis[['Num_Clients', 'Worker_Threads', 'Maximum_Service_Rate', 'Arrival_Rate',
'Traffic_Intensity', 'Mean_Number_Jobs_System', 'Measured_Response_Time',
'Estimated_Response_Time', 'Measured_Queue_Waiting_Time',
'Estimated_Queue_Waiting_Time', 'Measured_Queue_Size', 'Estimated_Queue_Size']]
return mm1_analysis
@staticmethod
def mmm(summary_table, plot=False):
calculations = []
for row in summary_table.itertuples():
lamb = row[4]
servers = row[2] * 2
muh = row[-1] / servers
measured_response_time = row[5]
measured_queue_waiting_time = row[6]
measured_queue_size = row[8]
traffic_intensity = lamb / (muh * servers)
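# M/M/m (Erlang C) relations with m servers, per-server rate mu and rho = lambda / (m * mu):
#   P0      = 1 / ( sum_{n=0}^{m-1} (m*rho)^n / n!  +  (m*rho)^m / (m! * (1 - rho)) )
#   P_queue = P0 * (m*rho)^m / (m! * (1 - rho))
#   E[Nq]   = P_queue * rho / (1 - rho)
#   E[N]    = m * rho + E[Nq]
#   E[T]    = (1 / mu) * (1 + P_queue / (m * (1 - rho)))
#   E[W]    = E[Nq] / lambda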
_param1 = math.pow(servers * traffic_intensity, servers) / (
math.factorial(servers) * (1 - traffic_intensity))
probability_zero_jobs_in_system = 1 / (1 + _param1 +
sum([pow(servers * traffic_intensity, n) / math.factorial(n) for n in
range(1, servers)]))
probability_of_queueing = probability_zero_jobs_in_system * _param1
mean_number_jobs_in_queue = (traffic_intensity * probability_of_queueing) / (1 - traffic_intensity)
mean_number_jobs_in_system = servers * traffic_intensity + mean_number_jobs_in_queue
average_utilization_each_server = traffic_intensity
mean_response_time = (1 / muh) * (1 + probability_of_queueing / (servers * (1 - traffic_intensity)))
mean_waiting_time = mean_number_jobs_in_queue / lamb
calculations.append({'Num_Clients': row[1],
'Worker_Threads': row[2],
'Maximum_Service_Rate': muh,
'Arrival_Rate': lamb,
'Traffic_Intensity': traffic_intensity,
'Mean_Number_Jobs_System': mean_number_jobs_in_system,
'Measured_Response_Time': measured_response_time,
'Estimated_Response_Time': mean_response_time * 1000,
'Measured_Queue_Waiting_Time': measured_queue_waiting_time,
'Estimated_Queue_Waiting_Time': mean_waiting_time * 1000,
'Measured_Queue_Size': measured_queue_size,
'Estimated_Queue_Size': mean_number_jobs_in_queue,
'Probability_Zero_Jobs_System': probability_zero_jobs_in_system,
'Probability_Queueing': probability_of_queueing,
'Mean_Average_Utilization_Each_Server': average_utilization_each_server})
mmm_analysis = pd.DataFrame(calculations)
mmm_analysis = mmm_analysis[['Num_Clients', 'Worker_Threads', 'Maximum_Service_Rate', 'Arrival_Rate',
'Traffic_Intensity', 'Mean_Number_Jobs_System', 'Measured_Response_Time',
'Estimated_Response_Time', 'Measured_Queue_Waiting_Time',
'Estimated_Queue_Waiting_Time', 'Measured_Queue_Size', 'Estimated_Queue_Size',
'Probability_Zero_Jobs_System', 'Probability_Queueing',
'Mean_Average_Utilization_Each_Server']]
return mmm_analysis
class ExperimentPlotter:
@staticmethod
def save_figure(save_as_filename):
current_dir = pathlib.Path(__file__).parent
figure_path = current_dir.joinpath("figures")
if not os.path.exists(figure_path):
os.makedirs(figure_path)
figure_path = figure_path.joinpath(save_as_filename + ".png")
plt.savefig(figure_path, dpi=150, bbox_inches='tight')
plt.close()
@staticmethod
def memtier_experiment(experiment_definition, histogram=False):
memtier_collector = MemtierCollector(experiment_definition)
memtier_collector.generate_dataframe(histogram)
return [[memtier_collector.dataframe_set, memtier_collector.dataframe_get],
[memtier_collector.dataframe_histogram_set, memtier_collector.dataframe_histogram_get]]
@staticmethod
def middleware_experiment(experiment_definition, histogram=False):
middleware_collector = MiddlewareCollector(experiment_definition)
middleware_collector.generate_dataframe(histogram)
return [[middleware_collector.dataframe_set, middleware_collector.dataframe_get],
[middleware_collector.dataframe_histogram_set, middleware_collector.dataframe_histogram_get]]
@staticmethod
def memtier_statistics_get_set(flattened, subexperiment, plot=True, throughput_y=(0, None),
response_time_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Memtier')
set_group = flattened[0].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
get_group = flattened[1].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
throughput_set = StatisticsFunctions.get_sum(set_group, 'Request_Throughput')
throughput_get = StatisticsFunctions.get_sum(get_group, 'Request_Throughput')
response_time_set = StatisticsFunctions.get_weighted_average(set_group, 'Response_Time')
response_time_get = StatisticsFunctions.get_weighted_average(get_group, 'Response_Time')
hits_get = StatisticsFunctions.get_sum(get_group, 'Hits')
misses_get = StatisticsFunctions.get_sum(get_group, 'Misses')
if plot:
concatenated_throughput = pd.concat([throughput_set.assign(RequestType='SET'),
throughput_get.assign(RequestType='GET')])
concatenated_response_time = pd.concat([response_time_set.assign(RequestType='SET'),
response_time_get.assign(RequestType='GET')])
throughput_measured = concatenated_throughput[~concatenated_throughput.Type.str.contains('Interactive')]
throughput_interactive = concatenated_throughput[
concatenated_throughput.Type.str.contains('Interactive')]
response_time_measured = concatenated_response_time[
~concatenated_response_time.Type.str.contains('Interactive')]
response_time_interactive = concatenated_response_time[
concatenated_response_time.Type.str.contains('Interactive')]
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.plot_throughput_by_type(throughput_measured, exp_name, plot_base + 'mt_throughput',
ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_measured, exp_name,
plot_base + 'mt_response_time', ylim=response_time_y)
PlottingFunctions.plot_throughput_by_type(throughput_interactive, exp_name + ' Interactive Law',
plot_base + 'mt_throughput-il', ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_interactive, exp_name + ' Interactive Law',
plot_base + 'mt_response-time-il', ylim=response_time_y)
response_time_set = pd.merge(throughput_set, response_time_set)
response_time_get = pd.merge(throughput_get, response_time_get)
hits_get = pd.merge(throughput_get, hits_get)
misses_get = pd.merge(throughput_get, misses_get)
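# Request_Throughput is merged back into each metric frame so the subsequent
# get_weighted_average calls can weight every repetition by its measured throughput.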
plotted_throughput_set = throughput_set.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_throughput_get = throughput_get.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_set = response_time_set.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_get = response_time_get.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
hits_get = StatisticsFunctions.get_weighted_average(hits_get.groupby(['Num_Clients', 'Worker_Threads', 'Type']),
'Hits')
misses_get = StatisticsFunctions.get_weighted_average(
misses_get.groupby(['Num_Clients', 'Worker_Threads', 'Type']),
'Misses')
throughput_set_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_set, 'Request_Throughput')
throughput_get_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_get, 'Request_Throughput')
response_time_set_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_set, 'Response_Time')
response_time_get_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_get, 'Response_Time')
set_table_list = [throughput_set_plotted, response_time_set_plotted]
get_table_list = [throughput_get_plotted, response_time_get_plotted, misses_get, hits_get]
set_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type']), set_table_list)
get_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type']), get_table_list)
print(exp_name + " SET:")
print(set_summary)
print("====================\n")
print(exp_name + " GET:")
print(get_summary)
print("====================\n")
return [set_summary, get_summary]
@staticmethod
def memtier_statistics_request_family(flattened, subexperiment, r_type='SET', plot=True, throughput_y=(0, None),
response_time_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Memtier')
family = flattened[0].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
throughput_family = StatisticsFunctions.get_sum(family, 'Request_Throughput')
response_time_family = StatisticsFunctions.get_weighted_average(family, 'Response_Time')
if plot:
concatenated_throughput = pd.concat([throughput_family.assign(RequestType=r_type)])
concatenated_response_time = pd.concat([response_time_family.assign(RequestType=r_type)])
throughput_measured = concatenated_throughput[~concatenated_throughput.Type.str.contains('Interactive')]
throughput_interactive = concatenated_throughput[concatenated_throughput.Type.str.contains('Interactive')]
response_time_measured = concatenated_response_time[
~concatenated_response_time.Type.str.contains('Interactive')]
response_time_interactive = concatenated_response_time[
concatenated_response_time.Type.str.contains('Interactive')]
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.plot_throughput_by_type(throughput_measured, exp_name, plot_base + 'mt_throughput',
ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_measured, exp_name,
plot_base + 'mt_response_time', ylim=response_time_y)
PlottingFunctions.plot_throughput_by_type(throughput_interactive, exp_name + ' Interactive Law',
plot_base + 'mt_throughput-il', ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_interactive, exp_name + ' Interactive Law',
plot_base + 'mt_response-time-il', ylim=response_time_y)
response_time_family = pd.merge(throughput_family, response_time_family)
plotted_throughput_family = throughput_family.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_family = response_time_family.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
throughput_family_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_family,
'Request_Throughput')
response_time_family_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_family,
'Response_Time')
family_table_list = [throughput_family_plotted, response_time_family_plotted]
family_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type']),
family_table_list)
print(exp_name + " " + r_type + ":")
print(family_summary)
print("====================\n")
return family_summary
@staticmethod
def memtier_statistics_multiget(flattened, subexperiment, plot=True, throughput_y=(0, None),
response_time_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Memtier')
if subexperiment['subexperiment_id'] == 2:
req_types = 'Non-sharded MultiGET'
type_to_number_dict = {
"MULTIGET_1": 1,
"MULTIGET_3": 3,
"MULTIGET_6": 6,
"MULTIGET_9": 9
}
else:
req_types = 'Sharded MultiGET'
type_to_number_dict = {
"SHARDED_1": 1,
"SHARDED_3": 3,
"SHARDED_6": 6,
"SHARDED_9": 9
}
get_group = flattened[1][~flattened[1].Type.str.contains('Interactive')]
get_group['Type'] = get_group['Type'].replace(type_to_number_dict, regex=True)
get_group['Type'] = pd.to_numeric(get_group['Type'])
get_group = get_group.groupby(['Type', 'Repetition', 'Worker_Threads'])
summed_get_throughput = StatisticsFunctions.get_sum(get_group, 'Request_Throughput')
average_get_response_time = StatisticsFunctions.get_weighted_average(get_group, 'Response_Time')
hits_get = StatisticsFunctions.get_sum(get_group, 'Hits')
misses_get = StatisticsFunctions.get_sum(get_group, 'Misses')
concatenated_throughput = pd.concat([summed_get_throughput.assign(RequestType='GET')])
concatenated_response_time = pd.concat([average_get_response_time.assign(RequestType='GET')])
if plot:
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.lineplot(concatenated_throughput, exp_name, plot_base + 'mt_throughput', x='Type',
y='Request_Throughput',
xlabel=req_types, ylabel='Throughput (req/s)',
xlim=(0, None), ylim=throughput_y, xticks=[1, 3, 6, 9])
PlottingFunctions.lineplot(concatenated_response_time, exp_name, plot_base + 'mt_response-time', x='Type',
y='Response_Time',
xlabel=req_types, ylabel='Response Time (ms)',
xlim=(0, None), ylim=response_time_y, xticks=[1, 3, 6, 9])
average_get_response_time = pd.merge(summed_get_throughput, average_get_response_time)
hits_get = pd.merge(summed_get_throughput, hits_get)
misses_get = pd.merge(summed_get_throughput, misses_get)
plotted_throughput_get = summed_get_throughput.groupby(['Type'])
plotted_response_time_get = average_get_response_time.groupby(['Type'])
hits_get = StatisticsFunctions.get_weighted_average(hits_get.groupby(['Type']), 'Hits')
misses_get = StatisticsFunctions.get_weighted_average(misses_get.groupby(['Type']), 'Misses')
throughput_get_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_get, 'Request_Throughput')
response_time_get_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_get, 'Response_Time')
get_table_list = [throughput_get_plotted, response_time_get_plotted, misses_get, hits_get]
get_summary = reduce(lambda left, right: pd.merge(left, right, on=['Type']), get_table_list)
print(exp_name + " GET:")
print(get_summary)
print("====================\n\n")
return get_summary
@staticmethod
def middleware_statistics_get_set(flattened, subexperiment, plot=True, throughput_y=(0, None),
response_time_y=(0, None), queue_waiting_y=(0, None),
memcached_handling_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Middleware')
set_group = flattened[0].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
get_group = flattened[1].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
throughput_set = StatisticsFunctions.get_sum(set_group, 'Request_Throughput')
throughput_get = StatisticsFunctions.get_sum(get_group, 'Request_Throughput')
response_time_set = StatisticsFunctions.get_weighted_average(set_group, 'Response_Time')
response_time_get = StatisticsFunctions.get_weighted_average(get_group, 'Response_Time')
set_group = flattened[0][~flattened[0].Type.str.contains('Interactive')]
set_group = set_group.groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
get_group = flattened[1][~flattened[1].Type.str.contains('Interactive')]
get_group = get_group.groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
queue_waiting_time_set = StatisticsFunctions.get_weighted_average(set_group, 'Queue_Waiting_Time')
queue_waiting_time_get = StatisticsFunctions.get_weighted_average(get_group, 'Queue_Waiting_Time')
memcached_communication_set = StatisticsFunctions.get_weighted_average(set_group, 'Memcached_Communication')
memcached_communication_get = StatisticsFunctions.get_weighted_average(get_group, 'Memcached_Communication')
queue_size_set = StatisticsFunctions.get_weighted_average(set_group, 'Queue_Size')
queue_size_get = StatisticsFunctions.get_weighted_average(get_group, 'Queue_Size')
hits_get = StatisticsFunctions.get_sum(get_group, 'Hits')
misses_get = StatisticsFunctions.get_sum(get_group, 'Misses')
if plot:
xticks = flattened[0]['Num_Clients'].unique()
concatenated_throughput = pd.concat([throughput_set.assign(RequestType='SET'),
throughput_get.assign(RequestType='GET')])
concatenated_response_time = pd.concat([response_time_set.assign(RequestType='SET'),
response_time_get.assign(RequestType='GET')])
concatenated_queue_waiting_time = pd.concat([queue_waiting_time_set.assign(RequestType='SET'),
queue_waiting_time_get.assign(RequestType='GET')])
concatenated_memcached_communication = pd.concat([memcached_communication_set.assign(RequestType='SET'),
memcached_communication_get.assign(RequestType='GET')])
concatenated_queue_size = pd.concat([queue_size_set.assign(RequestType='SET'),
queue_size_get.assign(RequestType='GET')])
throughput_measured = concatenated_throughput[~concatenated_throughput.Type.str.contains('Interactive')]
throughput_interactive = concatenated_throughput[
concatenated_throughput.Type.str.contains('Interactive')]
response_time_measured = concatenated_response_time[
~concatenated_response_time.Type.str.contains('Interactive')]
response_time_interactive = concatenated_response_time[
concatenated_response_time.Type.str.contains('Interactive')]
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.plot_throughput_by_type(throughput_measured, exp_name, plot_base + 'mw_throughput',
ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_measured, exp_name,
plot_base + 'mw_response_time', ylim=response_time_y)
PlottingFunctions.plot_throughput_by_type(throughput_interactive, exp_name + ' Interactive Law',
plot_base + 'mw_throughput-il', ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_interactive, exp_name + ' Interactive Law',
plot_base + 'mw_response-time-il', ylim=response_time_y)
PlottingFunctions.lineplot(concatenated_queue_waiting_time, exp_name, plot_base + "mw_queue-wait-time",
x='Num_Clients', y='Queue_Waiting_Time', hue='RequestType',
style='Worker_Threads', xlabel='Number Memtier Clients',
ylabel='Queue Waiting Time (ms)', huelabel='Request Type',
stylelabel='Worker Threads', xlim=(0, None), ylim=queue_waiting_y, xticks=xticks)
PlottingFunctions.lineplot(concatenated_memcached_communication, exp_name, plot_base + "mw_mc-comm-time",
x='Num_Clients', y='Memcached_Communication', hue='RequestType',
style='Worker_Threads', xlabel='Number Memtier Clients',
ylabel='Memcached Handling (ms)',
huelabel='Request Type', stylelabel='Worker Threads',
xlim=(0, None), ylim=memcached_handling_y, xticks=xticks)
PlottingFunctions.lineplot(concatenated_queue_size, exp_name, plot_base + "mw_queue-size", x='Num_Clients',
y='Queue_Size', hue='RequestType', style='Worker_Threads',
xlabel='Number Memtier Clients', ylabel='Queue Size',
huelabel='Request Type', stylelabel='Worker Threads',
xlim=(0, None), ylim=(0, None), xticks=xticks)
response_time_set = pd.merge(throughput_set, response_time_set)
response_time_get = pd.merge(throughput_get, response_time_get)
queue_waiting_time_set = pd.merge(throughput_set, queue_waiting_time_set)
queue_waiting_time_get = pd.merge(throughput_get, queue_waiting_time_get)
memcached_communication_set = pd.merge(throughput_set, memcached_communication_set)
memcached_communication_get = pd.merge(throughput_get, memcached_communication_get)
queue_size_set = pd.merge(throughput_set, queue_size_set)
queue_size_get = pd.merge(throughput_get, queue_size_get)
hits_get = pd.merge(throughput_get, hits_get)
misses_get = pd.merge(throughput_get, misses_get)
plotted_throughput_set = throughput_set.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_throughput_get = throughput_get.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_set = response_time_set.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_get = response_time_get.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_queue_waiting_time_set = queue_waiting_time_set.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_queue_waiting_time_get = queue_waiting_time_get.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_memcached_communication_set = memcached_communication_set.groupby(
['Num_Clients', 'Worker_Threads', 'Type'])
plotted_memcached_communication_get = memcached_communication_get.groupby(
['Num_Clients', 'Worker_Threads', 'Type'])
plotted_queue_size_set = queue_size_set.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_queue_size_get = queue_size_get.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
hits_get = StatisticsFunctions.get_weighted_average(hits_get.groupby(['Num_Clients', 'Worker_Threads', 'Type']),
'Hits')
misses_get = StatisticsFunctions.get_weighted_average(
misses_get.groupby(['Num_Clients', 'Worker_Threads', 'Type']), 'Misses')
throughput_set_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_set,
'Request_Throughput')
throughput_get_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_get,
'Request_Throughput')
response_time_set_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_set,
'Response_Time')
response_time_get_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_get,
'Response_Time')
queue_waiting_time_set_plotted = StatisticsFunctions.get_weighted_average(plotted_queue_waiting_time_set,
'Queue_Waiting_Time')
queue_waiting_time_get_plotted = StatisticsFunctions.get_weighted_average(plotted_queue_waiting_time_get,
'Queue_Waiting_Time')
memcached_communication_set_plotted = StatisticsFunctions.get_weighted_average(
plotted_memcached_communication_set, 'Memcached_Communication')
memcached_communication_get_plotted = StatisticsFunctions.get_weighted_average(
plotted_memcached_communication_get, 'Memcached_Communication')
queue_size_set_plotted = StatisticsFunctions.get_weighted_average(plotted_queue_size_set, 'Queue_Size')
queue_size_get_plotted = StatisticsFunctions.get_weighted_average(plotted_queue_size_get, 'Queue_Size')
set_table_list = [throughput_set_plotted, response_time_set_plotted, queue_waiting_time_set_plotted,
memcached_communication_set_plotted, queue_size_set_plotted]
get_table_list = [throughput_get_plotted, response_time_get_plotted, queue_waiting_time_get_plotted,
memcached_communication_get_plotted, queue_size_get_plotted, misses_get, hits_get]
set_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type'], how='outer'),
set_table_list)
get_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type'], how='outer'),
get_table_list)
print(exp_name + " SET:")
print(set_summary)
print("====================\n")
print(exp_name + " GET:")
print(get_summary)
print("====================\n")
return [set_summary, get_summary]
@staticmethod
def middleware_statistics_request_family(flattened, subexperiment, r_type='SET', plot=True, throughput_y=(0, None),
response_time_y=(0, None), queue_waiting_y=(0, None),
memcached_handling_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Middleware')
family = flattened[0].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
throughput_family = StatisticsFunctions.get_sum(family, 'Request_Throughput')
response_time_family = StatisticsFunctions.get_weighted_average(family, 'Response_Time')
family = flattened[0][~flattened[0].Type.str.contains('Interactive')]
family = family.groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
queue_waiting_time_family = StatisticsFunctions.get_weighted_average(family, 'Queue_Waiting_Time')
memcached_communication_family = StatisticsFunctions.get_weighted_average(family, 'Memcached_Communication')
queue_size_family = StatisticsFunctions.get_weighted_average(family, 'Queue_Size')
if plot:
xticks = flattened[0]['Num_Clients'].unique()
concatenated_throughput = pd.concat([throughput_family.assign(RequestType=r_type)])
concatenated_response_time = pd.concat([response_time_family.assign(RequestType=r_type)])
concatenated_queue_waiting_time = pd.concat([queue_waiting_time_family.assign(RequestType=r_type)])
concatenated_memcached_communication = pd.concat(
[memcached_communication_family.assign(RequestType=r_type)])
concatenated_queue_size = pd.concat([queue_size_family.assign(RequestType=r_type)])
throughput_measured = concatenated_throughput[~concatenated_throughput.Type.str.contains('Interactive')]
throughput_interactive = concatenated_throughput[concatenated_throughput.Type.str.contains('Interactive')]
response_time_measured = concatenated_response_time[
~concatenated_response_time.Type.str.contains('Interactive')]
response_time_interactive = concatenated_response_time[
concatenated_response_time.Type.str.contains('Interactive')]
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.plot_throughput_by_type(throughput_measured, exp_name, plot_base + 'mw_throughput',
ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_measured, exp_name,
plot_base + 'mw_response_time', ylim=response_time_y)
PlottingFunctions.plot_throughput_by_type(throughput_interactive, exp_name + ' Interactive Law',
plot_base + 'mw_throughput-il', ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_interactive, exp_name + ' Interactive Law',
plot_base + 'mw_response-time-il', ylim=response_time_y)
PlottingFunctions.lineplot(concatenated_queue_waiting_time, exp_name, plot_base + "mw_queue-wait-time",
x='Num_Clients', y='Queue_Waiting_Time', hue='Worker_Threads',
xlabel='Number Memtier Clients', ylabel='Queue Waiting Time (ms)',
huelabel='Worker Threads', xlim=(0, None), ylim=queue_waiting_y, xticks=xticks)
PlottingFunctions.lineplot(concatenated_memcached_communication, exp_name, plot_base + "mw_mc-comm-time",
x='Num_Clients', y='Memcached_Communication', hue='Worker_Threads',
xlabel='Number Memtier Clients',
ylabel='Memcached Handling (ms)',
huelabel='Worker Threads', xlim=(0, None), ylim=memcached_handling_y,
xticks=xticks)
PlottingFunctions.lineplot(concatenated_queue_size, exp_name, plot_base + "mw_queue-size", x='Num_Clients',
y='Queue_Size', hue='Worker_Threads', xlabel='Number Memtier Clients',
ylabel='Queue Size', huelabel='Worker Threads', xlim=(0, None), ylim=(0, None),
xticks=xticks)
response_time_family = pd.merge(throughput_family, response_time_family)
queue_waiting_time_family = pd.merge(throughput_family, queue_waiting_time_family)
memcached_communication_family = pd.merge(throughput_family, memcached_communication_family)
queue_size_family = pd.merge(throughput_family, queue_size_family)
plotted_throughput_family = throughput_family.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_family = response_time_family.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_queue_waiting_time_family = queue_waiting_time_family.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_memcached_communication_family = memcached_communication_family.groupby(
['Num_Clients', 'Worker_Threads', 'Type'])
plotted_queue_size_family = queue_size_family.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
throughput_family_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_family,
'Request_Throughput')
response_time_family_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_family,
'Response_Time')
queue_waiting_time_family_plotted = StatisticsFunctions.get_weighted_average(plotted_queue_waiting_time_family,
'Queue_Waiting_Time')
memcached_communication_family_plotted = StatisticsFunctions.get_weighted_average(
plotted_memcached_communication_family, 'Memcached_Communication')
queue_size_family_plotted = StatisticsFunctions.get_weighted_average(plotted_queue_size_family, 'Queue_Size')
family_table_list = [throughput_family_plotted, response_time_family_plotted, queue_waiting_time_family_plotted,
memcached_communication_family_plotted, queue_size_family_plotted]
family_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type'], how='outer'),
family_table_list)
print(exp_name + " " + r_type + ":")
print(family_summary)
print("====================\n")
return family_summary
@staticmethod
def middleware_statistics_multiget(flattened, subexperiment, plot=True, throughput_y=(0, None),
response_time_y=(0, None), queue_waiting_y=(0, None),
memcached_handling_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Middleware')
if subexperiment['subexperiment_id'] == 2:
req_types = 'Non-sharded MultiGET'
type_to_number_dict = {
"MULTIGET_1": 1,
"MULTIGET_3": 3,
"MULTIGET_6": 6,
"MULTIGET_9": 9
}
else:
req_types = 'Sharded MultiGET'
type_to_number_dict = {
"SHARDED_1": 1,
"SHARDED_3": 3,
"SHARDED_6": 6,
"SHARDED_9": 9
}
get_group = flattened[1][~flattened[1].Type.str.contains('Interactive')]
get_group['Type'] = get_group['Type'].replace(type_to_number_dict, regex=True)
get_group['Type'] = pd.to_numeric(get_group['Type'])
server_load = get_group['Key_Distribution'].apply(pd.Series).apply(pd.Series)
server_load.rename(columns={0: 'Server1', 1: 'Server2', 2: 'Server3'}, inplace=True)
server_load.fillna(value=0, inplace=True)
get_group = get_group.join(server_load)
get_group.drop(columns=['Key_Distribution'], inplace=True)
names = get_group.columns.tolist()
names.remove('Server1')
names.remove('Server2')
names.remove('Server3')
get_copy = get_group
get_group = get_group.groupby(['Type', 'Repetition'])
throughput_get = StatisticsFunctions.get_sum(get_group, 'Request_Throughput')
response_time_get = StatisticsFunctions.get_weighted_average(get_group, 'Response_Time')
queue_waiting_time_get = StatisticsFunctions.get_weighted_average(get_group, 'Queue_Waiting_Time')
memcached_communication_get = StatisticsFunctions.get_weighted_average(get_group, 'Memcached_Communication')
queue_size_get = StatisticsFunctions.get_weighted_average(get_group, 'Queue_Size')
hits_get = StatisticsFunctions.get_sum(get_group, 'Hits')
misses_get = StatisticsFunctions.get_sum(get_group, 'Misses')
keysize_get = StatisticsFunctions.get_weighted_average(get_group, 'Request_Size')
key_throughput_get = StatisticsFunctions.get_sum(get_group, 'Key_Throughput')
server_loads = pd.wide_to_long(get_copy, stubnames='Server', i=names, j='Server_ID')
server_loads = server_loads.reset_index()
server_loads = server_loads.groupby(['Type', 'Repetition', 'Server_ID'])
server_loads_average = StatisticsFunctions.get_weighted_average(server_loads, 'Server')
if plot:
concatenated_throughput = pd.concat([throughput_get.assign(RequestType='GET')])
concatenated_response_time = pd.concat([response_time_get.assign(RequestType='GET')])
concatenated_queue_waiting_time = pd.concat([queue_waiting_time_get.assign(RequestType='GET')])
concatenated_memcached_communication = pd.concat([memcached_communication_get.assign(RequestType='GET')])
concatenated_queue_size = pd.concat([queue_size_get.assign(RequestType='GET')])
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.lineplot(concatenated_throughput, exp_name, plot_base + "mw_throughput", x='Type',
y='Request_Throughput',
xlabel=req_types, ylabel='Throughput (req/s)',
xlim=(0, None), ylim=throughput_y, xticks=[1, 3, 6, 9])
PlottingFunctions.lineplot(concatenated_response_time, exp_name, plot_base + "mw_response-time", x='Type',
y='Response_Time',
xlabel=req_types, ylabel='Response Time (ms)',
xlim=(0, None), ylim=response_time_y, xticks=[1, 3, 6, 9])
PlottingFunctions.lineplot(concatenated_queue_waiting_time, exp_name, plot_base + "mw_queue-wait-time",
x='Type',
y='Queue_Waiting_Time',
xlabel=req_types, ylabel='Queue Waiting Time (ms)',
xlim=(0, None), ylim=queue_waiting_y, xticks=[1, 3, 6, 9])
PlottingFunctions.lineplot(concatenated_memcached_communication, exp_name, plot_base + "mw_mc-comm-time",
x='Type',
y='Memcached_Communication',
xlabel=req_types, ylabel='Memcached Handling (ms)',
xlim=(0, None), ylim=memcached_handling_y, xticks=[1, 3, 6, 9])
PlottingFunctions.lineplot(concatenated_queue_size, exp_name, plot_base + "mw_queue-size", x='Type',
y='Queue_Size',
xlabel=req_types, ylabel='Queue Size',
xlim=(0, None), ylim=(0, None), xticks=[1, 3, 6, 9])
PlottingFunctions.barplot(server_loads_average, exp_name, plot_base + "key_distribution", x='Server_ID',
y='Server', hue='Type', xlabel='Server ID',
ylabel='Average Load', huelabel='MultiGET Type', ylim=(0, 4))
response_time_get = pd.merge(throughput_get, response_time_get)
queue_waiting_time_get = pd.merge(throughput_get, queue_waiting_time_get)
memcached_communication_get = pd.merge(throughput_get, memcached_communication_get)
queue_size_get = pd.merge(throughput_get, queue_size_get)
hits_get = pd.merge(throughput_get, hits_get)
misses_get = pd.merge(throughput_get, misses_get)
keysize_get = pd.merge(throughput_get, keysize_get)
import asyncio
import itertools
from datetime import timedelta
from typing import Coroutine, Dict, List, Union
import pandas as pd
from celery.utils.log import get_task_logger
from celery.utils.time import humanize_seconds
import calc.prod # noqa
import config as conf
import cq.signals # noqa
import cq.util
import db.models
import ext.metrics as metrics
import util
from collector import IHSClient
from const import HoleDirection, IHSPath, Provider
from cq.worker import celery_app
from executors import BaseExecutor, GeomExecutor, ProdExecutor, WellExecutor # noqa
logger = get_task_logger(__name__)
RETRY_BASE_DELAY = 15
# TODO: add retries
# TODO: tenacity?
# TODO: asynchronously fracture failed batches
# TODO: circuit breakers?
# TODO: add task meta
@celery_app.task
def log():
"""Print some log messages"""
logger.warning("task-check")
@celery_app.task
def smoke_test():
""" Verify an arbitrary Celery task can run """
return "verified"
def run_executors(
hole_dir: HoleDirection,
api14s: List[str] = None,
api10s: List[str] = None,
executors: List[BaseExecutor] = None,
batch_size: int = None,
log_vs: float = None,
log_hs: float = None,
):
executors = executors or [WellExecutor, GeomExecutor, ProdExecutor]
batch_size = batch_size or conf.TASK_BATCH_SIZE
if api14s is not None:
id_name = "api14s"
ids = api14s
elif api10s is not None:
id_name = "api10s"
ids = api10s
else:
raise ValueError("One of [api14s, api10s] must be specified")
# TODO: move chunking to run_executor?
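# Fan-out: the ids are split into fixed-size chunks and, for every chunk, one
# run_executor task is queued per executor class, staggered via spread_countdown
# and routed by hole direction.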
for idx, chunk in enumerate(util.chunks(ids, n=batch_size)):
for executor in executors:
kwargs = {
"hole_dir": hole_dir,
"executor_name": executor.__name__,
id_name: chunk,
}
countdown = cq.util.spread_countdown(idx, vs=log_vs, hs=log_hs)
logger.info(
f"({executor.__name__}[{hole_dir.value}]) submitting task: {id_name}={len(chunk)} countdown={countdown}" # noqa
)
run_executor.apply_async(
args=[],
kwargs=kwargs,
countdown=countdown,
ignore_result=False,
routing_key=hole_dir,
)
@celery_app.task(is_eager=True)
def post_heartbeat():
""" Send heartbeat to metrics backend"""
return metrics.post_heartbeat()
@celery_app.task
def run_executor(hole_dir: HoleDirection, executor_name: str, **kwargs):
# logger.warning(f"running {executor_name=} {hole_dir=} {kwargs=}")
executor = globals()[executor_name]
count, dataset = executor(hole_dir).run(**kwargs)
@celery_app.task
def run_next_available(
hole_dir: Union[HoleDirection, str], force: bool = False, **kwargs
):
""" Run next available area """
# TODO: set task meta
hole_dir = HoleDirection(hole_dir)
async def coro():
# await db.startup()
# hole_dir = HoleDirection.H
# TODO: move to Router
if hole_dir == HoleDirection.H:
ids_path = IHSPath.well_h_ids
else:
ids_path = IHSPath.well_v_ids
area_obj, attr, is_ready, cooldown_hours = await db.models.Area.next_available(
hole_dir
)
utcnow = util.utcnow()
prev_run = getattr(area_obj, attr)
if is_ready or force:
api14s: List[str] = await IHSClient.get_ids_by_area(
path=ids_path, area=area_obj.area
) # pull from IDMaster once implemented
# api14s = api14s[:10]
run_executors(hole_dir=hole_dir, api14s=api14s, **kwargs)
await area_obj.update(**{attr: utcnow}).apply()
prev_run = (
prev_run.strftime(util.dt.formats.no_seconds) if prev_run else None
)
utcnow = utcnow.strftime(util.dt.formats.no_seconds)
print(
f"({db.models.Area.__name__}[{hole_dir}]) updated {area_obj.area}.{attr}: {prev_run} -> {utcnow}" # noqa
)
else:
next_run_in_seconds = (
(prev_run + timedelta(hours=cooldown_hours)) - utcnow
).total_seconds()
print(
f"({db.models.Area.__name__}[{hole_dir}]) Skipping {area_obj.area} next available for run in {humanize_seconds(next_run_in_seconds)}" # noqa
) # noqa
return util.aio.async_to_sync(coro())
@celery_app.task()
def sync_area_manifest(): # FIXME: change to use Counties endpoint (and add Counties endpoint to IHS service :/) # noqa
""" Ensure the local list of areas is up to date """
loop = asyncio.get_event_loop()
async def wrapper(path: IHSPath, hole_dir: HoleDirection) -> List[Dict]:
records: List[Dict] = []
areas = await IHSClient.get_areas(path=path, name_only=False)
records = [
{"area": area["name"], "providers": [Provider.IHS]} for area in areas
]
return records
coros: List[Coroutine] = []
for args in [
(IHSPath.well_h_ids, HoleDirection.H),
(IHSPath.well_v_ids, HoleDirection.V),
(IHSPath.prod_h_ids, HoleDirection.H),
(IHSPath.prod_v_ids, HoleDirection.V),
]:
coros.append(wrapper(*args))
results = loop.run_until_complete(asyncio.gather(*coros))
inbound_df = pd.DataFrame(list(itertools.chain(*results))).set_index("area")
inbound_areas = inbound_df.groupby(level=0).first().sort_index()
existing_areas = util.aio.async_to_sync(db.models.Area.df()).sort_index()
# get unique area names that dont already exist
for_insert = inbound_areas[~inbound_areas.isin(existing_areas)].dropna()
# for_insert["h_last_run_at"] = util.utcnow()
if for_insert.shape[0] > 0:
coro = db.models.Area.bulk_upsert(
for_insert,
update_on_conflict=True,
reset_index=True,
conflict_constraint=db.models.Area.constraints["uq_areas_area"],
)
affected = loop.run_until_complete(coro)
logger.info(
f"({db.models.Area.__name__}) synchronized manifest: added {affected} areas"
)
else:
logger.info(f"({db.models.Area.__name__}) synchronized manifest: no updates")
@celery_app.task
def sync_known_entities(hole_dir: HoleDirection):
hole_dir = HoleDirection(hole_dir)
if hole_dir == HoleDirection.H:
path = IHSPath.well_h_ids
else:
path = IHSPath.well_v_ids
areas: List[Dict] = util.aio.async_to_sync(IHSClient.get_areas(path=path))
for idx, area in enumerate(areas):
sync_known_entities_for_area.apply_async(
args=(hole_dir, area), kwargs={}, countdown=idx + 30
)
@celery_app.task
def sync_known_entities_for_area(hole_dir: HoleDirection, area: str):
async def wrapper(hole_dir: HoleDirection, area: str):
hole_dir = HoleDirection(hole_dir)
index_cols = ["entity_id", "entity_type"]
if hole_dir == HoleDirection.H:
path = IHSPath.well_h_ids
else:
path = IHSPath.well_v_ids
# fetch ids from remote service
ids = await IHSClient.get_ids_by_area(path, area=area)
df = pd.Series(ids, name="entity_id").to_frame()
df["ihs_last_seen_at"] = util.utcnow()
df["entity_type"] = "api14"
df = df.set_index(index_cols)
# query matching records existing in the known_entities model
objs: List[db.models.KnownEntity] = await db.models.KnownEntity.query.where(
db.models.KnownEntity.entity_id.in_(ids)
).gino.all()
obj_df = pd.DataFrame([x.to_dict() for x in objs]).set_index(index_cols)
fresh = pd.DataFrame(index=obj_df.index, columns=obj_df.columns)
from fastai.conv_learner import *
from fastai.dataset import *
from tensorboard_cb_old import *
import cv2
import pandas as pd
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
import scipy.optimize as opt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import warnings
warnings.filterwarnings("ignore")
#=======================================================================================================================
PATH = './'
TRAIN = '../input/train/'
TEST = '../input/test/'
LABELS = '../input/train.csv'
SAMPLE = '../input/sample_submission.csv'
name_label_dict = {
0: 'Nucleoplasm',
1: 'Nuclear membrane',
2: 'Nucleoli',
3: 'Nucleoli fibrillar center',
4: 'Nuclear speckles',
5: 'Nuclear bodies',
6: 'Endoplasmic reticulum',
7: 'Golgi apparatus',
8: 'Peroxisomes',
9: 'Endosomes',
10: 'Lysosomes',
11: 'Intermediate filaments',
12: 'Actin filaments',
13: 'Focal adhesion sites',
14: 'Microtubules',
15: 'Microtubule ends',
16: 'Cytokinetic bridge',
17: 'Mitotic spindle',
18: 'Microtubule organizing center',
19: 'Centrosome',
20: 'Lipid droplets',
21: 'Plasma membrane',
22: 'Cell junctions',
23: 'Mitochondria',
24: 'Aggresome',
25: 'Cytosol',
26: 'Cytoplasmic bodies',
27: 'Rods & rings' }
nw = 4 #number of workers for data loader
arch = inceptionresnet_2 #specify target architecture
#=======================================================================================================================
#=======================================================================================================================
# Data
#=======================================================================================================================
# faulty image : dc756dea-bbb4-11e8-b2ba-ac1f6b6435d0
#=================
TRAIN_IMAGES_PER_CATEGORY = 1000
image_df = pd.read_csv(LABELS)
image_df = image_df[(image_df.Id != 'dc756dea-bbb4-11e8-b2ba-ac1f6b6435d0') &
(image_df.Id != 'c861eb54-bb9f-11e8-b2b9-ac1f6b6435d0') &
(image_df.Id != '7a88f200-bbc3-11e8-b2bc-ac1f6b6435d0')]
image_df['target_list'] = image_df['Target'].map(lambda x: [int(a) for a in x.split(' ')])
all_labels = list(chain.from_iterable(image_df['target_list'].values))
c_val = Counter(all_labels)
n_keys = c_val.keys()
max_idx = max(n_keys)
#==================================================================================
# visualize train distribution
# fig, ax1 = plt.subplots(1,1, figsize = (10, 5))
# ax1.bar(n_keys, [c_val[k] for k in n_keys])
# ax1.set_xticks(range(max_idx))
# ax1.set_xticklabels([name_label_dict[k] for k in range(max_idx)], rotation=90)
# plt.show()
#==================================================================================
for k,v in c_val.items():
print(name_label_dict[k], 'count:', v)
# create a categorical vector
image_df['target_vec'] = image_df['target_list'].map(lambda ck: [i in ck for i in range(max_idx+1)])
raw_train_df, valid_df = train_test_split(image_df,
test_size = 0.15,
# hack to make stratification work
stratify = image_df['Target'].map(lambda x: x[:3] if '27' not in x else '0'),
random_state= 42)
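# Stratification uses only the first three characters of the raw Target string, and any
# sample containing label 27 is mapped to the '0' bucket so the very small class 27
# ('Rods & rings') does not leave sklearn with a stratum too small to split.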
print(raw_train_df.shape[0], 'training masks')
print(valid_df.shape[0], 'validation masks')
tr_n = raw_train_df['Id'].values.tolist()
val_n = valid_df['Id'].values.tolist()
tr_n = tr_n[:-2] # pytorch has problems if last batch has one sample
test_names = list({f[:36] for f in os.listdir(TEST)})
# #=================================================================================
# # # Balance data
# #================================================================================
# # keep labels with more then 50 objects
# out_df_list = []
# for k,v in c_val.items():
# if v>50:
# keep_rows = raw_train_df['target_list'].map(lambda x: k in x)
# out_df_list += [raw_train_df[keep_rows].sample(TRAIN_IMAGES_PER_CATEGORY,
# replace=True)]
# train_df = pd.concat(out_df_list, ignore_index=True)
#
# tr_n = train_df['Id'].values.tolist()
# val_n = valid_df['Id'].values.tolist()
# tr_n = tr_n[:-2] # pytorch has problems if last batch has one sample
#
# print(train_df.shape[0])
# print(len(tr_n))
# print('unique train:',len(train_df['Id'].unique().tolist()))
#
# #=========================================================================
# #show balanced class graph
fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (10, 5))
train_sum_vec = np.sum(np.stack(raw_train_df['target_vec'].values, 0), 0)
valid_sum_vec = np.sum(np.stack(valid_df['target_vec'].values, 0), 0)
ax1.bar(n_keys, [train_sum_vec[k] for k in n_keys])
ax1.set_title('Training Distribution')
ax2.bar(n_keys, [valid_sum_vec[k] for k in n_keys])
ax2.set_title('Validation Distribution')
plt.show()
#=======================================================================================================================
# Dataset loading helpers
#=======================================================================================================================
def open_rgby(path,id): #a function that reads RGBY image
#print(id)
colors = ['red','green','blue','yellow']
flags = cv2.IMREAD_GRAYSCALE
img = [cv2.imread(os.path.join(path, id+'_'+color+'.png'), flags).astype(np.float32)/255
for color in colors]
img[0] = img[0] * 0.85
img[1] = img[1] * 1.0
img[2] = img[2] * 0.85
img[3] = img[3] * 0.85
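# Per-channel weighting: green (the protein-of-interest stain in this dataset) is kept
# at full intensity, while the red/blue/yellow reference channels are slightly damped.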
img = np.stack(img, axis=-1)
#print('img loaded:', id)
return img
class pdFilesDataset(FilesDataset):
def __init__(self, fnames, path, transform):
self.labels = pd.read_csv(LABELS).set_index('Id')
self.labels['Target'] = [[int(i) for i in s.split()] for s in self.labels['Target']]
super().__init__(fnames, transform, path)
def get_x(self, i):
img = open_rgby(self.path, self.fnames[i])
if self.sz == 512:
return img
else:
return cv2.resize(img, (self.sz, self.sz), interpolation=cv2.INTER_AREA)
def get_y(self, i):
if (self.path == TEST):
return np.zeros(len(name_label_dict), dtype=np.int)
else:
labels = self.labels.loc[self.fnames[i]]['Target']
return np.eye(len(name_label_dict), dtype=np.float)[labels].sum(axis=0)
@property
def is_multi(self):
return True
@property
def is_reg(self):
return True
# this flag is set to remove the output sigmoid that allows log(sigmoid) optimization
# of the numerical stability of the loss function
def get_c(self):
return len(name_label_dict) # number of classes
def get_data(sz,bs):
#data augmentation
aug_tfms = [RandomRotate(30, tfm_y=TfmType.NO),
RandomDihedral(tfm_y=TfmType.NO),
RandomLighting(0.05, 0.05, tfm_y=TfmType.NO)]
#mean and std in of each channel in the train set
stats = A([0.08069, 0.05258, 0.05487, 0.08282], [0.13704, 0.10145, 0.15313, 0.13814])
#stats = A([0.08069, 0.05258, 0.05487], [0.13704, 0.10145, 0.15313])
tfms = tfms_from_stats(stats, sz, crop_type=CropType.NO, tfm_y=TfmType.NO,
aug_tfms=aug_tfms)
ds = ImageData.get_ds(pdFilesDataset, (tr_n[:-(len(tr_n)%bs)],TRAIN),
(val_n,TRAIN), tfms, test=(test_names,TEST))
md = ImageData(PATH, ds, bs, num_workers=nw, classes=None)
return md
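# The slice tr_n[:-(len(tr_n) % bs)] drops the remainder so every training batch is full;
# note that it evaluates to an empty list when len(tr_n) is already divisible by bs.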
#=======================================================================================================================
bs = 16
sz = 256
md = get_data(sz,bs)
x,y = next(iter(md.trn_dl))
print(x.shape, y.shape)
#=======================================================================================================================
# Display images
#=======================================================================================================================
# def display_imgs(x):
# columns = 4
# bs = x.shape[0]
# rows = min((bs + 3) // 4, 4)
# fig = plt.figure(figsize=(columns * 4, rows * 4))
# for i in range(rows):
# for j in range(columns):
# idx = i + j * columns
# fig.add_subplot(rows, columns, idx + 1)
# plt.axis('off')
# plt.imshow((x[idx, :, :, :3] * 255).astype(np.int))
# plt.show()
#
#
# display_imgs(np.asarray(md.trn_ds.denorm(x)))
#=======================================================================================================================
# compute dataset stats
#=======================================================================================================================
# x_tot = np.zeros(4)
# x2_tot = np.zeros(4)
# for x,y in iter(md.trn_dl):
# tmp = md.trn_ds.denorm(x).reshape(16,-1)
# x = md.trn_ds.denorm(x).reshape(-1,4)
# x_tot += x.mean(axis=0)
# x2_tot += (x**2).mean(axis=0)
#
# channel_avr = x_tot/len(md.trn_dl)
# channel_std = np.sqrt(x2_tot/len(md.trn_dl) - channel_avr**2)
# print(channel_avr,channel_std)
#=======================================================================================================================
# Loss and metrics
#=======================================================================================================================
class FocalLoss(nn.Module):
def __init__(self, gamma=1):
super().__init__()
self.gamma = gamma
def forward(self, input, target):
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})"
.format(target.size(), input.size()))
max_val = (-input).clamp(min=0)
loss = input - input * target + max_val + \
((-max_val).exp() + (-input - max_val).exp()).log()
invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
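        # 'loss' above is the numerically stable BCE-with-logits, max(x,0) - x*t + log(1 + exp(-|x|)),
        # scaled by the focal factor exp(gamma * log(1 - p_t)) = (1 - p_t)**gamma.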
return loss.sum(dim=1).mean()
def acc(preds,targs,th=0.0):
preds = (preds > th).int()
targs = targs.int()
return (preds==targs).float().mean()
def recall(preds, targs, thresh=0.5):
pred_pos = preds > thresh
tpos = torch.mul((targs.byte() == pred_pos), targs.byte())
tp = tpos.sum().item()
tr = targs.sum().item()
return float(tp+0.000001)/float( tr + 0.000001)
def precision(preds, targs, thresh=0.5):
pred_pos = preds > thresh
tpos = torch.mul((targs.byte() == pred_pos), targs.byte())
tp = tpos.sum().item()
pp = pred_pos.sum().item()
return float(tp+0.000001)/float(pp + 0.000001)
def fbeta(preds, targs, beta, thresh=0.5):
"""Calculates the F-beta score (the weighted harmonic mean of precision and recall).
This is the micro averaged version where the true positives, false negatives and
false positives are calculated globally (as opposed to on a per label basis).
beta == 1 places equal weight on precision and recall, b < 1 emphasizes precision and
beta > 1 favors recall.
"""
assert beta > 0, 'beta needs to be greater than 0'
beta2 = beta ** 2
rec = recall(preds, targs, thresh)
prec = precision(preds, targs, thresh)
return float((1 + beta2) * prec * rec) / float(beta2 * prec + rec + 0.00000001)
def f1(preds, targs, thresh=0.5): return float(fbeta(preds, targs, 1, thresh))
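# Illustrative sanity check (the tensors below are arbitrary stand-ins, not data from this pipeline):
# with gamma=0 the focal factor is 1, so FocalLoss reduces to BCE-with-logits summed over classes
# and averaged over the batch.
_logits = torch.randn(4, len(name_label_dict))
_targets = (torch.rand_like(_logits) > 0.5).float()
_bce = F.binary_cross_entropy_with_logits(_logits, _targets, reduction='none').sum(dim=1).mean()
assert torch.allclose(FocalLoss(gamma=0)(_logits, _targets), _bce, atol=1e-5)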
########################################################################################################################
# Training
########################################################################################################################
class ConvnetBuilder_custom():
def __init__(self, f, c, is_multi, is_reg, ps=None, xtra_fc=None, xtra_cut=0,
custom_head=None, pretrained=True):
self.f, self.c, self.is_multi, self.is_reg, self.xtra_cut = f, c, is_multi, is_reg, xtra_cut
if xtra_fc is None: xtra_fc = [512]
if ps is None: ps = [0.25] * len(xtra_fc) + [0.5]
self.ps, self.xtra_fc = ps, xtra_fc
if f in model_meta:
cut, self.lr_cut = model_meta[f]
else:
cut, self.lr_cut = 0, 0
cut -= xtra_cut
layers = cut_model(f(pretrained), cut)
# replace first convolutional layer by 4->32 while keeping corresponding weights
# and initializing new weights with zeros
        w = layers[0].conv.weight
        layers[0].conv = nn.Conv2d(4, 32, kernel_size=(3, 3), stride=(2, 2), bias=False)
        layers[0].conv.weight = torch.nn.Parameter(torch.cat((w, torch.zeros(32, 1, 3, 3)), dim=1))
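        # The concatenated weight has shape (32, 4, 3, 3): the pretrained RGB filters plus a
        # zero-initialised fourth (yellow) channel, so the pretrained features are preserved at init.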
self.nf = model_features[f] if f in model_features else (num_features(layers) * 2)
if not custom_head: layers += [AdaptiveConcatPool2d(), Flatten()]
self.top_model = nn.Sequential(*layers)
n_fc = len(self.xtra_fc) + 1
if not isinstance(self.ps, list): self.ps = [self.ps] * n_fc
if custom_head:
fc_layers = [custom_head]
else:
fc_layers = self.get_fc_layers()
self.n_fc = len(fc_layers)
self.fc_model = to_gpu(nn.Sequential(*fc_layers))
if not custom_head: apply_init(self.fc_model, kaiming_normal)
self.model = to_gpu(nn.Sequential(*(layers + fc_layers)))
@property
def name(self):
return f'{self.f.__name__}_{self.xtra_cut}'
def create_fc_layer(self, ni, nf, p, actn=None):
res = [nn.BatchNorm1d(num_features=ni)]
if p: res.append(nn.Dropout(p=p))
res.append(nn.Linear(in_features=ni, out_features=nf))
if actn: res.append(actn)
return res
def get_fc_layers(self):
res = []
ni = self.nf
for i, nf in enumerate(self.xtra_fc):
res += self.create_fc_layer(ni, nf, p=self.ps[i], actn=nn.ReLU())
ni = nf
final_actn = nn.Sigmoid() if self.is_multi else nn.LogSoftmax()
if self.is_reg: final_actn = None
res += self.create_fc_layer(ni, self.c, p=self.ps[-1], actn=final_actn)
return res
def get_layer_groups(self, do_fc=False):
if do_fc:
return [self.fc_model]
idxs = [self.lr_cut]
c = children(self.top_model)
if len(c) == 3: c = children(c[0]) + c[1:]
lgs = list(split_by_idxs(c, idxs))
return lgs + [self.fc_model]
class ConvLearner(Learner):
def __init__(self, data, models, precompute=False, **kwargs):
self.precompute = False
super().__init__(data, models, **kwargs)
if hasattr(data, 'is_multi') and not data.is_reg and self.metrics is None:
self.metrics = [accuracy_thresh(0.5)] if self.data.is_multi else [accuracy]
if precompute: self.save_fc1()
self.freeze()
self.precompute = precompute
def _get_crit(self, data):
if not hasattr(data, 'is_multi'): return super()._get_crit(data)
return F.l1_loss if data.is_reg else F.binary_cross_entropy if data.is_multi else F.nll_loss
@classmethod
def pretrained(cls, f, data, ps=None, xtra_fc=None, xtra_cut=0, custom_head=None, precompute=False,
pretrained=True, **kwargs):
models = ConvnetBuilder_custom(f, data.c, data.is_multi, data.is_reg,
ps=ps, xtra_fc=xtra_fc, xtra_cut=xtra_cut, custom_head=custom_head,
pretrained=pretrained)
return cls(data, models, precompute, **kwargs)
@classmethod
def lsuv_learner(cls, f, data, ps=None, xtra_fc=None, xtra_cut=0, custom_head=None, precompute=False,
needed_std=1.0, std_tol=0.1, max_attempts=10, do_orthonorm=False, **kwargs):
models = ConvnetBuilder(f, data.c, data.is_multi, data.is_reg,
ps=ps, xtra_fc=xtra_fc, xtra_cut=xtra_cut, custom_head=custom_head, pretrained=False)
convlearn = cls(data, models, precompute, **kwargs)
convlearn.lsuv_init()
return convlearn
@property
def model(self):
return self.models.fc_model if self.precompute else self.models.model
def half(self):
if self.fp16: return
self.fp16 = True
if type(self.model) != FP16: self.models.model = FP16(self.model)
if not isinstance(self.models.fc_model, FP16): self.models.fc_model = FP16(self.models.fc_model)
def float(self):
if not self.fp16: return
self.fp16 = False
if type(self.models.model) == FP16: self.models.model = self.model.module.float()
if type(self.models.fc_model) == FP16: self.models.fc_model = self.models.fc_model.module.float()
@property
def data(self):
return self.fc_data if self.precompute else self.data_
def create_empty_bcolz(self, n, name):
return bcolz.carray(np.zeros((0, n), np.float32), chunklen=1, mode='w', rootdir=name)
def set_data(self, data, precompute=False):
super().set_data(data)
if precompute:
self.unfreeze()
self.save_fc1()
self.freeze()
self.precompute = True
else:
self.freeze()
def get_layer_groups(self):
return self.models.get_layer_groups(self.precompute)
def summary(self):
precompute = self.precompute
self.precompute = False
res = super().summary()
self.precompute = precompute
return res
def get_activations(self, force=False):
tmpl = f'_{self.models.name}_{self.data.sz}.bc'
# TODO: Somehow check that directory names haven't changed (e.g. added test set)
names = [os.path.join(self.tmp_path, p + tmpl) for p in ('x_act', 'x_act_val', 'x_act_test')]
if os.path.exists(names[0]) and not force:
self.activations = [bcolz.open(p) for p in names]
else:
self.activations = [self.create_empty_bcolz(self.models.nf, n) for n in names]
def save_fc1(self):
self.get_activations()
act, val_act, test_act = self.activations
m = self.models.top_model
if len(self.activations[0]) != len(self.data.trn_ds):
predict_to_bcolz(m, self.data.fix_dl, act)
if len(self.activations[1]) != len(self.data.val_ds):
predict_to_bcolz(m, self.data.val_dl, val_act)
if self.data.test_dl and (len(self.activations[2]) != len(self.data.test_ds)):
if self.data.test_dl: predict_to_bcolz(m, self.data.test_dl, test_act)
self.fc_data = ImageClassifierData.from_arrays(self.data.path,
(act, self.data.trn_y), (val_act, self.data.val_y), self.data.bs,
classes=self.data.classes,
test=test_act if self.data.test_dl else None, num_workers=8)
def freeze(self):
self.freeze_to(-1)
def unfreeze(self):
self.freeze_to(0)
self.precompute = False
def predict_array(self, arr):
precompute = self.precompute
self.precompute = False
pred = super().predict_array(arr)
self.precompute = precompute
return pred
#=======================================================================================================================
sz = 512 #image size
bs = 8 #batch size
md = get_data(sz,bs)
learner = ConvLearner.pretrained(arch, md, ps=0.2) #dropout 20%
learner.opt_fn = optim.Adam
learner.clip = 1.0 #gradient clipping
learner.crit = FocalLoss()
#learner.crit = f2_loss
learner.metrics = [precision, recall, f1]
print(learner.summary())
#learner.lr_find()
#learner.sched.plot()
#plt.show()
tb_logger = TensorboardLogger(learner.model, md, "inres_512_val3", metrics_names=["precision", 'recall', 'f1'])
lr = 1e-3
lrs=np.array([lr/10,lr/3,lr])
#learner.fit(lr,1, best_save_name='inres_512_0.3', callbacks=[tb_logger])
learner.unfreeze()
#learner.load('wrn_512_3.3')
#learner.fit(lrs/4,4,cycle_len=2,use_clr=(10,20),best_save_name='inres_512_1.3', callbacks=[tb_logger])
#learner.fit(lrs/4,2,cycle_len=4,use_clr=(10,20), best_save_name='inres_512_2.3', callbacks=[tb_logger])
#learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='inres_512_3.3', callbacks=[tb_logger])
# learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='wrn_512_4.3_best', callbacks=[tb_logger] )
# learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='wrn_512_5.3_best', callbacks=[tb_logger])
#learner.save('inres_512_unbalanced_grn+')
learner.load('inres_512_unbalanced_grn+')
#learner.load('wrn_512_balanced')
learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='inres_512_4.3_best_unbalanced_grn+', callbacks=[tb_logger] )
learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='inres_512_5.3_best_unbalanced_grn+', callbacks=[tb_logger])
learner.save('inres_512_unbalanced_grn+_focalgamma1')
# swa
#learner.fit(lrs/160,1,cycle_len=8,use_clr=(5,20), best_save_name='wrn_512_4', callbacks=[tb_logger])
#learner.load('Res34_512_grn4-swa')
#learner.fit(lrs/16, n_cycle=4, cycle_len=4,use_clr=(5,20), best_save_name='Res34_512_grn4', use_swa=True, swa_start=1, swa_eval_freq=5,callbacks=[tb_logger])
#learner.load('Res34_512_grn4-swa')
#======================================================================================================================
# Validation
#=======================================================================================================================
def sigmoid_np(x):
return 1.0/(1.0 + np.exp(-x))
preds,y = learner.TTA(n_aug=16)
preds = np.stack(preds, axis=-1)
preds = sigmoid_np(preds)
pred = preds.max(axis=-1)
def F1_soft(preds,targs,th=0.5,d=50.0):
preds = sigmoid_np(d*(preds - th))
targs = targs.astype(np.float)
score = 2.0*(preds*targs).sum(axis=0)/((preds+targs).sum(axis=0) + 1e-6)
return score
def fit_val(x,y):
params = 0.5*np.ones(len(name_label_dict))
wd = 1e-5
error = lambda p: np.concatenate((F1_soft(x,y,p) - 1.0,
wd*(p - 0.5)), axis=None)
p, success = opt.leastsq(error, params)
return p
th = fit_val(pred,y)
th[th<0.1] = 0.1
print('Thresholds: ',th)
print('F1 macro: ',f1_score(y, pred>th, average='macro'))
print('F1 macro (th = 0.5): ',f1_score(y, pred>0.5, average='macro'))
print('F1 micro: ',f1_score(y, pred>th, average='micro'))
print('Fractions: ',(pred > th).mean(axis=0))
print('Fractions (true): ',(y > th).mean(axis=0))
#=======================================================================================================================
# Submission
#=======================================================================================================================
preds_t,y_t = learner.TTA(n_aug=16,is_test=True)
preds_t = np.stack(preds_t, axis=-1)
preds_t = sigmoid_np(preds_t)
pred_t = preds_t.max(axis=-1) #max works better for F1 macro score
def save_pred(pred, th=0.5, fname='protein_classification.csv'):
pred_list = []
for line in pred:
s = ' '.join(list([str(i) for i in np.nonzero(line > th)[0]]))
pred_list.append(s)
sample_df = pd.read_csv(SAMPLE)
sample_list = list(sample_df.Id)
pred_dic = dict((key, value) for (key, value)
in zip(learner.data.test_ds.fnames, pred_list))
pred_list_cor = [pred_dic[id] for id in sample_list]
df = | pd.DataFrame({'Id': sample_list, 'Predicted': pred_list_cor}) | pandas.DataFrame |
import sqlite3
import pandas as pd
conn = sqlite3.connect('rpg_db.sqlite3')
cur = conn.cursor()
# How many total Characters are there?
query1 = """
SELECT COUNT(character_id)
FROM charactercreator_character cc
"""
cur.execute(query1)
result_list = cur.fetchall()
cols = [ii[0] for ii in cur.description]
df1 = pd.DataFrame(result_list, columns=cols)
my_conn1 = sqlite3.connect("my_db1.sqlite")
df1.to_sql('my_table1', my_conn1, index=False, if_exists='replace')
# How many of each specific subclass?
query2 = """
SELECT COUNT(DISTINCT cf.character_ptr_id) AS TotalFighter,
COUNT(DISTINCT cc.character_ptr_id) AS TotalCleric,
COUNT(DISTINCT cm.character_ptr_id) AS TotalMage,
COUNT(DISTINCT cn.mage_ptr_id) AS TotalNecromancer,
COUNT(DISTINCT ct.character_ptr_id) AS TotalThief
FROM charactercreator_fighter cf, charactercreator_cleric cc,
charactercreator_mage cm, charactercreator_necromancer cn,
charactercreator_thief ct
"""
df2 = pd.read_sql(query2, conn)
my_conn2 = sqlite3.connect("my_db2.sqlite")
df2.to_sql('my_table2', my_conn2, index=False, if_exists='replace')
# How many total Items?
query3 = """
SELECT COUNT(item_id) AS ItemTotal
FROM armory_item ai
"""
df3 = pd.read_sql(query3, conn)
my_conn3 = sqlite3.connect("my_db3.sqlite")
df3.to_sql('my_table3', my_conn3, index=False, if_exists='replace')
# How many of the Items are weapons? How many are not?
query4 = """
SELECT COUNT(DISTINCT aw.item_ptr_id) AS TotalWeapons,
COUNT(DISTINCT ai.item_id) - COUNT(DISTINCT aw.item_ptr_id) AS TotalNotWeapons
FROM armory_item ai, armory_weapon aw
"""
df4 = pd.read_sql(query4, conn)
my_conn4 = sqlite3.connect("my_db4.sqlite")
df4.to_sql('my_table4', my_conn4, index=False, if_exists='replace')
# How many Items does each character have? (Return first 20 rows)
query5 = """
SELECT character_id, COUNT(item_id) AS ItemsTotal
FROM charactercreator_character_inventory cci
GROUP BY character_id
LIMIT 20
"""
df5 = pd.read_sql(query5, conn)
my_conn5 = sqlite3.connect("my_db5.sqlite")
df5.to_sql('my_table5', my_conn5, index=False, if_exists='replace')
# How many Weapons does each character have? (Return first 20 rows)
query6 = """
SELECT cci.character_id, COUNT(aw.item_ptr_id) AS Weapons
FROM charactercreator_character_inventory cci
LEFT JOIN armory_weapon aw
ON cci.item_id = aw.item_ptr_id
GROUP BY cci.character_id
LIMIT 20
"""
df6 = | pd.read_sql(quer6, conn) | pandas.read_sql |
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
def test_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_dup_no_colindex)
def test_no_extra_vars(self):
for df in self.df_list:
            assert '__idx__' not in df.columns
            assert '__key__' not in df.columns
class TestPortfolioAverages:
input_data = DataFrameTest()
expect_avgs_no_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001),
(1, 'b', 1.0550000000000002),
(2, 'a', 1.1050000000000002),
(2, 'b', 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET'])
expect_avgs_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001, 1.025),
(1, 'b', 1.0550000000000002, 1.0550000000000002),
(2, 'a', 1.1050000000000002, 1.12),
(2, 'b', 1.0750000000000002, 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET', 'RET_wavg'])
expect_ports = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, 1),
(10516, 'a', '1/2/2000', 1.02, 1, 1),
(10516, 'a', '1/3/2000', 1.03, 1, 1),
(10516, 'a', '1/4/2000', 1.04, 0, 1),
(10516, 'b', '1/1/2000', 1.05, 1, 1),
(10516, 'b', '1/2/2000', 1.06, 1, 1),
(10516, 'b', '1/3/2000', 1.07, 1, 2),
(10516, 'b', '1/4/2000', 1.08, 1, 2),
(10517, 'a', '1/1/2000', 1.09, 0, 2),
(10517, 'a', '1/2/2000', 1.1, 0, 2),
(10517, 'a', '1/3/2000', 1.11, 0, 2),
(10517, 'a', '1/4/2000', 1.12, 1, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight', 'portfolio'])
avgs, ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar')
w_avgs, w_ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar', wtvar='weight')
def test_simple_averages(self):
assert_frame_equal(self.expect_avgs_no_wt, self.avgs, check_dtype=False)
def test_weighted_averages(self):
assert_frame_equal(self.expect_avgs_wt, self.w_avgs, check_dtype=False)
def test_portfolio_construction(self):
print(self.ports)
assert_frame_equal(self.expect_ports, self.ports, check_dtype=False)
assert_frame_equal(self.expect_ports, self.w_ports, check_dtype=False)
class TestWinsorize(DataFrameTest):
def test_winsor_40_subset_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.022624),
(10516, 'a', '1/2/2000', 1.022624),
(10516, 'a', '1/3/2000', 1.02672),
(10516, 'a', '1/4/2000', 1.02672),
(10516, 'b', '1/1/2000', 1.062624),
(10516, 'b', '1/2/2000', 1.062624),
(10516, 'b', '1/3/2000', 1.06672),
(10516, 'b', '1/4/2000', 1.06672),
(10517, 'a', '1/1/2000', 1.102624),
(10517, 'a', '1/2/2000', 1.102624),
(10517, 'a', '1/3/2000', 1.10672),
(10517, 'a', '1/4/2000', 1.10672),
], columns = ['PERMNO', 'byvar', 'Date', 'RET'])
wins = dero.pandas.winsorize(self.df, .4, subset='RET', byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, wins, check_less_precise=True)
class TestRegBy(DataFrameTest):
def create_indf(self):
indf = self.df_weight.copy()
indf['key'] = indf['PERMNO'].astype(str) + '_' + indf['byvar']
return indf
def test_regby_nocons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.48774684748988806, '10516_a'),
(0.9388636664168903, '10516_b'),
(0.22929206076239614, '10517_a'),
], columns = ['coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key', cons=False)
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(-32.89999999999997, 29.999999999999982, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons_low_obs(self):
indf = self.create_indf().loc[:8,:] #makes it so that one byvar only has one obs
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(nan, nan, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
class TestExpandMonths(DataFrameTest):
def test_expand_months_tradedays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
def test_expand_months_calendardays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-01 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-02 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-08 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-09 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-15 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-16 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-17 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-22 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-23 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-29 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-30 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df, trade_days=False)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
class TestPortfolio(DataFrameTest):
def test_portfolio_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 2),
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
p = dero.pandas.portfolio(self.df, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
def test_portfolio_with_nan_and_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', nan, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1), #changed from 2 to 1 when updated nan handling
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
indf = self.df.copy()
indf.loc[0, 'RET'] = nan
p = dero.pandas.portfolio(indf, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
class TestConvertSASDateToPandasDate:
df_sasdate = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
df_sasdate_nan = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', nan),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
def test_convert(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate['datadate']))
assert_frame_equal(expect_df, converted)
def test_convert_nan(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('NaT'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate_nan['datadate']))
assert_frame_equal(expect_df, converted)
class TestMapWindows(DataFrameTest):
times = [
[-4, -2, 0],
[-3, 1, 2],
[4, 5, 6],
[0, 1, 2],
[-1, 0, 1]
]
df_period_str = pd.DataFrame([
(10516, '1/1/2000', 1.01),
(10516, '1/2/2000', 1.02),
(10516, '1/3/2000', 1.03),
(10516, '1/4/2000', 1.04),
(10516, '1/5/2000', 1.05),
(10516, '1/6/2000', 1.06),
(10516, '1/7/2000', 1.07),
(10516, '1/8/2000', 1.08),
(10517, '1/1/2000', 1.09),
(10517, '1/2/2000', 1.10),
(10517, '1/3/2000', 1.11),
(10517, '1/4/2000', 1.12),
(10517, '1/5/2000', 1.05),
(10517, '1/6/2000', 1.06),
(10517, '1/7/2000', 1.07),
(10517, '1/8/2000', 1.08),
], columns = ['PERMNO','Date', 'RET'])
df_period = df_period_str.copy()
df_period['Date'] = pd.to_datetime(df_period['Date'])
expect_dfs = [
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 2),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 2),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 1),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 2),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 1),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 2),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__'])
]
expect_df_first = pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 1),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 1),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 1),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 1),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 1),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 1),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 1),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 1),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__'])
def run_for_each_time(func):
"""
Decorator that can be applied to any function whose args are (self, time, expect_df) which runs the function
for each time in self.times and picks the appropriate matching expect_df
"""
def run(self):
for t, time in enumerate(self.times):
func(self, time, self.expect_dfs[t])
return run
def test_method_first(self):
result = dero.pandas._map_windows(self.df_period, self.times[0], method='first',
periodvar='Date', byvars=['PERMNO'])
assert_frame_equal(result, self.expect_df_first)
@run_for_each_time
def test_method_between(self, time, expect_df):
result = dero.pandas._map_windows(self.df_period, time, method='between',
periodvar='Date', byvars=['PERMNO'])
assert_frame_equal(result, expect_df)
class TestLeftMergeLatest(DataFrameTest):
def test_left_merge_latest(self):
expect_df = pd.DataFrame(data = [
('001076', Timestamp('1995-03-01 00:00:00'), Timestamp('1995-02-01 00:00:00')),
('001076', Timestamp('1995-04-01 00:00:00'), Timestamp('1995-03-02 00:00:00')),
('001722', Timestamp('2012-01-01 00:00:00'), Timestamp('2011-11-01 00:00:00')),
('001722', Timestamp('2012-07-01 00:00:00'), Timestamp('2011-11-01 00:00:00')),
('001722', numpy.timedelta64('NaT','ns'), numpy.timedelta64('NaT','ns')),
(numpy.datetime64('NaT'), numpy.datetime64('2012-01-01T00:00:00.000000000'), numpy.datetime64('NaT')),
], columns = ['GVKEY', 'Date', 'Date_y'])
lm = dero.pandas.left_merge_latest(self.df_gvkey_str, self.df_gvkey_str2, on='GVKEY')
lm_low_mem = dero.pandas.left_merge_latest(self.df_gvkey_str, self.df_gvkey_str2, on='GVKEY', low_memory=True)
lm_sql = dero.pandas.left_merge_latest(self.df_gvkey_str, self.df_gvkey_str2,
on='GVKEY', backend='sql')
assert_frame_equal(expect_df, lm, check_dtype=False)
assert_frame_equal(expect_df.iloc[:-1], lm_low_mem, check_dtype=False)
assert_frame_equal(expect_df, lm_sql, check_dtype=False)
class TestVarChangeByGroups(DataFrameTest):
def test_multi_byvar_single_var(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, nan),
(10516, 'a', '1/2/2000', 1.02, 0.010000000000000009),
(10516, 'a', '1/3/2000', 1.03, 0.010000000000000009),
(10516, 'a', '1/4/2000', 1.04, 0.010000000000000009),
(10516, 'b', '1/1/2000', 1.05, nan),
(10516, 'b', '1/2/2000', 1.06, 0.010000000000000009),
(10516, 'b', '1/3/2000', 1.07, 0.010000000000000009),
(10516, 'b', '1/4/2000', 1.08, 0.010000000000000009),
(10517, 'a', '1/1/2000', 1.09, nan),
(10517, 'a', '1/2/2000', 1.1, 0.010000000000000009),
(10517, 'a', '1/3/2000', 1.11, 0.010000000000000009),
(10517, 'a', '1/4/2000', 1.12, 0.010000000000000009),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'RET_change'])
vc = dero.pandas.var_change_by_groups(self.df, 'RET', ['PERMNO','byvar'])
assert_frame_equal(expect_df, vc)
def test_multi_byvar_multi_var(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, nan, nan),
(10516, 'a', '1/2/2000', 1.02, 1, 0.010000000000000009, 1.0),
(10516, 'a', '1/3/2000', 1.03, 1, 0.010000000000000009, 0.0),
(10516, 'a', '1/4/2000', 1.04, 0, 0.010000000000000009, -1.0),
(10516, 'b', '1/1/2000', 1.05, 1, nan, nan),
(10516, 'b', '1/2/2000', 1.06, 1, 0.010000000000000009, 0.0),
(10516, 'b', '1/3/2000', 1.07, 1, 0.010000000000000009, 0.0),
(10516, 'b', '1/4/2000', 1.08, 1, 0.010000000000000009, 0.0),
(10517, 'a', '1/1/2000', 1.09, 0, nan, nan),
(10517, 'a', '1/2/2000', 1.1, 0, 0.010000000000000009, 0.0),
(10517, 'a', '1/3/2000', 1.11, 0, 0.010000000000000009, 0.0),
(10517, 'a', '1/4/2000', 1.12, 1, 0.010000000000000009, 1.0),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight',
'RET_change', 'weight_change'])
vc = dero.pandas.var_change_by_groups(self.df_weight, ['RET','weight'], ['PERMNO','byvar'])
assert_frame_equal(expect_df, vc)
class TestFillExcludedRows(DataFrameTest):
expect_df_nofill = pd.DataFrame(data = [
('001076', Timestamp('1995-03-01 00:00:00')),
('001076', Timestamp('1995-04-01 00:00:00')),
('001076', Timestamp('2012-01-01 00:00:00')),
('001076', Timestamp('2012-07-01 00:00:00')),
('001722', Timestamp('1995-03-01 00:00:00')),
('001722', Timestamp('1995-04-01 00:00:00')),
('001722', Timestamp('2012-01-01 00:00:00')),
('001722', Timestamp('2012-07-01 00:00:00')),
], columns = ['GVKEY', 'Date'])
def test_no_fillvars_str_byvars(self):
result = dero.pandas.fill_excluded_rows(self.df_gvkey_str, ['GVKEY','Date'])
assert_frame_equal(self.expect_df_nofill, result)
def test_no_fillvars_series_byvars(self):
result = dero.pandas.fill_excluded_rows(self.df_gvkey_str, [self.df_gvkey_str['GVKEY'],'Date'])
assert_frame_equal(self.expect_df_nofill, result)
def test_fillvars(self):
var_df = self.df_gvkey_str.copy()
var_df['var'] = 1
expect_df = pd.DataFrame(data = [
('001076', Timestamp('1995-03-01 00:00:00'), 1.0),
('001076', Timestamp('1995-04-01 00:00:00'), 1.0),
('001076', Timestamp('2012-01-01 00:00:00'), 0.0),
('001076', Timestamp('2012-07-01 00:00:00'), 0.0),
('001722', Timestamp('1995-03-01 00:00:00'), 0.0),
('001722', Timestamp('1995-04-01 00:00:00'), 0.0),
('001722', Timestamp('2012-01-01 00:00:00'), 1.0),
            ('001722', Timestamp('2012-07-01 00:00:00'), 1.0),
        ], columns = ['GVKEY', 'Date', 'var'])
        # NOTE: the fillvars argument below is inferred from the test name; the exact signature
        # of fill_excluded_rows is an assumption, as the original end of this test was cut off.
        result = dero.pandas.fill_excluded_rows(var_df, ['GVKEY', 'Date'], 'var')
        assert_frame_equal(expect_df, result, check_dtype=False)
'''
https://note.youdao.com/share/?id=50ade2586b4ccbfc5da4c5d6199db863&type=note#/
Title: Python in practice - scraping and mining Taobao product data
Project scope:
This case study targets >> product category: sofas;
Filter conditions: Tmall stores only, sorted by sales from high to low, price above 500 RMB;
Volume: 100 pages in total, 4,400 products.
Analysis goals:
1. Text analysis of product titles with word-cloud visualization
2. Sales statistics for each title keyword
3. Distribution of product prices
4. Distribution of product sales volumes
5. Average sales per price bracket
6. Effect of product price on sales volume
7. Effect of product price on sales revenue
8. Number of products per province or city
9. Average sales per province
Note: this project only covers the analyses listed above as examples.
Project steps:
1. Data collection: scrape Taobao product data with Python
2. Clean and preprocess the data
3. Text analysis: jieba word segmentation, wordcloud visualization
4. Bar-chart visualization (barh)
5. Histogram visualization (hist)
6. Scatter-plot visualization (scatter)
7. Regression-analysis visualization (regplot)
Tools & modules:
Tools: the code in this case study was written in Spyder (Anaconda).
Modules: requests, retrying, jieba, missingno, wordcloud, imread, matplotlib, seaborn, etc.
Original code and related documents, download link: https://pan.baidu.com/s/1nwEx949  password: <PASSWORD>
'''
'''
Part 1 - Scraping the data:
Note: Taobao result pages embed the listings as JSON, which is parsed here with regular expressions.
Because Taobao blocks scrapers, even multithreading plus a modified headers parameter cannot guarantee
that every page is fetched in a single pass, so the scraper loops: each iteration retries the pages
that have not yet been fetched successfully and stops only once all pages have been crawled.
The code is as follows:
'''
import re
import time
import requests
import pandas as pd
from retrying import retry
from concurrent.futures import ThreadPoolExecutor
start = time.perf_counter()  # start timer (time.clock() was removed in Python 3.8)
# plist holds the 's' offset values (num) used in the search URL for pages 1-100
plist = []
for i in range(1,101):
j = 44*(i-1)
plist.append(j)
listno = plist
datatmsp = pd.DataFrame(columns=[])
while True:
    @retry(stop_max_attempt_number = 8) # maximum number of retry attempts
def network_programming(num):
url='https://s.taobao.com/search?q=%E6%B2%99%E5%8F%91&imgfile= \
&js=1&stats_click=search_radio_all%3A1&initiative_id=staobaoz_ \
20180207&ie=utf8&sort=sale-desc&style=list&fs=1&filter_tianmao \
=tmall&filter=reserve_price%5B500%2C%5D&bcoffset=0& \
p4ppushleft=%2C44&s=' + str(num)
web = requests.get(url, headers=headers)
web.encoding = 'utf-8'
return web
    # Multithreaded fetching
def multithreading():
        number = listno # on each pass, fetch only the pages that have not been crawled successfully yet
event = []
with ThreadPoolExecutor(max_workers=10) as executor:
for result in executor.map(network_programming,
number, chunksize=10):
event.append(result)
return event
    # Disguise the request: send a browser User-Agent header
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) \
AppleWebKit/537.36(KHTML, like Gecko) \
Chrome/55.0.2883.87 Safari/537.36'}
listpg = []
event = multithreading()
for i in event:
json = re.findall('"auctions":(.*?),"recommendAuctions"', i.text)
if len(json):
table = pd.read_json(json[0])
datatmsp = | pd.concat([datatmsp,table],axis=0,ignore_index=True) | pandas.concat |
from experiments.utils import save_to_HDF5, update_experiment_run_log
from radcad import Model, Simulation, Experiment
from radcad.engine import Engine, Backend
from models.system_model_v3.model.partial_state_update_blocks import partial_state_update_blocks
from models.system_model_v3.model.params.init import params
from models.system_model_v3.model.state_variables.init import state_variables
from models.system_model_v3.model.params.init import eth_price_df
import logging
import datetime
import subprocess
import time
import os
import dill
import pandas as pd
import pprint
# Set according to environment
os.environ['NUMEXPR_MAX_THREADS'] = '8'
# Get experiment details
hash = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]).strip().decode("utf-8")
now = datetime.datetime.now()
# Set the number of simulation timesteps, with a maximum of `len(debt_market_df) - 1`
#SIMULATION_TIMESTEPS = 24 * 30 * 6 # len(eth_price_df) - 1
#MONTE_CARLO_RUNS = 1
def configure_logging(output_directory, date):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(filename=f'{output_directory}/{date}.log')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def run_experiment(results_id=None, output_directory=None, experiment_metrics=None, timesteps=24*30*12,
runs=1, params=params, initial_state=state_variables,
state_update_blocks=partial_state_update_blocks,
save_file=False, save_logs=False):
if save_logs:
configure_logging(output_directory + '/logs', now)
passed = False
experiment_time = 0.0
exceptions = []
try:
start = time.time()
# Run experiment
logging.info("Starting experiment")
logging.debug(experiment_metrics)
logging.info(pprint.pformat(params))
# Run cadCAD simulation
model = Model(
initial_state=state_variables,
state_update_blocks=partial_state_update_blocks,
params=params
)
simulation = Simulation(model=model, timesteps=timesteps, runs=runs)
experiment = Experiment([simulation])
experiment.engine = Engine(
backend=Backend.PATHOS,
raise_exceptions=False,
deepcopy=False,
processes=8,
drop_substeps=True,
)
if save_file:
experiment.after_experiment = lambda experiment: save_to_HDF5(experiment,
output_directory + '/experiment_results.hdf5', results_id, now)
experiment.run()
exceptions = pd.DataFrame(experiment.exceptions)
logging.debug(exceptions)
#print(exceptions)
passed = True
end = time.time()
experiment_time = end - start
logging.info(f"Experiment completed in {experiment_time} seconds")
#update_experiment_run_log(output_directory, passed, results_id, hash, exceptions, experiment_metrics, experiment_time, now)
return | pd.DataFrame(experiment.results) | pandas.DataFrame |
import sys
import pandas as pd
def optimize_dataframe(df, down_int='integer'):
    # down_int can also be 'unsigned'
    # Downcast numeric columns to smaller dtypes to reduce memory usage (sketch of the truncated
    # body; non-numeric columns are copied unchanged).
    converted_df = pd.DataFrame()
    for col in df.columns:
        if pd.api.types.is_integer_dtype(df[col]):
            converted_df[col] = pd.to_numeric(df[col], downcast=down_int)
        elif pd.api.types.is_float_dtype(df[col]):
            converted_df[col] = pd.to_numeric(df[col], downcast='float')
        else:
            converted_df[col] = df[col]
    return converted_df
from .Forg import forg
import time
import pandas as pd
#from statsmodels.iolib.tableformatting import (gen_fmt, fmt_2)
from itertools import zip_longest
from .TableFormat import gen_fmt, fmt_2
from statsmodels.iolib.table import SimpleTable
from statsmodels.compat.python import lrange, lmap, lzip
from scipy.stats import t
class OLSFixed(object):
    def __init__(self):
self.model = None
self.params = None
self.df = None
self.bse = None
self.tvalues = None
self.pvalues = None
self.summary = None
self.covar_matrix = None
self.fittedvalues = None
self.rsquared = None
self.rsquared_adj = None
self.full_rsquared = None
self.full_rsquared_adj = None
self.fvalue = None
self.f_pvalue = None
self.full_fvalue = None
self.full_f_pvalue = None
self.variance_matrix = None
self.resid = None
self.nobs = None
self.yname = None
self.xname = None
self.resid_std_err = None
self.Covariance_Type = None
self.cluster_method = None
self.demeaned_df = None
self.data_df = None
self.f_df_full = None
self.f_df_proj = None
self.general_table = None
self.std_err_name = None
self.old_x = None
self.consist_col = None
self.category_col = None
self.out_col = None
self.treatment_input = None
# 2021/01/07 - iv related test
self.endog_x = None
self.exog_x = None
self.orignal_exog_x = None
self.cluster = None
self.iv = None
self.f_stat_first_stage = None
self.f_stat_first_stage_pval = None
self.x_second_stage = None
self.x_first_stage = None
def conf_int(self, conf=0.05):
tmpdf = pd.DataFrame(columns=[0, 1], index=list(self.params.index))
tmpdf[0] = self.params - t.ppf(1 - conf / 2, self.df) * self.bse
tmpdf[1] = self.params + t.ppf(1 - conf / 2, self.df) * self.bse
return tmpdf
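    # The interval above is the usual t-based CI: params +/- t_{1 - conf/2, df} * bse,
    # e.g. conf=0.05 gives a 95% confidence interval for each coefficient.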
def summary(self, yname=None, xname=None, title=0, alpha=.05):
if title == 0:
title = 'High Dimensional Fixed Effect Regression Results'
if type(xname) == str: xname = [xname]
if type(yname) == str: yname = [yname]
if xname is not None and len(xname) != len(self.xname):
# GH 2298
raise ValueError('User supplied xnames must have the same number of '
'entries as the number of model parameters '
'({0})'.format(len(self.xname)))
if yname is not None and len(yname) != len(self.yname):
raise ValueError('User supplied ynames must have the same number of '
'entries as the number of model dependent variables '
'({0})'.format(len(self.yname)))
if xname is None:
xname = self.xname
if yname is None:
yname = self.yname
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
nobs = int(self.nobs)
df_model = self.df
resid_std_err = forg(self.resid_std_err, 4)
Covariance_Type = self.Covariance_Type
cluster_method = self.cluster_method
gen_left = [('Dep. Variable:', yname),
('No. Observations:', [nobs]), # TODO: What happens with multiple names?
('DoF of residual:', [df_model]),
('Residual std err:', [resid_std_err]),
('Covariance Type:', [Covariance_Type]),
('Cluster Method:', [cluster_method])
]
r_squared = forg(self.rsquared, 4)
rsquared_adj = forg(self.rsquared_adj, 4)
full_rsquared = forg(self.full_rsquared, 4)
full_rsquared_adj = forg(self.full_rsquared_adj, 4)
fvalue = forg(self.fvalue, 4)
f_pvalue = forg(self.f_pvalue, 4)
full_fvalue = forg(self.full_fvalue, 4)
full_f_pvalue = forg(self.full_f_pvalue, 4)
# gen_right = [('R-squared(proj model):', [r_squared]),
# ('Adj. R-squared(proj model):', [rsquared_adj]),
# ('R-squared(full model):', [full_rsquared]),
# ('Adj. R-squared(full model):', [full_rsquared_adj]),
# ('F-statistic(proj model):', [fvalue]),
# ('Prob (F-statistic (proj model)):', [f_pvalue]),
# ('DoF of F-test (proj model):', [self.f_df_proj]),
# ('F-statistic(full model):', [full_fvalue]),
# ('Prob (F-statistic (full model)):', [full_f_pvalue]),
# ('DoF of F-test (full model):', [self.f_df_full])
# ]
#2021/09/26
if (self.model =='ivgmm') or (self.model=='iv2sls'):
gen_right = [('R-squared:', [r_squared]),
('Adj. R-squared:', [rsquared_adj]),
('F-statistic:', [fvalue]),
('Prob (F-statistic):', [f_pvalue]),
('DoF of F-test:', [self.f_df_proj]),
#('F-statistic(full model):', [full_fvalue]),
#('Prob (F-statistic (full model)):', [full_f_pvalue]),
#('DoF of F-test (full model):', [self.f_df_full])
]
else:
gen_right = [('R-squared(proj model):', [r_squared]),
('Adj. R-squared(proj model):', [rsquared_adj]),
('R-squared(full model):', [full_rsquared]),
('Adj. R-squared(full model):', [full_rsquared_adj]),
('F-statistic(proj model):', [fvalue]),
('Prob (F-statistic (proj model)):', [f_pvalue]),
('DoF of F-test (proj model):', [self.f_df_proj]),
('F-statistic(full model):', [full_fvalue]),
('Prob (F-statistic (full model)):', [full_f_pvalue]),
('DoF of F-test (full model):', [self.f_df_full])
]
# pad both tables to equal number of rows
if len(gen_right) < len(gen_left):
# fill up with blank lines to same length
gen_right += [(' ', ' ')] * (len(gen_left) - len(gen_right))
elif len(gen_right) > len(gen_left):
# fill up with blank lines to same length, just to keep it symmetric
gen_left += [(' ', ' ')] * (len(gen_right) - len(gen_left))
gen_stubs_left, gen_data_left = zip_longest(*gen_left)
gen_title = title
gen_header = None
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title=gen_title,
txt_fmt=gen_fmt
)
gen_stubs_right, gen_data_right = zip_longest(*gen_right)
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title=gen_title,
txt_fmt=gen_fmt
)
gen_table_left.extend_right(gen_table_right)
self.general_table = gen_table_left
# Parameters part of the summary table
s_alp = alpha / 2
c_alp = 1 - alpha / 2
if Covariance_Type == 'nonrobust':
self.std_err_name = 'nonrobust std err'
elif Covariance_Type == 'robust':
self.std_err_name = 'robust std err'
elif Covariance_Type == 'clustered':
self.std_err_name = 'cluster std err'
else:
self.std_err_name = 'std err'
param_header = ['coef', self.std_err_name,
't',
'P>|t|',
'[' + str(s_alp),
str(c_alp) + ']'] # alp + ' Conf. Interval'
params_stubs = xname
params = self.params.copy()
conf_int = self.conf_int(alpha)
std_err = self.bse.copy()
exog_len = lrange(len(xname))
tstat = self.tvalues.copy()
prob_stat = self.pvalues.copy()
for i in range(len(self.params)):
params[i] = forg(self.params[i], 5)
std_err[i] = forg(self.bse[i], 5)
tstat[i] = forg(self.tvalues[i], 4)
prob_stat[i] = forg(self.pvalues[i], 4)
        # SimpleTable should be able to handle the formatting
params_data = lzip(["%#6.5f" % (params[i]) for i in exog_len],
["%#6.5f" % (std_err[i]) for i in exog_len],
["%#6.4f" % (tstat[i]) for i in exog_len],
["%#6.4f" % (prob_stat[i]) for i in exog_len],
["%#6.4f" % conf_int[0][i] for i in exog_len],
["%#6.4f" % conf_int[1][i] for i in exog_len])
self.parameter_table = SimpleTable(params_data,
param_header,
params_stubs,
title=None,
txt_fmt=fmt_2)
# 2020/01/07 iv part of the summary table
if len(self.endog_x) > 0:
iv_header = ['First-Stage F-stat', 'P > F']
gen_iv = []
for i in self.endog_x:
f_stat_iv_i = forg(self.f_stat_first_stage[self.endog_x.index(i)], 4)
f_stat_iv_pval_i = forg(self.f_stat_first_stage_pval[self.endog_x.index(i)], 4)
endog_list_i = f_stat_iv_i, f_stat_iv_pval_i
gen_iv.append(endog_list_i)
gen_data_iv = gen_iv
gen_stubs_iv = self.endog_x
self.gen_table_iv = SimpleTable(gen_data_iv,
iv_header,
gen_stubs_iv,
title = None,
txt_fmt = fmt_2)
print(self.general_table)
print(self.parameter_table)
if len(self.endog_x) > 0:
print(self.gen_table_iv)
return
def to_excel(self, file=None):
df_tmp = pd.DataFrame(columns=['coef', self.std_err_name, 't', 'p', 'conf_int_lower', 'conf_int_upper'],
index=self.xname)
df_tmp.coef = self.params
df_tmp[self.std_err_name] = self.bse
df_tmp.t = self.tvalues
df_tmp.p = self.pvalues
df_tmp.conf_int_lower = self.conf_int()[0]
df_tmp.conf_int_upper = self.conf_int()[1]
df_tmp2 = pd.DataFrame(
columns=['dep_variable', 'no_obs', 'df_model', 'resid_std_err', 'Covariance_Type', 'cluster_method',
'proj_Rsquared', 'proj_Rsquared_adj', 'full_Rsquared', 'full_Rsquared_adj',
'proj_fvalue', 'proj_f_pvalue', 'full_fvalue', 'full_f_pvalue'])
        df_tmp2.dep_variable = self.yname  # what to do if there is more than one dependent variable?
df_tmp2.no_obs = self.nobs
df_tmp2.df_model = self.df
df_tmp2.resid_std_err = self.resid_std_err
df_tmp2.Covariance_Type = self.Covariance_Type
df_tmp2.cluster_method = self.cluster_method
df_tmp2.proj_Rsquared = self.rsquared
df_tmp2.proj_Rsquared_adj = self.rsquared_adj
df_tmp2.full_Rsquared = self.full_rsquared
df_tmp2.full_Rsquared_adj = self.full_rsquared_adj
df_tmp2.proj_fvalue = self.fvalue
df_tmp2.proj_f_pvalue = self.f_pvalue
df_tmp2.full_fvalue = self.full_fvalue
df_tmp2.full_f_pvalue = self.full_f_pvalue
if file is None:
file = 'output.xls'
writer = | pd.ExcelWriter(file) | pandas.ExcelWriter |
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
train = pd.read_csv("data/train.csv")
test = pd.read_csv("data/test.csv")
"""
Researching use of additional columns
"""
# Split val and train
X_train, X_val, y_train, y_val = train_test_split(train[["keyword", "location"]],
train["target"],
test_size=0.2, random_state=1)
def do_padding(sentences, tokenizer, maxlen, padding, truncating):
sequences = tokenizer.texts_to_sequences(sentences)
padded = pad_sequences(sequences, maxlen=maxlen, padding=padding, truncating=truncating)
return padded, sequences
# Convert keywords to array of shape (, 3)
def process_keyword(keywords):
keywords = keywords.str.lower()
keywords = keywords.str.replace("%20", " ")
keywords = np.where(keywords.isna(), "", keywords)
return keywords
# Convert top 30 locations to sparse array of 1/0 flags.
# This isn't a great way to do this. It would be much better to preprocess
# this into e.g. [country, city] per row
def process_location(location):
location = location.to_frame()
location_counts = location.value_counts().rename("counts").reset_index(). \
rename(columns = {"index":"location"})
location_counts["order"] = list(range(1, len(location_counts)+1))
location = location.merge(location_counts, how="left",
left_on = "location", right_on = "location")
locations = np.select([location["order"] <= 30, location["order"].isna()],
[location["location"], "other"],
"missing")
location_dummies = | pd.get_dummies(locations) | pandas.get_dummies |
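# Hedged sketch of the alternative hinted at in the comment above process_location: split the
# free-text location into city and country parts and one-hot encode each part separately.
# The helper name, the "City, Country" comma-split heuristic and the column names are
# illustrative assumptions, not part of the original pipeline; it reuses the module's `pd` import.
def split_location_sketch(location):
    parts = location.fillna("missing").str.split(",", n=1, expand=True).reindex(columns=[0, 1])
    out = pd.DataFrame({
        "city": parts[0].str.strip().str.lower(),
        "country": parts[1].str.strip().str.lower().fillna("unknown"),
    })
    # encode each component on its own instead of the raw combined string
    return pd.get_dummies(out, columns=["city", "country"])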
import addfips
import os
import pandas as pd
import datetime
variables = {
'hash': 'hash',
'date': 'date_stamp',
'dateChecked': 'datetime_checked',
'state': 'us_state_postal',
'fips': 'us_state_fips',
'positive': 'cnt_tested_pos',
'positiveIncrease': 'cnt_tested_pos_new',
'negative': 'cnt_tested_neg',
'negativeIncrease': 'cnt_tested_neg_new',
'pending': 'cnt_tested_pending',
'recovered': 'cnt_recovered',
'death': 'cnt_death',
'deathIncrease': 'cnt_death_new',
'hospitalized': 'cnt_hospitalized',
'hospitalizedIncrease': 'cnt_hospitalized_new',
'hospitalizedCurrently': 'cnt_hospitalized_current',
'hospitalizedCumulative': 'cnt_hospitalized_total',
'inIcuCurrently': 'cnt_icu_current',
'inIcuCumulative': 'cnt_icu_total',
'onVentilatorCurrently': 'cnt_vent_current',
'onVentilatorCumulative': 'cnt_vent_total',
'totalTestResults': 'cnt_tested',
'totalTestResultsIncrease': 'cnt_tested_new'
}
def convertDateTimes(df):
dates = df.filter(like='date')
for (label, content) in dates.iteritems():
df[label] = pd.to_datetime(content).dt.strftime('%Y-%m-%d')
# have found some dates to be 1900-01-01 for death / recovery / hospitalization, we will clean these up
df[label].replace({ '1900-01-01': None},inplace =True)
return df
def convertStateFIPS(name):
af = addfips.AddFIPS()
codes = []
for index, value in name.items():
codes.append(af.get_state_fips(value))
return pd.Series(codes)
def cleanData(data):
df = pd.DataFrame(data)
# Rename the file headers
df.rename(variables, axis="columns", inplace=True)
# Reformat date as ISO 8601 date
df['date_stamp'] = pd.to_datetime(df['date_stamp'], format='%Y%m%d').dt.strftime('%Y-%m-%d')
#on 9/23/2020, CTP replaced actually invalid date times with the string "Invalid DateTime". This removes the string.
df = df.replace({'Invalid DateTime': None})
#starting on 11.5.2020, CTP switched from using 00 as indication of midnight to using 24.
#we will recode hours beginning with 24 to begin with zero and leave the day the same.
df["datetime_checked"] = df["datetime_checked"].str.replace("24:", "00:")
# Reformat datetime to SQL timestamp
df['datetime_checked'] = pd.to_datetime(df['datetime_checked']).dt.strftime('%Y-%m-%d %H:%M:%S')
#df['datetime_checked'] =
# get the 0 padded fips codes
df['us_state_fips'] = convertStateFIPS(df['us_state_postal'])
# convert to integers as these will never be decimals and should be integers in the database
df['cnt_tested_pos'] = df['cnt_tested_pos'].astype(pd.Int32Dtype())
df['cnt_tested_pos_new'] = df['cnt_tested_pos_new'].astype(pd.Int32Dtype())
df['cnt_tested_neg'] = df['cnt_tested_neg'].astype(pd.Int32Dtype())
df['cnt_tested_neg_new'] = df['cnt_tested_neg_new'].astype(pd.Int32Dtype())
df['cnt_tested_pending'] = df['cnt_tested_pending'].astype( | pd.Int32Dtype() | pandas.Int32Dtype |
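# Hedged aside on the conversion above: the nullable pd.Int32Dtype() is used instead of a plain
# numpy int32 because these count columns can contain missing values. With a reasonably recent
# pandas, pd.Series([1.0, None, 3.0]).astype(pd.Int32Dtype()) yields [1, <NA>, 3], whereas
# .astype("int32") on the same data would raise because of the missing value.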
from http.server import BaseHTTPRequestHandler, HTTPServer
import socketserver
import pickle
import urllib.request
import json
from pprint import pprint
from pandas.io.json import json_normalize
import pandas as pd
from sklearn import preprocessing
from sklearn.preprocessing import PolynomialFeatures
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import Ridge
from math import sqrt
import os
import errno
from pymongo import MongoClient
import urllib.parse as urlparse
from influxdb import InfluxDBClient
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import TheilSenRegressor
from sklearn.datasets import make_regression
import matplotlib.pyplot as plt  # needed for the plotting helpers below
class Terminus(BaseHTTPRequestHandler):
def getAllNodeNames(self,client):
queryResult = client.query("SHOW TAG VALUES FROM uptime WITH KEY=nodename;")
nodeNames_temp = list(queryResult.get_points())
dfnodeNames = pd.DataFrame(nodeNames_temp)
allNodeNames = dfnodeNames[:]["value"]
return allNodeNames
def getNamespaceNames(self,client,node):
nsQuery = client.query("SHOW TAG VALUES FROM uptime WITH KEY=namespace_name WHERE nodename = '"+node+"';")
nsQuery_temp = list(nsQuery.get_points())
dfnsNames = pd.DataFrame(nsQuery_temp)
allnsNames = dfnsNames[:]["value"]
return allnsNames
def getAllPodNames(self,client,node,ns_name):
queryResult = client.query("SHOW TAG VALUES FROM uptime WITH KEY = pod_name WHERE namespace_name = '"+ns_name+"' AND nodename = '"+node+"';")
podNames_temp = list(queryResult.get_points())
dfpodNames = pd.DataFrame(podNames_temp)
if dfpodNames.empty:
return dfpodNames
else:
allpodNames = dfpodNames[:]["value"]
return allpodNames
def getCPUUtilizationNode(self,client, node):
queryResult = client.query('SELECT * FROM "cpu/node_utilization" where nodename = \''+node+'\' AND type=\'node\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/node_utilization'])
return dfcpuUtilization
def getCPUUtilizationPod(self,client, node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "cpu/usage_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/usage_rate'])
return dfcpuUtilization
def getCPUUtilizationPodContainer(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "cpu/usage_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod_container\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/usage_rate'])
return dfcpuUtilization
def prepareCpuUtilization(self,client,node,ns_name, pod_name):
cpuUtilization = self.getCPUUtilizationNode(client,node)
podCpuUtilization = self.getCPUUtilizationPod(client,node,ns_name, pod_name)
containercpuUtilization = self.getCPUUtilizationPodContainer(client,node,ns_name, pod_name)
plt.plot(cpuUtilization.index, cpuUtilization['value'] *1000, 'r', label="node") # plotting t, a separately
plt.plot(podCpuUtilization.index, podCpuUtilization['value'], 'b', label="pod") # plotting t, b separately
plt.plot(containercpuUtilization.index, containercpuUtilization['value'], 'g', label="container") # plotting t, c separately
plt.legend(loc='upper left')
plt.show()
def getMemoryUtilizationNode(self,client,node):
queryResult = client.query('SELECT * FROM "memory/node_utilization" where nodename = \''+node+'\' AND type=\'node\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/node_utilization'])
return dfmemUtilization
def getMemoryUtilizationPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "memory/usage" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/usage'])
return dfmemUtilization
def getMemoryUtilizationPodContainer(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "memory/usage" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod_container\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/usage'])
return dfmemUtilization
def prepareMemoryUtilization(self,client,node,ns_name, pod_name):
memoryUtilization = self.getMemoryUtilizationNode(client,node)
podMemoryUtilization = self.getMemoryUtilizationPod(client,node,ns_name, pod_name)
containerMemoryUtilization = self.getMemoryUtilizationPodContainer(client,node,ns_name, pod_name)
plt.plot(memoryUtilization.index, memoryUtilization['value'], 'r', label="node") # plotting t, a separately
plt.plot(podMemoryUtilization.index, podMemoryUtilization['value'], 'b', label="pod") # plotting t, b separately
plt.plot(containerMemoryUtilization.index, containerMemoryUtilization['value'], 'g', label="container") # plotting t, c separately
plt.legend(loc='upper left')
plt.show()
def getNetworkTxRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_rate'])
return dfmemUtilization
def getNetworkTxPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx'])
return dfmemUtilization
def getNetworkTxErrorsPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_errors" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_errors'])
return dfmemUtilization
def getNetworkTxErrorsRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_errors_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_errors_rate'])
return dfmemUtilization
def prepareNetworkTxRateUtilization(self,client,node,ns_name, pod_name):
podNetworTxRate = self.getNetworkTxRatePod(client,node,ns_name, pod_name)
podNetworTx = self.getNetworkTxPod(client,node,ns_name, pod_name)
podNetworkError = self.getNetworkTxErrorsPod(client,node,ns_name, pod_name)
podNetworkErrorRate = self.getNetworkTxErrorsRatePod(client,node,ns_name, pod_name)
plt.plot(podNetworTxRate.index, podNetworTxRate['value'], 'b') # plotting t, b separately
#plt.plot(podNetworTx.index, podNetworTx['value'], 'g') # plotting t, b separately
#plt.plot(podNetworkError.index, podNetworkError['value'], 'y') # plotting t, b separately
plt.plot(podNetworkErrorRate.index, podNetworkErrorRate['value'], 'r') # plotting t, b separately
plt.show()
def getNetworkRxRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_rate'])
return dfmemUtilization
def getNetworkRxPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx'])
return dfmemUtilization
def getNetworkRxErrorsPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_errors" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_errors'])
return dfmemUtilization
def getNetworkRxErrorsRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_errors_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_errors_rate'])
return dfmemUtilization
def prepareNetworkRxRateUtilization(self,client,node,ns_name, pod_name):
podNetworRxRate = self.getNetworkRxRatePod(client,node,ns_name, pod_name)
podNetworRx = self.getNetworkRxPod(client,node,ns_name, pod_name)
podNetworkError = self.getNetworkRxErrorsPod(client,node,ns_name, pod_name)
podNetworkErrorRate = self.getNetworkRxErrorsRatePod(client,node,ns_name, pod_name)
plt.plot(podNetworRxRate.index, podNetworRxRate['value'], 'b') # plotting t, b separately
#plt.plot(podNetworRx.index, podNetworRx['value'], 'g') # plotting t, b separately
#plt.plot(podNetworkError.index, podNetworkError['value'], 'y') # plotting t, b separately
plt.plot(podNetworkErrorRate.index, podNetworkErrorRate['value'], 'r') # plotting t, b separately
plt.show()
def getRelevantNodeName(self,client,ns_name):
allNodeNames = self.getAllNodeNames(client)
#nsNames = getNamespaceNames(allNodeNames[0])
relevantNodes = []
for node in allNodeNames:
            allPodNamesNode = self.getAllPodNames(client, node, ns_name)
if(not allPodNamesNode.empty):
relevantNodes.append(node)
return relevantNodes
def getNodeResourceUtilizationDf(self,client, nodeName):
Result_node_CPU = client.query("SELECT value from \"cpu/node_utilization\" where nodename = '"+nodeName+"' AND type = 'node' ")
Result_node_MEM = client.query("SELECT value from \"memory/node_utilization\" where nodename = '"+nodeName+"' AND type = 'node' ")
Result_node_CPU_Cores = client.query("SELECT mean(\"value\") FROM \"cpu/node_capacity\" where nodename = '"+nodeName+
"' AND type = 'node' GROUP BY time(1m)")
Result_node_mem_node = client.query("SELECT mean(\"value\")FROM \"memory/node_capacity\" where nodename = '"+
nodeName+"' AND type = 'node' GROUP BY time(1m)")
cpu_points = pd.DataFrame(Result_node_CPU.get_points())
cpu_points['time'] = pd.to_datetime(cpu_points['time'])
cpu_points = cpu_points.set_index('time')
cpu_points.columns = ['node_cpu_util']
mem_points = pd.DataFrame(Result_node_MEM.get_points())
mem_points['time'] = pd.to_datetime(mem_points['time'])
mem_points = mem_points.set_index('time')
mem_points.columns = ['node_mem_util']
cores_points = pd.DataFrame(Result_node_CPU_Cores.get_points())
cores_points['time'] = pd.to_datetime(cores_points['time'])
cores_points = cores_points.set_index('time')
cores_points.columns = ['node_cores']
mem_node_points = pd.DataFrame(Result_node_mem_node.get_points())
mem_node_points['time'] = pd.to_datetime(mem_node_points['time'])
mem_node_points = mem_node_points.set_index('time')
mem_node_points.columns = ['node_mem']
df_node =pd.concat([cpu_points, mem_points,cores_points,mem_node_points], axis=1)
return df_node
def getPodResourceUtilizationDf(self,client, node, ns_name, pod_name):
Result_Pod_CPU_usage = client.query('SELECT value FROM "cpu/usage_rate" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\';')
Result_Pod_MEM_usage = client.query('SELECT value from \"memory/usage\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\';')
Result_Pod_CPU_limit = client.query('SELECT mean(\"value\") FROM "cpu/limit" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\' group by time(1m);')
Result_Pod_MEM_limit = client.query('SELECT mean(\"value\") from \"memory/limit\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\' group by time(1m);')
Result_Pod_CPU_requests = client.query('SELECT mean(\"value\") FROM "cpu/request" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\' group by time(1m);')
Result_Pod_MEM_requests = client.query('SELECT mean(\"value\") from \"memory/request\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\' group by time(1m);')
cpu_points_usage = pd.DataFrame(Result_Pod_CPU_usage.get_points())
cpu_points_usage['time'] = pd.to_datetime(cpu_points_usage['time'])
cpu_points_usage = cpu_points_usage.set_index('time')
cpu_points_usage.columns = ['pod_cpu_usage']
mem_points_usage = pd.DataFrame(Result_Pod_MEM_usage.get_points())
mem_points_usage['time'] = pd.to_datetime(mem_points_usage['time'])
mem_points_usage = mem_points_usage.set_index('time')
mem_points_usage.columns = ['pod_mem_usage']
cpu_points_limits = pd.DataFrame(Result_Pod_CPU_limit.get_points())
cpu_points_limits['time'] = pd.to_datetime(cpu_points_limits['time'])
cpu_points_limits = cpu_points_limits.set_index('time')
cpu_points_limits.columns = ['pod_cpu_limit']
mem_points_limits = pd.DataFrame(Result_Pod_MEM_limit.get_points())
mem_points_limits['time'] = pd.to_datetime(mem_points_limits['time'])
mem_points_limits = mem_points_limits.set_index('time')
mem_points_limits.columns = ['pod_mem_limit']
cpu_points_request = pd.DataFrame(Result_Pod_CPU_requests.get_points())
cpu_points_request['time'] = pd.to_datetime(cpu_points_request['time'])
cpu_points_request = cpu_points_request.set_index('time')
cpu_points_request.columns = ['pod_cpu_request']
mem_points_request = pd.DataFrame(Result_Pod_MEM_requests.get_points())
mem_points_request['time'] = | pd.to_datetime(mem_points_request['time']) | pandas.to_datetime |
import numpy
import random
from glob import glob
from scipy import interpolate
from scipy.special import softmax
from scipy.stats import ttest_ind
from sklearn.model_selection import KFold
import sys
from scipy.stats import skew, kurtosis
import itertools
import collections
import errno
import os.path as osp
import pickle
import time
import shutil
from itertools import count
from sklearn.metrics import confusion_matrix, f1_score, precision_score, roc_auc_score, recall_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import accuracy_score, classification_report, cohen_kappa_score, roc_curve, precision_recall_curve
from typing import List
from datetime import datetime
import sklearn.metrics as metrics
from mlxtend.plotting import plot_confusion_matrix as mlxtend_plot_confusion_matrix
from mlxtend.evaluate import confusion_matrix as mlxtend_confusion_matrix
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from inspect import signature
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from pathlib import Path
def get_project_root() -> Path:
return Path(__file__).parent.parent
def one_hot_array(label_array: np.array, total_classes):
assert len(label_array.shape) == 1, print("label_array must be 1D array")
    tmp = np.zeros(shape=(label_array.shape[0], total_classes), dtype=float)  # np.float was removed from newer NumPy
tmp[np.arange(label_array.size), label_array] = 1.0
return tmp
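# Illustration of the helper above:
# one_hot_array(np.array([0, 2, 1]), total_classes=3) ->
# [[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]]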
def load_tf_model(model_path=''):
import tensorflow as tf
with tf.Session() as sess:
loaded_saver = tf.train.import_meta_graph(model_path)
loaded_saver.restore(sess, tf.train.latest_checkpoint('/'))
print(sess.run('w1:0'))
return sess
def get_all_folders_include_sub(path):
folders = [x[0] for x in os.walk(path)]
return folders
def get_char_split_symbol():
if sys.platform == "win32":
sp = "\\"
else:
sp = "/"
return sp
def get_all_files_include_sub(path, file_type):
files = []
# r=root, d=directories, f = files
for r, d, f in os.walk(path):
for file in f:
if file_type in file[-len(file_type):]:
files.append(os.path.join(os.path.abspath(r), file))
return files
def plot_train_history(history, title):
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title(title)
plt.legend()
plt.show()
def standardize_df_given_feature(df, features=[], scaler=None, df_name="", simple_method=True):
assert len(features) > 0, print("feature length must greater than 0")
scaler_dic = {}
# check if the df contains nan or inf
if simple_method:
print("pre-processing dataset frame using simple method")
df[features] = df[features].replace([np.inf, -np.inf], np.nan)
df[features] = df[features].fillna(df[features].mean())
# df[~np.isfinite(df)] = np.nan
nan = df[df.isnull().any(axis=1)]
if nan.shape[0] > 0:
print("df contains nan")
inf = df[df.eq(np.inf).any(axis=1)]
if inf.shape[0] > 0:
print("df contains inf")
else:
print("pre-processing dataset frame using comprehensive method")
for feature in features:
# print("quality check on %s for column name: % s" % (df_name, feature))
if df[feature].isnull().values.any():
df[feature] = df[feature].replace(np.nan,
df[~df[feature].isin([np.nan, np.inf, -np.inf])][feature].mean())
if df[feature].isin([np.inf]).values.any():
df[feature] = df[feature].replace(np.inf,
df[~df[feature].isin([np.nan, np.inf, -np.inf])][feature].max())
if df[feature].isin([-np.inf]).values.any():
df[feature] = df[feature].replace(-np.inf,
df[~df[feature].isin([np.nan, np.inf, -np.inf])][feature].min())
df[feature] = df[feature].replace([np.nan, np.inf, -np.inf], 0.0)
if scaler is None:
scaler = StandardScaler()
        print('No scaler given, fitting a new scaler now!')
        scaler.fit(df[features])
    print('start transforming data frame: %s' % df_name)
df[features] = scaler.transform(df[features])
return scaler
def extract_x_y_new(df, seq_len, mesaid, label_posi='mid', feature=""):
df_x = df[df["mesaid"] == mesaid][[feature, "stages"]].copy()
y = df_x["stages"].astype(int).values # get the ground truth for y
del df_x["stages"]
if label_posi == 'mid':
        if seq_len % 2 == 0:  # even win_len
            # cast to int so the values can be used with range() below
            fw_end = int(np.ceil(seq_len / 2))
            bw_end = int(np.floor(seq_len / 2))
        else:
            fw_end = int(np.round(seq_len / 2))
            bw_end = int(np.round(seq_len / 2))
for s in range(1, fw_end):
df_x["shift_%d" % s] = df_x[feature].shift(s)
# as half of the sliding window has reversed order (these df columns)
columns = df_x.columns.tolist()
columns = columns[::-1] # or data_frame = data_frame.sort_index(ascending=True, axis=0)
df_x = df_x[columns]
for s in range(1, bw_end):
df_x["shift_-%d" % s] = df_x[feature].shift(-s)
else:
for s in range(1, seq_len):
df_x["shift_%d" % s] = df_x["activity"].shift(s)
x = df_x.fillna(-1).values
return x, y
def extract_x_y(df, seq_len, pid, label_posi='mid', feature="", id_col_name="mesaid", gt_col_name="stages"):
df_x = df[df[id_col_name] == pid][[feature, gt_col_name]].copy()
y = df_x[gt_col_name].astype(int).values # get the ground truth for y
del df_x[gt_col_name]
if label_posi == 'mid':
for s in range(1, round(seq_len / 2) + 1):
df_x["shift_%d" % s] = df_x[feature].shift(s)
# reverse columns
columns = df_x.columns.tolist()
columns = columns[::-1] # or data_frame = data_frame.sort_index(ascending=True, axis=0)
df_x = df_x[columns]
for s in range(1, round(seq_len / 2) + 1):
df_x["shift_-%d" % s] = df_x[feature].shift(-s)
else:
for s in range(1, seq_len + 1):
df_x["shift_%d" % s] = df_x["activity"].shift(s)
x = df_x.fillna(-1).values
return x, y
def get_data(df, seq_len, feature_list, pid_col_name='mesaid', gt_col_name="stages"):
# build dataset by participant ID, extract dataset using sliding window method.
final_x = []
# loop all mesa_ids
for feature in tqdm(feature_list):
pids = df[pid_col_name].unique()
x, y = extract_x_y(df, seq_len, pids[0], label_posi='mid', feature=feature, id_col_name=pid_col_name,
gt_col_name=gt_col_name)
if len(pids) > 1:
for mid in pids[1:]:
x_tmp, y_tmp = extract_x_y(df, seq_len, mid, label_posi='mid', feature=feature,
id_col_name=pid_col_name,
gt_col_name=gt_col_name)
x = np.concatenate((x, x_tmp))
y = np.concatenate((y, y_tmp))
x = np.expand_dims(x, -1)
final_x.append(x)
combined_x = np.concatenate(final_x, axis=-1)
return combined_x, y
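# Hedged usage sketch for the sliding-window extraction above; the tiny frame is made-up demo
# data (not real MESA records) and the helper below is illustrative only.
def _demo_get_data():
    demo_df = pd.DataFrame({
        "mesaid": [1] * 10,
        "activity": np.arange(10, dtype=float),
        "stages": [0, 0, 1, 1, 2, 2, 1, 1, 0, 0],
    })
    x, y = get_data(demo_df, seq_len=5, feature_list=["activity"])
    # x has shape (10, 5, 1): one centred 5-step window per epoch for the single feature
    return x, y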
def standardize_features_to_array(df, scalers=None):
"""
    This function will scale the dataset using the scikit-learn StandardScaler. We recommend not passing a feature
    list to the function, as it may be difficult to save the resulting list of scalers into an HDF5 file.
    # fixme: complete the code for the feature-list case; return a scaler fitted on the training dataset
    # fixme: so that it can be reused on the test dataset.
:param df:
:param features:
:param scaler:
:return:
"""
df = df.apply(lambda x: x.replace([np.nan], x[~x.isin([np.nan, np.inf, -np.inf])].mean()), axis=0)
df = df.apply(lambda x: x.replace([np.inf], x[~x.isin([np.nan, np.inf, -np.inf])].max()), axis=0)
df = df.apply(lambda x: x.replace([-np.inf], x[~x.isin([np.nan, np.inf, -np.inf])].min()), axis=0)
df = df.apply(lambda x: x.replace([np.nan, np.inf, -np.inf], 0.0), axis=0)
    if scalers is not None:
        scaler = scalers  # keep a reference so the final return works in both branches
        df = scaler.transform(df)
else:
scaler = StandardScaler()
scaler.fit(df)
df = scaler.transform(df)
# the final check to replace any abnormal values
return df, scaler
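# Hedged usage sketch for the fixme notes above: fit the scaler once on the training frame and
# reuse it on the test frame. `train_df` and `test_df` are placeholder arguments, not variables
# defined in this module.
def _demo_standardize_train_test(train_df, test_df):
    train_arr, fitted_scaler = standardize_features_to_array(train_df, scalers=None)
    test_arr, _ = standardize_features_to_array(test_df, scalers=fitted_scaler)
    return train_arr, test_arr, fitted_scaler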
def load_scaler(path, file_type=".pkl"):
scaler = None
if file_type == ".pkl":
with open(path, "rb") as f:
scaler = pickle.load(f)
return scaler
def load_h5_df_train_test_dataset(path):
""" this is only for the mesa dataset!!!!!"""
store = pd.HDFStore(path, 'r')
dftrain = store["train"]
dftest = store["test"]
feature_name = store["featnames"].values.tolist()
if type(feature_name[0]) is list:
feature_name = list(itertools.chain.from_iterable(feature_name))
store.close()
return dftrain, dftest, feature_name
def get_csv_files(data_path):
    # Remove non-csv files, and perform ascending sort
print("searching csv files ...")
allfiles = os.listdir(data_path)
csv_files = []
for idx, f in enumerate(allfiles):
if ".csv" in f:
csv_files.append(os.path.join(data_path, f))
print("total found {} files".format(len(csv_files)))
csv_files.sort()
return csv_files
# TODO add an argument that adds the modality name to the column names
def get_statistic_feature(df, column_name, windows_size=20):
"""
    the function directly modifies the input data frame in place, so the argument is not immutable
    :param df:
    :param column_name: the column from which we want to extract statistical features.
    :param windows_size:
    :return: feature_names : the names of the features extracted for the given window sizes.
"""
feature_names = []
for win_size in np.arange(1, windows_size):
df["_mean_%d" % win_size] = df[column_name].rolling(window=win_size, center=False,
min_periods=1).mean().fillna(0.0)
df["_mean_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).mean().fillna(0.0)
df["_median_%d" % win_size] = df[column_name].rolling(window=win_size, center=False,
min_periods=1).median().fillna(0.0)
df["_median_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).median().fillna(0.0)
df["_std_%d" % win_size] = df[column_name].rolling(window=win_size, center=False, min_periods=1).std().fillna(
0.0)
df["_std_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).std().fillna(0.0)
df["_max_%d" % win_size] = df[column_name].rolling(window=win_size, center=False, min_periods=1).max().fillna(
0.0)
df["_max_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).max().fillna(0.0)
df["_min_%d" % win_size] = df[column_name].rolling(window=win_size, center=False, min_periods=1).min().fillna(
0.0)
df["_min_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).min().fillna(0.0)
df["_var_%d" % win_size] = df[column_name].rolling(window=win_size, center=False, min_periods=1).var().fillna(
0.0)
df["_var_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).var().fillna(0.0)
df["_nat_%d" % win_size] = ((df[column_name] >= 50) & (df[column_name] < 100)).rolling(window=win_size,
center=False,
min_periods=1).sum().fillna(
0.0)
df["_nat_centered_%d" % win_size] = ((df[column_name] >= 50) & (df[column_name] < 100)).rolling(window=win_size,
center=True,
min_periods=1).sum().fillna(
0.0)
df["_anyact_%d" % win_size] = (df[column_name] > 0).rolling(window=win_size, center=False,
min_periods=1).sum().fillna(0.0)
df["_anyact_centered_%d" % win_size] = (df[column_name] > 0).rolling(window=win_size, center=True,
min_periods=1).sum().fillna(0.0)
if win_size > 3:
df["_skew_%d" % win_size] = df[column_name].rolling(window=win_size, center=False,
min_periods=1).skew().fillna(0.0)
df["_skew_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).skew().fillna(0.0)
#
df["_kurt_%d" % win_size] = df[column_name].rolling(window=win_size, center=False,
min_periods=1).kurt().fillna(0.0)
df["_kurt_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).kurt().fillna(0.0)
# build up the
for variant in ["centered_", ""]:
feature_names.append("_mean_%s%d" % (variant, win_size))
feature_names.append("_median_%s%d" % (variant, win_size))
feature_names.append("_max_%s%d" % (variant, win_size))
feature_names.append("_min_%s%d" % (variant, win_size))
feature_names.append("_std_%s%d" % (variant, win_size))
feature_names.append("_var_%s%d" % (variant, win_size))
feature_names.append("_nat_%s%d" % (variant, win_size))
feature_names.append("_anyact_%s%d" % (variant, win_size))
if win_size > 3:
feature_names.append("_skew_%s%d" % (variant, win_size))
feature_names.append("_kurt_%s%d" % (variant, win_size))
df["_Act"] = (df[column_name]).fillna(0.0)
df["_LocAct"] = (df[column_name] + 1.).apply(np.log).fillna(0.0) # build up the n log transformation
feature_names.append("_LocAct") # add logarithm transformation
feature_names.append("_Act")
return feature_names
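# Hedged usage sketch for the rolling-feature extraction above; "activity" follows this module's
# usual column name and the tiny frame is made up for illustration.
def _demo_get_statistic_feature():
    demo_df = pd.DataFrame({"activity": [0.0, 3.0, 51.0, 120.0, 0.0, 7.0]})
    feat_names = get_statistic_feature(demo_df, column_name="activity", windows_size=4)
    # demo_df has been modified in place and now holds one column per generated feature name
    return demo_df[feat_names]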
def get_hr_statistic_feature(heart_rate_values):
"""
    :param heart_rate_values: a window containing a time series of heart rate values
"""
heart_rate_values = np.asarray(heart_rate_values)
    min_hr = np.min(heart_rate_values)
max_hr = np.max(heart_rate_values)
mean_hr = np.mean(heart_rate_values)
skw_hr = skew(heart_rate_values)
kurt_hr = kurtosis(heart_rate_values)
std_hr = np.std(heart_rate_values)
return {"min_hr": min_hr, "max_hr": max_hr, "mean_hr": mean_hr, "skw_hr": skw_hr, "kurt_hr": kurt_hr,
"std_hr": std_hr}
def load_results(folder, num_classes, modality, feature_type, hrv_win_len):
"""
Load results from machine learning based methods and combine with deep learning model based results
"""
MLRESULTS = os.path.join(folder, "%d_stages_%ds_ml_%s.csv" % (num_classes, hrv_win_len, modality))
dfml = pd.read_csv(MLRESULTS)
dfnn = get_nns(folder, num_classes, modality, feature_type, hrv_win_len)
dfml = dfml.rename(columns={"Unnamed: 0": "algs"})
dfnn = dfnn.rename(columns={"actValue": "activity"})
merged = pd.merge(dfml, dfnn, on=["mesaid", "linetime", "activity", "stages", "gt_sleep_block"])
assert len(merged.stages.unique()) == num_classes
for cl in ['activity_y', 'stages_y', 'gt_sleep_block_y']:
if cl in merged.columns:
del merged[cl]
merged["always_0"] = 0
merged["always_1"] = 1
merged["always_2"] = 2
merged["always_3"] = 3
merged["always_4"] = 4
# merged["sleep"] = (~merged["wake"].astype(np.bool)).astype(float)
return merged
def pvalue(results, alg1, alg2, metric):
"""
    get the t-test p-value comparing two algorithms on the given metric
:param results:
:param alg1:
:param alg2:
:param metric:
:return:
"""
return ttest_ind(results[alg1][metric], results[alg2][metric])[1]
def make_one_block(source_df, start_idx, end_idx):
# create a new df from the source df index and fill zeros
result = pd.Series(data=0, index=source_df.index)
# set a block in dataset frame with value 1
result.loc[start_idx:end_idx] = 1
return result
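# Illustration of make_one_block above (df_src is a made-up frame):
# df_src = pd.DataFrame({"x": range(6)})
# make_one_block(df_src, start_idx=2, end_idx=4) -> 0, 0, 1, 1, 1, 0 indexed like df_src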
def get_files_given_type(data_path, file_type):
"""
    this function will return all file names containing the given postfix
:param data_path:
:param file_type:
:return:
"""
print("searching csv files ...")
allfiles = os.listdir(data_path)
files = []
for idx, f in enumerate(allfiles):
if file_type in f:
files.append(os.path.basename(f))
print("total found {} files".format(len(files)))
files.sort()
return files
def plot_multiple_classifier_roc(files_path=None):
"""
    generates a single diagram containing the ROC curve of each classifier so that their performance can be compared
:param files_path:
:return:
"""
files = get_files_given_type(files_path, file_type='npz')
# plot roc curve
plt.figure(0).clf()
for npz_file in files:
with np.load(npz_file) as data:
label = data['experiment']
y_true = data['y_true']
y_pred = data['y_pred']
# label = np.random.randint(2, size=1000)
fpr, tpr, thresh = roc_curve(y_true, y_pred)
auc = roc_auc_score(y_true, y_pred)
plt.plot(fpr, tpr, label=label + " auc=%0.2f" % auc)
plt.plot([0, 1], [0, 1], color='red', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right")
plt.legend(loc=0)
def save_validation_logits(y_true, y_pred, classifier_name=None, file_path=None, ):
if file_path != None:
save_dict = {"experiment": classifier_name, 'y_true': y_true, 'y_pred': y_pred}
np.savez(file_path, **save_dict)
# we should first check whether the file exists and, if not, create it
def log_print_inference(y_test, yhat, label_value, target_names, epochs=0, tensor_board_path='', file_title=""
, args=None):
"""
    Log inference results to the tensorboard path so that we can track each experiment's prediction results, including
    accuracy, recall, precision, F1 score, the classification report, and the confusion matrix (as text and as a picture).
    TODO: add specificity, sensitivity and PPV, and also log the args
    TODO: we need two levels of performance evaluation: classifier level and label level
=== Confusion Matrix ===
a b c d e f g <-- classified as
50 15 3 0 0 1 1 | a = build wind float
16 47 6 0 2 3 2 | b = build wind non-float
5 5 6 0 0 1 0 | c = vehic wind float
0 0 0 0 0 0 0 | d = vehic wind non-float
0 2 0 0 10 0 1 | e = containers
1 1 0 0 0 7 0 | f = tableware
3 2 0 0 0 1 23 | g = headlamps
=== Detailed Accuracy By Class ===
TP Rate FP Rate Precision Recall F-Measure MCC ROC Area PRC Area Class
0.714 0.174 0.667 0.714 0.690 0.532 0.806 0.667 build wind float
0.618 0.181 0.653 0.618 0.635 0.443 0.768 0.606 build wind non-float
0.353 0.046 0.400 0.353 0.375 0.325 0.766 0.251 vehic wind float
0.000 0.000 0.000 0.000 0.000 0.000 ? ? vehic wind non-float
0.769 0.010 0.833 0.769 0.800 0.788 0.872 0.575 containers
0.778 0.029 0.538 0.778 0.636 0.629 0.930 0.527 tableware
0.793 0.022 0.852 0.793 0.821 0.795 0.869 0.738 headlamps
0.668 0.130 0.670 0.668 0.668 0.539 0.807 0.611 Weighted Avg.
:param args:
:param file_title:
:param y_test:
:param yhat:
:param label_value:
:param target_names:
:param epochs:
:param tensor_board_path:
:return:
"""
if args is not None:
write_arguments_to_file(args, os.path.join(tensor_board_path, file_title + "_args.csv"))
if len(y_test.shape) > 2:
y_test = np.reshape(y_test, -1)
accuracy = accuracy_score(y_test, yhat)
print('Accuracy: %f' % accuracy)
precision = precision_score(y_test, yhat, average='macro')
print('Precision: %f' % precision)
recall = recall_score(y_test, yhat, average='macro')
print('Recall: %f' % recall)
f1_result = f1_score(y_test, yhat, average='macro')
print('F1 score: %f' % f1_result)
    matrix = confusion_matrix(y_test, yhat, labels=label_value)
report = classification_report(y_test, yhat, target_names=target_names, digits=4)
print("Classification report: \n")
print(report)
to_json = {'epoch_num': [epochs], 'accuracy': [accuracy], 'precision_weighted': [precision], 'recall': [recall],
'f1_result': [f1_result]}
result = pd.DataFrame.from_dict(to_json)
result.to_csv(os.path.join(tensor_board_path, file_title + "metrics_summary.csv"), index=False)
np.savetxt(os.path.join(tensor_board_path, file_title + 'confusion_matrix.txt'), matrix, fmt='%d', delimiter=',')
with open(os.path.join(tensor_board_path, file_title + "classification_report.txt"), "w") as text_file:
text_file.write(report)
# for binary classification we produce the ROC curve
if len(target_names) == 2:
ratio = sum(y_test) / len(y_test)
        print("The ratio of positive cases is {}".format(str(ratio)))
# save the best trained model as well.
normal_path = plot_save_confusion_matrix(y_test, yhat, normalize=True, class_names=target_names,
location=tensor_board_path, title=file_title)
return [normal_path]
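# Minimal sketch for the docstring TODO above: deriving sensitivity, specificity and PPV from a
# binary confusion matrix. It assumes the positive class is labelled 1 and is not wired into
# log_print_inference itself.
def binary_rates_from_confusion(y_true, y_hat):
    tn, fp, fn, tp = confusion_matrix(y_true, y_hat, labels=[0, 1]).ravel()
    sensitivity = tp / (tp + fn) if (tp + fn) else 0.0  # recall of the positive class
    specificity = tn / (tn + fp) if (tn + fp) else 0.0  # recall of the negative class
    ppv = tp / (tp + fp) if (tp + fp) else 0.0  # positive predictive value (precision)
    return {"sensitivity": sensitivity, "specificity": specificity, "ppv": ppv}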
def log_print_metrics(y_pred, y_test, epochs, num_classes, note, tensorboard_path, args):
if len(y_pred.shape) > 1:
yhat_classes = np.argmax(y_pred, axis=-1)
else:
yhat_classes = y_pred
# Y_test_classes = np.reshape(y_test, (-1, 2))
if len(y_test.shape) > 1:
Y_test_classes = np.argmax(y_test, axis=-1)
else:
Y_test_classes = y_test
label_values, target_names = sleep_class_name_mapping(num_classes)
log_print_inference(Y_test_classes, yhat_classes, label_value=label_values, target_names=target_names,
epochs=epochs, tensor_board_path=tensorboard_path, file_title="dl_exp_%s" % note, args=args)
def sleep_class_name_mapping(num_classes):
if num_classes == 5:
label_values = [0, 1, 2, 3, 4]
target_names = ['Wake', 'N1', 'N2', 'N3', 'REM']
elif num_classes == 4:
label_values = [0, 1, 2, 3]
target_names = ['Wake', 'Light', 'Deep', 'REM']
elif num_classes == 3:
label_values = [0, 1, 2]
target_names = ['Wake', 'NREM', 'REM']
else:
label_values = [0, 1]
target_names = ['Wake', 'Sleep']
return label_values, target_names
def plot_pr_re_curve(y_true, y_prob, save_path=None):
# In matplotlib < 1.5, plt.fill_between does not have a 'step' argument
precision, recall, _ = precision_recall_curve(y_true, y_prob)
average_precision = average_precision_score(y_true, y_prob)
step_kwargs = ({'step': 'post'}
if 'step' in signature(plt.fill_between).parameters
else {})
plt.step(recall, precision, color='b', alpha=0.2,
where='post')
plt.fill_between(recall, precision, alpha=0.2, color='b', **step_kwargs)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(
average_precision))
def plot_roc_curve(y_true, y_prob, save_path=None):
if max(y_true) == 1:
return
fpr, tpr, thresholds = roc_curve(y_true, y_prob)
auc = roc_auc_score(y_true, y_prob)
####################################
# The optimal cut off would be where tpr is high and fpr is low
    # the threshold where tpr - (1 - fpr) is zero, or closest to zero, is the optimal cut-off point
####################################
i = np.arange(len(tpr)) # index for df
roc = pd.DataFrame(
{'fpr': pd.Series(fpr, index=i), 'tpr': pd.Series(tpr, index=i), '1-fpr': | pd.Series(1 - fpr, index=i) | pandas.Series |
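# Hedged sketch of the cut-off rule described in the comment inside plot_roc_curve: pick the
# threshold whose tpr - (1 - fpr) is closest to zero. Standalone illustration with its own imports.
import numpy as np
from sklearn.metrics import roc_curve

def optimal_roc_cutoff(y_true, y_prob):
    fpr, tpr, thresholds = roc_curve(y_true, y_prob)
    idx = np.argmin(np.abs(tpr - (1 - fpr)))
    return thresholds[idx]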
import pytest
import sys
import numpy as np
import swan_vis as swan
import networkx as nx
import math
import pandas as pd
###########################################################################
##################### Related to adding metadata ##########################
###########################################################################
class TestMetadata(object):
# test add_metadata - one after the other with dupe cols
# yes overwrite
def test_add_metadata_4(self):
sg = swan.SwanGraph()
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_dupecols.tsv'
sg.add_metadata(meta, overwrite=True)
assert {'3','4'} == set(sg.adata.obs.cluster.tolist())
# test add_metadata - one after the other with dupe cols
# don'e overwrite
def test_add_metadata_3(self):
sg = swan.SwanGraph()
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_dupecols.tsv'
sg.add_metadata(meta, overwrite=False)
assert {'2', '1'} == set(sg.adata.obs.cluster.tolist())
# test add_metadata - one after the other
def test_add_metadata_2(self):
sg = swan.SwanGraph()
# just gencode vM21 chr 11 and tcf3
gtf = 'files/chr11_and_Tcf3.gtf'
sg.add_annotation(gtf, verbose=True)
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_2.tsv'
sg.add_metadata(meta)
test = sg.adata.obs
data = [['D12', '1', 'K562', 'G0'],
['PB65_B017', '2', 'GM12878', 'M'],
['PB65_B018', '2', 'GM12878', 'S']]
cols = ['dataset', 'cluster', 'sample', 'cell_state']
ctrl = pd.DataFrame(data=data, columns=cols)
ctrl.index = ctrl.dataset
ctrl = ctrl[test.columns]
ctrl.sort_index(inplace=True)
test.sort_index(inplace=True)
print('test')
print(test)
print('control')
print(ctrl)
assert test.equals(ctrl)
# test add_metadata - vanilla
def test_add_metadata(self):
sg = swan.SwanGraph()
# just gencode vM21 chr 11 and tcf3
# gtf = 'files/chr11_and_Tcf3.gtf'
# sg.add_annotation(gtf, verbose=True)
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
# print(sg.t_df)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
test = sg.adata.obs
data = [['D12', '1', 'K562'],
['PB65_B017', '2', 'GM12878'],
['PB65_B018', '2', 'GM12878']]
cols = ['dataset', 'cluster', 'sample']
ctrl = pd.DataFrame(data=data, columns=cols)
ctrl.index = ctrl.dataset
ctrl = ctrl[test.columns]
ctrl.sort_index(inplace=True)
test.sort_index(inplace=True)
print('test')
print(test)
print('control')
print(ctrl)
assert test.equals(ctrl)
###########################################################################
############### Related to high-level dataset addition ####################
###########################################################################
class TestDataset(object):
# TODO
# add_dataset, add_transcriptome, add_annotation
# tests add_transcriptome - added after adding an annotation
def test_add_transcriptome_2(self):
sg = swan.SwanGraph()
sg.add_annotation('files/test_full_annotation.gtf')
sg.add_transcriptome('files/test_full.gtf')
# t_df
sg.t_df = sg.t_df[['tid', 'annotation']]
data = [['test1', True],
['test2', True],
['test3', False],
['test4', True],
['test5', True],
['test6', True]]
cols = ['tid', 'annotation']
ctrl_t_df = pd.DataFrame(data=data, columns=cols)
ctrl_t_df = swan.create_dupe_index(ctrl_t_df, 'tid')
ctrl_t_df = swan.set_dupe_index(ctrl_t_df, 'tid')
# first order to make them comparable
# sort all values by their IDs
sg.t_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_t_df = ctrl_t_df[sg.t_df.columns]
print('test')
print(sg.t_df)
print('control')
print(ctrl_t_df)
assert (sg.t_df == ctrl_t_df).all(axis=0).all()
# loc_df - new location at chr2, 65
print('test')
print(sg.loc_df)
ind = (sg.loc_df.chrom=='chr2')&(sg.loc_df.coord==65)
temp = sg.loc_df.loc[ind, 'annotation'].to_frame()
for i, entry in temp.iterrows():
assert entry.annotation == False
temp = sg.loc_df.loc[~ind]
for i, entry in temp.iterrows():
assert entry.annotation == True
# tests add_transcriptome - vanilla
def test_add_transcriptome_1(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
# tests add_annotation - transcriptome already in SG
def test_add_annotation_2(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.add_annotation('files/test_full_annotation.gtf')
# t_df
sg.t_df = sg.t_df[['tid', 'annotation']]
data = [['test1', True],
['test2', True],
['test3', False],
['test4', True],
['test5', True],
['test6', True]]
cols = ['tid', 'annotation']
ctrl_t_df = pd.DataFrame(data=data, columns=cols)
ctrl_t_df = swan.create_dupe_index(ctrl_t_df, 'tid')
ctrl_t_df = swan.set_dupe_index(ctrl_t_df, 'tid')
# first order to make them comparable
# sort all values by their IDs
sg.t_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_t_df = ctrl_t_df[sg.t_df.columns]
print('test')
print(sg.t_df)
print('control')
print(ctrl_t_df)
assert (sg.t_df == ctrl_t_df).all(axis=0).all()
# loc_df - new location at chr2, 65
print('test')
print(sg.loc_df)
ind = (sg.loc_df.chrom=='chr2')&(sg.loc_df.coord==65)
temp = sg.loc_df.loc[ind, 'annotation'].to_frame()
for i, entry in temp.iterrows():
assert entry.annotation == False
temp = sg.loc_df.loc[~ind]
for i, entry in temp.iterrows():
assert entry.annotation == True
# tests add_annotation - vanilla
def test_add_annotation_1(self):
sg = swan.SwanGraph()
sg.add_annotation('files/test_full_annotation.gtf')
# # loc_df
# data = [['chr1', 1, 0, True],
# ['chr1', 20, 1, True],
# ['chr1', 25, 2, True],
# ['chr1', 30, 3, True],
# ['chr1', 35, 4, True],
# ['chr1', 40, 5, True],
# ['chr2', 45, 6, True],
# ['chr2', 50, 7, True],
# ['chr2', 60, 8, True],
# ['chr2', 75, 10, True],
# ['chr2', 80, 11, True],
# ['chr2', 100, 12, True],
# ['chr2', 110, 13, True]]
# cols = ['chrom', 'coord', 'vertex_id', 'annotation']
# ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
# ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
# ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
#
# print('test')
# print(sg.loc_df)
# print('ctrl')
# print(ctrl_loc_df)
#
# print(sg.edge_df)
# assert 1 == 0
# # edge_df
# data = [[0, 1, '+', 'exon', 0, True],
# [1, 2],
# [2, 3],
# [3, 4],
# [4, 5],
# [5, 6],
# [6, 7],
#
#
# ]
# cols = ['v1', 'v2', 'strand', 'edge_type', 'annotation']
#
# # t_df
# data = [['test1', 'test1_tname', 'test1_gid', 'test1_gname', [0,1,2,3,4]], [0,1,2,3,4,5], True],
# ['test2', 'test2_tname', 'test2_gid', 'test2_gname', [5,6,7,8,9], [12,11,10,8,7,6], True],
# ['test4', 'test4_tname', 'test4_gid', 'test4_gname', [10], [6,7], True],
# ['test5', 'test5_tname', 'test2_gid', 'test2_gname', [5,11,12], [12,11,8,7], True],
# ['test6', 'test6_tname', 'test2_gid', 'test2_gname', [,6,7,8,9], [13,11,10,8,7,6], True]]
# cols = ['tid', 'tname', 'gid', 'gname', 'path', 'loc_path', 'annotation']
#
assert sg.annotation == True
assert 'annotation' in sg.t_df.columns
assert 'annotation' in sg.edge_df.columns
assert 'annotation' in sg.loc_df.columns
for ind, entry in sg.t_df.iterrows():
assert entry.annotation == True
assert entry.novelty == 'Known'
for ind, entry in sg.edge_df.iterrows():
assert entry.annotation == True
for ind, entry in sg.loc_df.iterrows():
assert entry.annotation == True
# tests:, label_annotated
# label annotated transcripts
def test_label_annotated(self):
sg = swan.SwanGraph()
data = [[0, [0,1]],
[1, [2,3]],
[2, [4,5]]]
sg.t_df = pd.DataFrame(data=data, columns=['tid', 'path'])
data = [[0,0,1], [1,1,2], [2,2,3], [3,3,4],
[4,4,5], [5,5,6]]
sg.edge_df = pd.DataFrame(data=data, columns=['edge_id', 'v1', 'v2'])
data = [0,1,2,3,4,5,6]
sg.loc_df = pd.DataFrame(data=data, columns=['vertex_id'])
tids = [0,1]
sg.label_annotated(tids)
ctrl_tids = [0,1]
tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
assert set(ctrl_tids) == set(tids)
ctrl_edges = [0,1,2,3]
edges = sg.edge_df.loc[sg.edge_df.annotation == True, 'edge_id'].tolist()
assert set(ctrl_edges) == set(edges)
ctrl_locs = [0,1,2,3,4]
locs = sg.loc_df.loc[sg.loc_df.annotation == True, 'vertex_id'].tolist()
assert set(ctrl_locs) == set(locs)
# add to empty sg, don't add isms
def test_add_transcriptome(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_novel_talon.gtf', include_isms=False)
print(sg.t_df)
assert "ISM" not in sg.t_df.novelty.unique()
# assert 1 == 0
# tests if correct error is thrown when adding annotation to
# sg that already has one
def test_add_annotation_already(self):
sg = swan.SwanGraph()
sg.annotation = True
with pytest.raises(Exception) as e:
sg.add_annotation('files/Canx.gtf')
assert 'Annotation already' in str(e.value)
# add annotation to empty sg
def test_add_annotation_empty_sg(self):
sg = swan.SwanGraph()
sg.add_annotation('files/test_full.gtf')
# check annotation columns
assert all(sg.t_df.annotation.tolist())
assert all(sg.edge_df.annotation.tolist())
assert all(sg.loc_df.annotation.tolist())
# check novelty column in t_df
assert len(sg.t_df.loc[sg.t_df.novelty=='Known']) == len(sg.t_df.index)
# check annotation flag
assert sg.annotation == True
# add annotation to sg with data already in it
def test_add_annotation_sg_data(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_novel.gtf')
sg.add_annotation('files/test_known.gtf')
# t_df
annot_tids = ['test1', 'test2', 'test4']
assert all(sg.t_df.loc[annot_tids, 'annotation'])
ctrl_novel_tids = ['test3', 'test5']
novel_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
assert len(set(ctrl_novel_tids)-set(novel_tids)) == 0
assert len(ctrl_novel_tids) == len(novel_tids)
# make sure the novelty assignment worked
annot_tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Known', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
annot_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Undefined', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
# loc_df
ctrl_novel_locs = [('chr2', 65)]
temp = sg.loc_df[sg.loc_df.annotation == False]
chroms = temp.chrom.tolist()
coords = temp.coord.tolist()
novel_locs = [(chrom, coord) for chrom, coord in zip(chroms, coords)]
print('control')
print(ctrl_novel_locs)
print('test')
print(novel_locs)
assert len(set(ctrl_novel_locs)-set(novel_locs)) == 0
assert len(novel_locs) == len(ctrl_novel_locs)
# edge_df
edge_df = sg.add_edge_coords()
edge_df = edge_df.loc[edge_df.annotation == False]
ctrl_novel_edges = [('chr2', 75, 65, '-', 'exon'),
('chr2', 65, 50, '-', 'intron'),
('chr2', 80, 60, '-', 'intron'),
('chr2', 60, 50, '-', 'exon')]
chroms = edge_df.chrom.tolist()
v1s = edge_df.v1_coord.tolist()
v2s = edge_df.v2_coord.tolist()
strands = edge_df.strand.tolist()
etypes = edge_df.edge_type.tolist()
novel_edges = [(chrom,v1,v2,strand,etype) for chrom,v1,v2,strand,etype \
in zip(chroms,v1s,v2s,strands,etypes)]
print('control')
print(ctrl_novel_edges)
print('test')
print(novel_edges)
assert len(set(ctrl_novel_edges)-set(novel_edges)) == 0
assert len(ctrl_novel_edges) == len(novel_edges)
# add annotation to sg with data where data contains dupe transcript
def test_add_annotation_sg_data_dupe_tid(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_novel_1.gtf')
sg.add_annotation('files/test_known.gtf')
# check with coord/chr because the reindexing logic has not been
# reimplemented yet
# t_df
annot_tids = ['test1', 'test2', 'test4']
assert all(sg.t_df.loc[annot_tids, 'annotation'])
ctrl_novel_tids = ['test3', 'test5']
novel_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
assert len(set(ctrl_novel_tids)-set(novel_tids)) == 0
assert len(ctrl_novel_tids) == len(novel_tids)
# make sure the novelty assignment worked
annot_tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Known', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
annot_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Undefined', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
# loc_df
ctrl_novel_locs = [('chr2', 65)]
temp = sg.loc_df[sg.loc_df.annotation == False]
chroms = temp.chrom.tolist()
coords = temp.coord.tolist()
novel_locs = [(chrom, coord) for chrom, coord in zip(chroms, coords)]
print('control')
print(ctrl_novel_locs)
print('test')
print(novel_locs)
assert len(set(ctrl_novel_locs)-set(novel_locs)) == 0
assert len(novel_locs) == len(ctrl_novel_locs)
# edge_df
edge_df = sg.add_edge_coords()
edge_df = edge_df.loc[edge_df.annotation == False]
ctrl_novel_edges = [('chr2', 75, 65, '-', 'exon'),
('chr2', 65, 50, '-', 'intron'),
('chr2', 80, 60, '-', 'intron'),
('chr2', 60, 50, '-', 'exon')]
chroms = edge_df.chrom.tolist()
v1s = edge_df.v1_coord.tolist()
v2s = edge_df.v2_coord.tolist()
strands = edge_df.strand.tolist()
etypes = edge_df.edge_type.tolist()
novel_edges = [(chrom,v1,v2,strand,etype) for chrom,v1,v2,strand,etype \
in zip(chroms,v1s,v2s,strands,etypes)]
print('control')
print(ctrl_novel_edges)
print('test')
print(novel_edges)
assert len(set(ctrl_novel_edges)-set(novel_edges)) == 0
assert len(ctrl_novel_edges) == len(novel_edges)
###########################################################################
###################### Related to file parsing ############################
###########################################################################
class TestFiles(object):
# tests GTF parsing
def test_parse_gtf(self):
gtf_file = 'files/Canx.gtf'
t_df, exon_df, from_talon = swan.parse_gtf(gtf_file, True, False)
t_df.index.name = 'tid_index'
t_df = t_df.sort_values(by='tid_index')
ctrl_t_df = pd.read_csv('files/Canx_transcript.tsv',sep='\t')
ctrl_t_df.set_index('tid_index', inplace=True)
ctrl_t_df = ctrl_t_df.sort_values(by='tid_index')
ctrl_exons = ctrl_t_df.exons.tolist()
ctrl_exons = [exons.split(',') for exons in ctrl_exons]
ctrl_t_df['exons'] = ctrl_exons
print(t_df == ctrl_t_df)
assert (t_df == ctrl_t_df).all(axis=0).all()
# tests TALON DB parsing - no pass_list
def test_parse_db_1(self):
db_file = 'files/test_full.db'
pass_list = 'files/test_full_pass_list.csv'
t_df, edge_df = swan.parse_db(db_file, None, False, True, False)
ctrl_t_df, ctrl_e_df = get_test_transcript_exon_dicts()
for key, item in ctrl_t_df.items():
item['exons'] = swan.reorder_exons(item['exons'])
ctrl_t_df = pd.DataFrame(ctrl_t_df).transpose()
ctrl_e_df = pd.DataFrame(ctrl_e_df).transpose()
# sort all values by their IDs
edge_df.sort_index(inplace=True)
t_df.sort_index(inplace=True)
ctrl_e_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_e_df = ctrl_e_df[edge_df.columns]
ctrl_t_df = ctrl_t_df[t_df.columns]
assert 'novelty' in t_df.columns
print('test')
print(edge_df)
print('control')
print(ctrl_e_df)
print(edge_df == ctrl_e_df)
assert (edge_df == ctrl_e_df).all(axis=0).all()
print('test')
print(t_df)
print(t_df.exons)
print('control')
print(ctrl_t_df)
print(ctrl_t_df.exons)
print(t_df == ctrl_t_df)
assert (t_df == ctrl_t_df).all(axis=0).all()
# tests TALON DB parsing - yes pass_list
def test_parse_db_2(self):
db_file = 'files/test_full.db'
pass_list = 'files/test_full_pass_list.csv'
t_df, edge_df = swan.parse_db(db_file, pass_list, False, True, False)
ctrl_t_df, ctrl_e_df = get_test_transcript_exon_dicts()
for key, item in ctrl_t_df.items():
item['exons'] = swan.reorder_exons(item['exons'])
# delete entries that weren't on pass list
del ctrl_e_df['chr2_45_50_+_exon']
del ctrl_t_df['test4']
ctrl_t_df = pd.DataFrame(ctrl_t_df).transpose()
ctrl_e_df = pd.DataFrame(ctrl_e_df).transpose()
# sort all values by their IDs
edge_df.sort_index(inplace=True)
t_df.sort_index(inplace=True)
ctrl_e_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_e_df = ctrl_e_df[edge_df.columns]
ctrl_t_df = ctrl_t_df[t_df.columns]
assert 'novelty' in t_df.columns
print('test')
print(edge_df)
print('control')
print(ctrl_e_df)
print(edge_df == ctrl_e_df)
assert (edge_df == ctrl_e_df).all(axis=0).all()
print('test')
print(t_df)
print(t_df.exons)
print('control')
print(ctrl_t_df)
print(ctrl_t_df.exons)
print(t_df == ctrl_t_df)
assert (t_df == ctrl_t_df).all(axis=0).all()
###########################################################################
####################### Related to DF creation ############################
###########################################################################
class TestCreateDFs(object):
# add_edge_coords, get_current_locs, get_current_edges,
# create_loc_dict, create_transcript_edge_dict, create_dfs
# tests add_edge_coords
def test_add_edge_coords(self):
sg = swan.SwanGraph()
sg = add_transcriptome_no_reorder_gtf(sg, 'files/test_full.gtf')
# sg.add_transcriptome('files/test_full.gtf')
cols = ['edge_id', 'v1', 'v2', 'strand', 'edge_type',
'v1_coord', 'v2_coord']
# print(sg.edge_df.head())
edge_df = sg.add_edge_coords()
print(edge_df.head())
edge_df = edge_df[cols]
ctrl_edge_df = pd.read_csv('files/test_add_edge_coords_result.tsv', sep='\t')
ctrl_edge_df = ctrl_edge_df[cols]
# first order to make them comparable
# sort all values by their IDs
edge_df.sort_values(by='edge_id', inplace=True)
ctrl_edge_df.sort_values(by='edge_id', inplace=True)
# and order columns the same way
ctrl_edge_df = ctrl_edge_df[edge_df.columns]
print('test')
print(edge_df)
print('control')
print(ctrl_edge_df)
assert (edge_df == ctrl_edge_df).all(axis=0).all()
# tests get_current_locs with an empty swangraph
def test_get_current_locs_empty_sg(self):
sg = swan.SwanGraph()
locs, n = sg.get_current_locs()
assert locs == {}
assert n == -1
# tests get_current_locs with a swangraph with data
def test_get_current_locs_sg_data(self):
sg = swan.SwanGraph()
cols = ['vertex_id', 'chrom', 'coord']
data = [[0, 1, 2], [1, 1, 3], [2, 3, 50]]
sg.loc_df = pd.DataFrame(data=data, columns=cols)
cols = ['tid']
data = [0]
sg.t_df = pd.DataFrame(data=data, columns=cols)
locs, n = sg.get_current_locs()
ctrl_locs = {(1,2):0, (1,3):1, (3,50):2}
assert locs == ctrl_locs
assert n == 2
# tests get_current_edges with an empty swangraph
def test_get_current_edges_empty_sg(self):
sg = swan.SwanGraph()
edges, n = sg.get_current_edges()
assert(edges == {})
assert(n == -1)
# tests get_current_edges in a sg with data
def test_get_current_edges_sg_data(self):
sg = swan.SwanGraph()
cols = ['vertex_id', 'chrom', 'coord']
data = [[0, 1, 2], [1, 1, 3], [2, 1, 50]]
sg.loc_df = pd.DataFrame(data=data, columns=cols)
cols = ['edge_id', 'v1', 'v2', 'strand', 'edge_type']
data = [[0, 0, 1, '+', 'exon'],
[1, 1, 2, '+', 'intron']]
sg.edge_df = pd.DataFrame(data=data, columns=cols)
cols = ['tid']
data = [0]
sg.t_df = pd.DataFrame(data=data, columns=cols)
edges, n = sg.get_current_edges()
ctrl = {(1,2,3,'+','exon'): {'edge_id': 0,
'edge_type': 'exon',
'v1': 0 ,
'v2': 1},
(1,3,50,'+','intron'): {'edge_id': 1,
'edge_type': 'intron',
'v1': 1,
'v2': 2}}
assert(edges == ctrl)
assert(n == 1)
# test create_loc_dict on an empty sg
# also checks to make sure exons that use the same loc
# don't result in dupe entries in loc_df
def test_create_loc_dict_empty_sg(self):
_, exons = get_test_transcript_exon_dicts()
sg = swan.SwanGraph()
locs = sg.create_loc_dict(exons)
ctrl_locs = {('chr1',1): 0,
('chr1', 20): 1,
('chr1', 25): 2,
('chr1', 30): 3,
('chr1', 35): 4,
('chr1', 40): 5,
('chr2', 100): 6,
('chr2', 80): 7,
('chr2', 75): 8,
('chr2', 60): 9,
('chr2', 50): 10,
('chr2', 45): 11,
('chr2', 65): 12
}
assert(ctrl_locs == locs)
# tests create_loc_dict when locs already exist in sg
def test_create_loc_dict_sg_data(self):
_, exons = get_test_transcript_exon_dicts()
# dummy preexisting data
sg = swan.SwanGraph()
data = [[0, 'chr1', 1], [1, 'chr2', 80]]
columns = ['vertex_id', 'chrom', 'coord']
sg.loc_df = pd.DataFrame(data=data, columns=columns)
cols = ['tid']
data = [0]
sg.t_df = pd.DataFrame(data=data, columns=cols)
locs = sg.create_loc_dict(exons)
ctrl_locs = {('chr1', 1):0,
('chr2', 80): 1,
('chr1', 20): 2,
('chr1', 25): 3,
('chr1', 30): 4,
('chr1', 35): 5,
('chr1', 40): 6,
('chr2', 100): 7,
('chr2', 75): 8,
('chr2', 60): 9,
('chr2', 50): 10,
('chr2', 45): 11,
('chr2', 65): 12
}
print('test')
print(locs)
print('control')
print(ctrl_locs)
assert(ctrl_locs == locs)
# tests create_transcript_edge_dict empty swangraph
def test_create_transcript_edge_dict_empty_sg(self):
transcripts, exons = get_test_transcript_exon_dicts()
sg = swan.SwanGraph()
locs = sg.create_loc_dict(exons)
transcripts, edges = sg.create_transcript_edge_dicts(transcripts, exons, locs)
# just compare the paths for the transcripts, which is the only
# part modified by this function
transcripts = dict([(key, item['path']) for key, item in transcripts.items()])
ctrl_transcript_paths = {
'test1': [0,1,2,3,4],
'test2': [5,6,7,8,9],
'test3': [5,6,10,11,9],
'test4': [12],
'test5': [5,13,14]
}
assert(transcripts == ctrl_transcript_paths)
ctrl_edges = {
('chr1', 1, 20, '+', 'exon'): {
'edge_id': 0,
'edge_type': 'exon',
'v1': 0,
'v2': 1
},
('chr1', 20, 25, '+', 'intron'): {
'edge_id': 1,
'edge_type': 'intron',
'v1': 1,
'v2': 2
},
('chr1', 25, 30, '+', 'exon'): {
'edge_id': 2,
'edge_type': 'exon',
'v1': 2,
'v2': 3
},
('chr1', 30, 35, '+', 'intron'): {
'edge_id': 3,
'edge_type': 'intron',
'v1': 3,
'v2': 4
},
('chr1', 35, 40, '+', 'exon'): {
'edge_id': 4,
'edge_type': 'exon',
'v1': 4,
'v2': 5
},
('chr2', 100, 80, '-', 'exon'): {
'edge_id': 5,
'edge_type': 'exon',
'v1': 6,
'v2': 7
},
('chr2', 80, 75, '-', 'intron'): {
'edge_id': 6,
'edge_type': 'intron',
'v1': 7,
'v2': 8
},
('chr2', 75, 60, '-', 'exon'): {
'edge_id': 7,
'edge_type': 'exon' ,
'v1': 8,
'v2': 9
},
('chr2', 60, 50, '-', 'intron'): {
'edge_id': 8,
'edge_type': 'intron',
'v1': 9,
'v2': 10
},
('chr2', 50, 45, '-', 'exon'): {
'edge_id': 9,
'edge_type': 'exon',
'v1': 10,
'v2': 11
},
('chr2', 75, 65, '-', 'exon'): {
'edge_id': 10,
'edge_type': 'exon',
'v1': 8,
'v2': 12
},
('chr2', 65, 50, '-', 'intron'): {
'edge_id': 11,
'edge_type': 'intron',
'v1': 12,
'v2': 10
},
('chr2', 45, 50, '+', 'exon'): {
'edge_id': 12,
'edge_type': 'exon',
'v1': 11,
'v2': 10
},
('chr2', 80, 60, '-', 'intron'): {
'edge_id': 13,
'edge_type': 'intron',
'v1': 7,
'v2': 9
},
('chr2', 60, 50, '-', 'exon'): {
'edge_id': 14,
'edge_type': 'exon',
'v1': 9,
'v2': 10
}
}
assert(edges == ctrl_edges)
# tests create_transcript_edge_dict with edges already in swangraph
def test_create_transcript_edge_dict_edge_sg(self):
transcripts, exons = get_test_transcript_exon_dicts()
# add some dummy data
sg = swan.SwanGraph()
data = [[0, 'chr1', 1],
[1, 'chr2', 20],
[2, 'chr2', 100],
[3, 'chr2', 80]]
columns = ['vertex_id', 'chrom', 'coord']
sg.loc_df = pd.DataFrame(data=data, columns=columns)
cols = ['tid']
data = [0]
sg.t_df = pd.DataFrame(data=data, columns=cols)
locs = sg.create_loc_dict(exons)
data = [[0, 0, 1, '+', 'exon'],
[1, 2, 3, '-', 'exon']]
columns = ['edge_id', 'v1', 'v2', 'strand', 'edge_type']
sg.edge_df = pd.DataFrame(data=data, columns=columns)
transcripts, edges = sg.create_transcript_edge_dicts(transcripts, exons, locs)
# just compare the paths for the transcripts, which is the only
# part modified by this function
transcripts = dict([(key, item['path']) for key, item in transcripts.items()])
ctrl_transcript_paths = {
'test1': [0,2,3,4,5],
'test2': [1,6,7,8,9],
'test3': [1,6,10,11,9],
'test4': [12],
'test5': [1,13,14]
}
assert(transcripts == ctrl_transcript_paths)
ctrl_edges = {
('chr1', 1, 20, '+', 'exon'): {
'edge_id': 0,
'edge_type': 'exon',
'v1': 0,
'v2': 1
},
('chr1', 20, 25, '+', 'intron'): {
'edge_id': 2,
'edge_type': 'intron',
'v1': 4,
'v2': 5
},
('chr1', 25, 30, '+', 'exon'): {
'edge_id': 3,
'edge_type': 'exon',
'v1': 5,
'v2': 6
},
('chr1', 30, 35, '+', 'intron'): {
'edge_id': 4,
'edge_type': 'intron',
'v1': 6,
'v2': 7
},
('chr1', 35, 40, '+', 'exon'): {
'edge_id': 5,
'edge_type': 'exon',
'v1': 7,
'v2': 8
},
('chr2', 100, 80, '-', 'exon'): {
'edge_id': 1,
'edge_type': 'exon',
'v1': 2,
'v2': 3
},
('chr2', 80, 75, '-', 'intron'): {
'edge_id': 6,
'edge_type': 'intron',
'v1': 3,
'v2': 9
},
('chr2', 75, 60, '-', 'exon'): {
'edge_id': 7,
'edge_type': 'exon' ,
'v1': 9,
'v2': 10
},
('chr2', 60, 50, '-', 'intron'): {
'edge_id': 8,
'edge_type': 'intron',
'v1': 10,
'v2': 11
},
('chr2', 50, 45, '-', 'exon'): {
'edge_id': 9,
'edge_type': 'exon',
'v1': 11,
'v2': 12
},
('chr2', 75, 65, '-', 'exon'): {
'edge_id': 10,
'edge_type': 'exon',
'v1': 9,
'v2': 13
},
('chr2', 65, 50, '-', 'intron'): {
'edge_id': 11,
'edge_type': 'intron',
'v1': 13,
'v2': 11
},
('chr2', 45, 50, '+', 'exon'): {
'edge_id': 12,
'edge_type': 'exon',
'v1': 12,
'v2': 11
},
('chr2', 80, 60, '-', 'intron'): {
'edge_id': 13,
'edge_type': 'intron',
'v1': 3,
'v2': 10
},
('chr2', 60, 50, '-', 'exon'): {
'edge_id': 14,
'edge_type': 'exon',
'v1': 10,
'v2': 11
}
}
assert(edges == ctrl_edges)
# # tests create_transcript_edge_dict where transcripts already
# # # exist in the swangraph
# # def test_create_transcript_edge_dict_edge_t_sg(self):
# # pass
# # # TODO
#
# tests create_dfs with an empty sg
# also ensures that empty dict -> df -> dict conversion doesn't screw up
def test_create_dfs_empty_sg(self):
transcripts, exons = get_test_transcript_exon_dicts()
sg = swan.SwanGraph()
loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, False)
ctrl_loc_df = pd.read_csv('files/test_loc_df.tsv', sep='\t')
ctrl_loc_df.set_index('vertex_id_index', inplace=True)
ctrl_loc_df.index.name = 'vertex_id'
# remove the columns that are there just for debugging purposes
ctrl_edge_df = pd.read_csv('files/test_edge_df.tsv', sep='\t')
ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
ctrl_edge_df.set_index('edge_id_index', inplace=True)
ctrl_edge_df.index.name = 'edge_id'
# again, remove and reformat columns that are there for debugging
ctrl_t_df = pd.read_csv('files/test_t_df.tsv', sep='\t')
ctrl_t_df.set_index('tid_index', inplace=True)
ctrl_t_df.index.name = 'tid'
ctrl_t_df.drop(['loc_path', 'novelty'], axis=1, inplace=True)
ctrl_t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
ctrl_t_df['path'] = ctrl_t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
# tests create_dfs when from_talon = True
def test_create_dfs_empty_sg_from_talon(self):
transcripts, exons = get_test_transcript_exon_dicts()
sg = swan.SwanGraph()
loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, True)
ctrl_loc_df = pd.read_csv('files/test_loc_df.tsv', sep='\t')
ctrl_loc_df.set_index('vertex_id_index', inplace=True)
ctrl_loc_df.index.name = 'vertex_id'
# remove the columns that are there just for debugging purposes
ctrl_edge_df = pd.read_csv('files/test_edge_df.tsv', sep='\t')
ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
ctrl_edge_df.set_index('edge_id_index', inplace=True)
ctrl_edge_df.index.name = 'edge_id'
# again, remove and reformat columns that are there for debugging
ctrl_t_df = pd.read_csv('files/test_t_df.tsv', sep='\t')
ctrl_t_df.set_index('tid_index', inplace=True)
ctrl_t_df.index.name = 'tid'
ctrl_t_df.drop(['loc_path'], axis=1, inplace=True)
ctrl_t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
ctrl_t_df['path'] = ctrl_t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
ctrl_t_df = ctrl_t_df[t_df.columns]
check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
# tests create_dfs in a swangraph with data
def test_create_dfs_data_sg(self):
transcripts, exons = get_test_transcript_exon_dicts()
del transcripts['test2']
sg = swan.SwanGraph()
# add dummy data
# loc_df - format
loc_df = pd.read_csv('files/test_preexisting_loc_df.tsv', sep='\t')
loc_df.set_index('vertex_id_index', inplace=True)
loc_df.index.name = 'vertex_id'
# edge_df - format and remove the columns that are there
# just for debugging purposes
edge_df = pd.read_csv('files/test_preexisting_edge_df.tsv', sep='\t')
edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
edge_df.set_index('edge_id_index', inplace=True)
edge_df.index.name = 'edge_id'
# t_df - remove and reformat columns that are there for debugging
t_df = pd.read_csv('files/test_preexisting_t_df.tsv', sep='\t')
t_df.set_index('tid_index', inplace=True)
t_df.index.name = 'tid'
t_df.drop(['loc_path'], axis=1, inplace=True)
t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
t_df['path'] = t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
t_df = t_df[t_df.columns]
sg.loc_df = loc_df
sg.edge_df = edge_df
sg.t_df = t_df
loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, True)
# control data
# loc_df - format
ctrl_loc_df = pd.read_csv('files/test_preexisting_result_loc_df.tsv', sep='\t')
ctrl_loc_df.set_index('vertex_id_index', inplace=True)
ctrl_loc_df.index.name = 'vertex_id'
# edge_df - format and remove the columns that are there
# just for debugging purposes
ctrl_edge_df = pd.read_csv('files/test_preexisting_result_edge_df.tsv', sep='\t')
ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
ctrl_edge_df.set_index('edge_id_index', inplace=True)
ctrl_edge_df.index.name = 'edge_id'
# t_df - remove and reformat columns that are there for debugging
ctrl_t_df = pd.read_csv('files/test_preexisting_result_t_df.tsv', sep='\t')
ctrl_t_df.set_index('tid_index', inplace=True)
ctrl_t_df.index.name = 'tid'
ctrl_t_df.drop(['loc_path'], axis=1, inplace=True)
ctrl_t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
ctrl_t_df['path'] = ctrl_t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
ctrl_t_df = ctrl_t_df[t_df.columns]
check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
# tests create_dfs in sg with data where existing data has novelty
# and added dataset does not
def test_create_dfs_data_sg_nov1(self):
transcripts, exons = get_test_transcript_exon_dicts()
# TODO: remove the transcript that's already there
sg = swan.SwanGraph()
# add dummy data
# loc_df - format
loc_df = pd.read_csv('files/test_preexisting_loc_df.tsv', sep='\t')
loc_df.set_index('vertex_id_index', inplace=True)
loc_df.index.name = 'vertex_id'
# edge_df - format and remove the columns that are there
# just for debugging purposes
edge_df = pd.read_csv('files/test_preexisting_edge_df.tsv', sep='\t')
edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
edge_df.set_index('edge_id_index', inplace=True)
edge_df.index.name = 'edge_id'
# t_df - remove and reformat columns that are there for debugging
t_df = pd.read_csv('files/test_preexisting_t_df.tsv', sep='\t')
t_df.set_index('tid_index', inplace=True)
t_df.index.name = 'tid'
t_df.drop(['loc_path'], axis=1, inplace=True)
t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
t_df['path'] = t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
t_df = t_df[t_df.columns]
sg.loc_df = loc_df
sg.edge_df = edge_df
sg.t_df = t_df
loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, False)
# control data
# loc_df - format
ctrl_loc_df = pd.read_csv('files/test_preexisting_result_loc_df.tsv', sep='\t')
ctrl_loc_df.set_index('vertex_id_index', inplace=True)
ctrl_loc_df.index.name = 'vertex_id'
# edge_df - format and remove the columns that are there
# just for debugging purposes
ctrl_edge_df = pd.read_csv('files/test_preexisting_result_edge_df.tsv', sep='\t')
ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
ctrl_edge_df.set_index('edge_id_index', inplace=True)
ctrl_edge_df.index.name = 'edge_id'
# t_df - remove and reformat columns that are there for debugging
ctrl_t_df = pd.read_csv('files/test_preexisting_result_t_df.tsv', sep='\t')
ctrl_t_df.set_index('tid_index', inplace=True)
ctrl_t_df.index.name = 'tid'
ctrl_t_df.drop(['loc_path'], axis=1, inplace=True)
ctrl_t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
ctrl_t_df['path'] = ctrl_t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
ctrl_t_df = ctrl_t_df[t_df.columns]
# remove novelty for entries that are new
new_tids = ['test1', 'test3', 'test4', 'test5']
ctrl_t_df.loc[ctrl_t_df.tid.isin(new_tids), 'novelty'] = 'Undefined'
check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
# tests create_dfs with preexisting data and a duplicate transcript
# being added
# also tests that old data (novelty in this case) is not overwritten
def test_create_dfs_data_sg_dupe(self):
transcripts, exons = get_test_transcript_exon_dicts()
sg = swan.SwanGraph()
# add dummy data
# loc_df - format
loc_df = pd.read_csv('files/test_preexisting_loc_df.tsv', sep='\t')
loc_df.set_index('vertex_id_index', inplace=True)
loc_df.index.name = 'vertex_id'
# edge_df - format and remove the columns that are there
# just for debugging purposes
edge_df = pd.read_csv('files/test_preexisting_edge_df.tsv', sep='\t')
edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
edge_df.set_index('edge_id_index', inplace=True)
edge_df.index.name = 'edge_id'
# t_df - remove and reformat columns that are there for debugging
t_df = pd.read_csv('files/test_preexisting_t_df.tsv', sep='\t')
t_df.set_index('tid_index', inplace=True)
t_df.index.name = 'tid'
t_df.drop(['loc_path'], axis=1, inplace=True)
t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
t_df['path'] = t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
t_df = t_df[t_df.columns]
sg.loc_df = loc_df
sg.edge_df = edge_df
sg.t_df = t_df
loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, False)
# control data
# loc_df - format
ctrl_loc_df = pd.read_csv('files/test_preexisting_result_loc_df.tsv', sep='\t')
ctrl_loc_df.set_index('vertex_id_index', inplace=True)
ctrl_loc_df.index.name = 'vertex_id'
# edge_df - format and remove the columns that are there
# just for debugging purposes
ctrl_edge_df = pd.read_csv('files/test_preexisting_result_edge_df.tsv', sep='\t')
ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
ctrl_edge_df.set_index('edge_id_index', inplace=True)
ctrl_edge_df.index.name = 'edge_id'
# t_df - remove and reformat columns that are there for debugging
ctrl_t_df = | pd.read_csv('files/test_preexisting_result_t_df.tsv', sep='\t') | pandas.read_csv |
import csv
import os
import pickle
import re
import emoji
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import MinMaxScaler
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from emotion.EmojiEmotionFeature import EmojiEmotionFeature
from personality_features import get_lang_based_scores
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
# function to clean the text of any punctuation or special characters
# we keep slang and emojis as they might aid in differentiating between fake and real news spreaders
# TODO: improve text preprocessing
def cleanPunc(sentence):
cleaned = re.sub(r'[?|!|\'|"|#]', r'', sentence)
cleaned = re.sub(r'[.|,|)|(|\|/]', r' ', cleaned)
cleaned = cleaned.strip()
cleaned = cleaned.replace("\n", " ")
return cleaned
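# Example (illustrative): cleanPunc("Great news!!! Can't wait...") removes the
# '!' and apostrophe characters and turns '.'/',' into spaces, leaving roughly
# "Great news Cant wait".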
# function to count emojis
def emoji_count(text):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002702-\U000027B0"
"]+", flags=re.UNICODE)
counter = 0
data = list(text) # split the text into characters
for word in data:
counter += len(emoji_pattern.findall(word))
return counter
# function to flag the presence of negation words (returns 0 or 1)
def negation(tokens: list):
negations = ["not", "no"]
negative = 0
for token in tokens:
if token in negations:
negative = 1
return negative
# function to count slang words
def slang_count(text):
slang_data = []
with open(os.path.join(__location__, "utils/slang.txt"), 'r', encoding="utf8") as exRtFile:
exchReader = csv.reader(exRtFile, delimiter=':')
for row in exchReader:
slang_data.append(row[0].lower())
counter = 0
data = text.lower().split()
for word in data:
for slang in slang_data:
if slang == word:
counter += 1
return counter
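# Example (illustrative): slang_count("omg that movie was lit") returns the
# number of whitespace-separated tokens that exactly match an entry in
# utils/slang.txt; the exact value depends on the contents of that file.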
def demoji(tokens):
"""
This function replaces each emoji token with its textual description so that it can later be used for vectorization and ML predictions
:param tokens: list of string tokens
:return: list of tokens with emoji replaced by their text aliases
"""
emoji_description = []
for token in tokens:
detect = emoji.demojize(token)
emoji_description.append(detect)
return emoji_description
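# Example (illustrative): demoji(["🙂", "hi"]) runs each token through
# emoji.demojize, so an emoji token becomes its ":name:" alias (e.g.
# ":slightly_smiling_face:") while plain words pass through unchanged.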
def get_emotion(doc):
"""
This method is used to detect emoji. Each detected emoji is scored against four categories (anger, fear, joy, sadness), and the values are averaged per category over all detected emoji.
:param doc: Array of string tokens
:return: anger, fear, joy, sadness average values
"""
emoji_emo = EmojiEmotionFeature()
emojies = demoji(doc)
emoji_emotion_values = []
for e in emojies:
if emoji_emo.is_emoji_name_like(e):
if emoji_emo.exists_emoji_name(e):
emotions = emoji_emo.emotions_of_emoji_named(e)
emoji_emotion_values.append(emotions)
if len(emoji_emotion_values) > 0:
anger_total = 0
fear_total = 0
joy_total = 0
sadness_total = 0
for emotions in emoji_emotion_values:
anger_total += emotions[0]
fear_total += emotions[1]
joy_total += emotions[2]
sadness_total += emotions[3]
anger = anger_total / len(emoji_emotion_values)
fear = fear_total / len(emoji_emotion_values)
joy = joy_total / len(emoji_emotion_values)
sadness = sadness_total / len(emoji_emotion_values)
return anger, fear, joy, sadness
return 0, 0, 0, 0
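# Example (illustrative): for a token list whose recognized emoji are mostly
# joyful, get_emotion returns a comparatively high joy average and lower
# anger/fear/sadness averages; with no recognized emoji it returns (0, 0, 0, 0).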
# function to count RTs, User mentions, hashtags, urls
def count_relics(text):
retweets = len(re.findall('RT', text))
user_mentions = len(re.findall('#USER#', text))
hashtags = len(re.findall('#HASHTAG#', text))
urls = len(re.findall('#URL#', text))
return retweets, user_mentions, hashtags, urls
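# Example (illustrative): count_relics("RT #USER# check this out #URL#")
# returns (1, 1, 0, 1): one retweet marker, one user mention, no hashtags,
# one URL placeholder.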
# function to clean relics of dataset
def clean_relics(text):
text = re.sub(r"RT", "", text)
text = re.sub(r"#USER#", "", text)
text = re.sub(r"#HASHTAG#", "", text)
text = re.sub(r"#URL#", "", text)
return text
# function to count capitalized words (e.g. Apple, Woof, Dog but not APPLE etc)
def capitalized_count(text):
text = clean_relics(text)
t = re.findall('([A-Z][a-z]+)', text)
return len(t)
# function to count capitalized words (e.g. APPLE, WOOF)
def full_capitalized_count(text):
text = clean_relics(text)
t = re.findall('([A-Z][A-Z]+)', text)
return len(t)
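# Example (illustrative): full_capitalized_count("NASA hired new Nasa staff")
# returns 1, because only "NASA" contains two or more consecutive capitals.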
# function to convert text to TF-IDF vectors and pickle the fitted vectorizer
def get_tfidf_vectors(df):
vectorizer = TfidfVectorizer(stop_words='english', max_features=1000, ngram_range=(1, 3), min_df=0.01, max_df=0.90)
vectors = vectorizer.fit_transform(df['text'])
pickle.dump(vectorizer, open("tfidf_fake_news.pkl", "wb")) # save tfidf vector
vectors_pd = pd.DataFrame(vectors.toarray())
df = df.drop(columns=['text'])
new_df = pd.concat([df, vectors_pd], axis=1)
return new_df
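# Usage sketch (assumes a DataFrame with a 'text' column, as above):
# train_features = get_tfidf_vectors(train_df) # fits and pickles the vectorizer
# test_features = get_tfidf_vectors_from_pickle(test_df) # reuses the saved fit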
# function to load trained vectorizer and transform new text for predictions
def get_tfidf_vectors_from_pickle(df):
vectorizer = pickle.load(open("tfidf_fake_news.pkl", 'rb'))
vectors = vectorizer.transform(df['text'])
vectors_pd = pd.DataFrame(vectors.toarray())
df = df.drop(columns=['text'])
new_df = pd.concat([df, vectors_pd], axis=1)
return new_df
def get_features_for_gender(df):
df['avg_word_count'] = df['text'].str.split().str.len() / 300
df['slang_count'] = 0
df['emoji_count'] = 0
df['capitalized_count'] = 0
df['full_capitalized_count'] = 0
for i in range(0, len(df)):
df['slang_count'].iloc[i] = slang_count(df['text'].iloc[i])
df['emoji_count'].iloc[i] = emoji_count(df['text'].iloc[i])
df['capitalized_count'].iloc[i] = capitalized_count(df['text'].iloc[i])
df['full_capitalized_count'].iloc[i] = full_capitalized_count(df['text'].iloc[i])
return df[
['user_id', 'avg_word_count', 'emoji_count', 'slang_count', 'capitalized_count', 'full_capitalized_count']]
def get_readability_features(df):
df['avg_word_count'] = df['text'].str.split().str.len() / 300
df['slang_count'] = 0
df['emoji_count'] = 0
df['capitalized_count'] = 0
df['full_capitalized_count'] = 0
df['retweets_count'] = 0
df['user_mentions_count'] = 0
# df['hashtags_count'] = 0
# df['url_count'] = 0
for i in range(0, len(df)):
df['slang_count'].iloc[i] = slang_count(df['text'].iloc[i])
df['emoji_count'].iloc[i] = emoji_count(df['text'].iloc[i])
df['capitalized_count'].iloc[i] = capitalized_count(df['text'].iloc[i])
df['full_capitalized_count'].iloc[i] = full_capitalized_count(df['text'].iloc[i])
retweets, user_mentions, hashtags, urls = count_relics(df['text'].iloc[i])
df['retweets_count'].iloc[i] = retweets
df['user_mentions_count'].iloc[i] = user_mentions
# df['hashtags_count'].iloc[i] = hashtags
# df['url_count'].iloc[i] = urls
return df[
['user_id', 'avg_word_count', 'emoji_count', 'slang_count', 'capitalized_count', 'full_capitalized_count',
'retweets_count', 'user_mentions_count']]
def get_personality_features(df):
df['text'] = df['text'].apply(clean_relics)
users_with_personality = get_lang_based_scores(df) # returns a df with user_id and personality scores
# users_with_personality.to_csv("users_with_personality.csv", index=False)
return users_with_personality
def get_gender_features(df):
vectorizer = pickle.load(open("./gender/tfidf_gender.pkl", "rb"))
# note: 'hashtags_count' is currently commented out in get_readability_features,
# so drop it defensively with errors='ignore'
data_readability = get_readability_features(df).drop(['user_id', 'hashtags_count', 'user_mentions_count'], axis=1, errors='ignore')
vectors = vectorizer.transform(df['text'])
# save sparse tfidf vectors to dataframe to use with other features
vectors_pd = pd.DataFrame(vectors.toarray())
features = pd.concat([vectors_pd, data_readability], axis=1)
# use scaler to scale our data to [0,1] range
x = features.values # returns a numpy array
scaler = MinMaxScaler()
x_scaled = scaler.fit_transform(x)
X = pd.DataFrame(x_scaled, columns=features.columns)
# load gender classifier
filename = 'classifiers/gender/Naive Bayes_0.708029197080292_final.sav'
clf = pickle.load(open(filename, 'rb'))
y = clf.predict(X)
# convert M and F to float representation
gender = {'M': 0, 'F': 1}
y = [gender[item] for item in y]
df['gender'] = y
return df[['user_id', 'gender']]
def get_sentiment_features(df):
vader = SentimentIntensityAnalyzer()
df['anger'] = 0
df['fear'] = 0
df['joy'] = 0
df['sadness'] = 0
df['negation'] = 0
df['vader_score'] = 0
df['textblob_score'] = 0
vader_score = []
textblob_score = []
for i in range(0, len(df)):
df['anger'].iloc[i], df['fear'].iloc[i], df['joy'].iloc[i], df['sadness'].iloc[i] \
= get_emotion(df['text'].iloc[i])
df['negation'].iloc[i] = negation(df['text'].iloc[i].split()) # tokenize so whole negation words are matched
k = vader.polarity_scores(df['text'].iloc[i])
l = TextBlob(df['text'].iloc[i]).sentiment
vader_score.append(k)
textblob_score.append(l)
vader_compound_score = pd.DataFrame.from_dict(vader_score)
textblob_polarity_score = | pd.DataFrame.from_dict(textblob_score) | pandas.DataFrame.from_dict |
__all__ = ["spectrometer_sensitivity"]
# standard library
from typing import List, Union
# dependent packages
import numpy as np
import pandas as pd
from .atmosphere import eta_atm_func
from .instruments import eta_Al_ohmic_850, photon_NEP_kid, window_trans
from .physics import johnson_nyquist_psd, rad_trans, T_from_psd
from .physics import c, h, k
from .filter import eta_filter_lorentzian, eta_filter_csv, weighted_average
# type aliases
ArrayLike = Union[np.ndarray, List[float], List[int], float, int]
# main functions
def spectrometer_sensitivity(
filter_transmission_csv: str = "",
F: ArrayLike = 350.0e9,
R: float = 500.0,
F_res: int = 30,
overflow: int = 80,
pwv: float = 0.5,
EL: float = 60.0,
eta_M1_spill: ArrayLike = 0.99,
eta_M2_spill: ArrayLike = 0.90,
eta_wo_spill: ArrayLike = 0.99,
n_wo_mirrors: int = 4,
window_AR: bool = True,
eta_co: ArrayLike = 0.65,
eta_lens_antenna_rad: ArrayLike = 0.81,
eta_circuit: ArrayLike = 0.32,
eta_IBF: ArrayLike = 0.5,
KID_excess_noise_factor: float = 1.1,
theta_maj: ArrayLike = 22.0 * np.pi / 180.0 / 60.0 / 60.0,
theta_min: ArrayLike = 22.0 * np.pi / 180.0 / 60.0 / 60.0,
eta_mb: ArrayLike = 0.6,
telescope_diameter: float = 10.0,
Tb_cmb: ArrayLike = 2.725,
Tp_amb: ArrayLike = 273.0,
Tp_cabin: ArrayLike = 290.0,
Tp_co: ArrayLike = 4.0,
Tp_chip: ArrayLike = 0.12,
snr: float = 5.0,
obs_hours: float = 10.0,
on_source_fraction: float = 0.4 * 0.9,
on_off: bool = True,
):
"""Calculate the sensitivity of a spectrometer.
Parameters which are functions of frequency can be a vector (see Parameters).
Output is a pandas DataFrame which contains the results of the simulation (see Returns).
Parameters
----------
filter_transmission_csv
Optional. File location of a .csv file with transmission for filter channels
Header: Frequencies
rows: filter channels with transmission per column frequency
F
Used when filter_transmission_csv isn't used.
Frequency of the astronomical signal. Units: Hz.
R
Used when filter_transmission_csv isn't used.
Spectral resolving power in F/W_F where W_F is equivalent bandwidth and HWHM
of filters.
Units: None. See also: http://www.astrosurf.com/buil/us/spe2/hresol7.htm
F_res
Used when filter_transmission_csv isn't used.
The number of frequency bins within a FWHM
Units: none.
overflow
Used when filter_transmission_csv isn't used.
The number of extra FWHMs below the first and above the last channel.
Units: none.
pwv
Precipitable water vapour. Units: mm.
EL
Telescope elevation angle. Units: degrees.
eta_M1_spill
Spillover efficiency at the telescope primary mirror. Units: None.
eta_M2_spill
Spillover efficiency at the telescope secondary mirror. Units: None.
eta_wo_spill
Product of all spillover losses in the warm optics in the cabin. Units: None.
n_wo_mirrors
Number of cabin optics excluding telescope M1 and M2. Units: None.
window_AR
Whether the window is supposed to have an anti-reflection (AR) coating (True) or not (False).
eta_co
Product of following. Units: None.
(1) Cold spillover.
(2) Cold ohmic losses.
(3) Filter transmission loss.
eta_lens_antenna_rad
The loss at chip temperature, *that is not in the circuit.*
Product of the following. Units: None.
(1) Front-to-back ratio of the lens-antenna on the chip (default: 0.93).
(2) Reflection efficiency at the surface of the lens (default: 0.9).
(3) Matching efficiency, due to the mismatch (default: 0.98).
(4) Spillover efficiency of the lens-antenna (default: 0.993).
These values can be found in D2_2V3.pdf, p14.
eta_circuit
The loss at chip temperature, *in the circuit.*. Units: None.
eta_IBF
Fraction of the filter power transmission that is within the filter
channel bandwidth. Units: None. The rest of the power is cross talk,
picking up power that is in the bands of neighboring channels.
This efficiency applies to the coupling to astronomical line signals.
This efficiency does not apply to the coupling to continuum,
including the coupling to the atmosphere for calculating the NEP.
KID_excess_noise_factor
Need to be documented. Units: None.
theta_maj
The HPBW along the major axis, assuming a Gaussian beam. Units: radians.
theta_min
The HPBW along the minor axis, assuming a Gaussian beam. Units: radians.
eta_mb
Main beam efficiency. Units: None. Note that eta_mb includes
the following terms from D2_2V3.pdf from Shahab's report,
because a decrease in these will launch the beam to the sky
but not couple it to the point source (see also FAQ).
(1) eta_Phi.
(2) eta_amp.
telescope_diameter
Diameter of the telescope. Units: m.
Tb_cmb
Brightness temperature of the CMB. Units: K.
Tp_amb
Physical temperature of the atmosphere and ambient environment
around the telescope. Units: K.
Tp_cabin
Physical temperature of the telescope cabin. Units: K.
Tp_co
Physical temperature of the cold optics inside the cryostat. Units: K.
Tp_chip
Physical temperature of the chip. Units: K.
snr
Target signal to noise to be reached (for calculating the MDLF). Units: None.
obs_hours
Observing hours, including off-source time and the slew overhead
between on- and off-source. Units: hours.
on_source_fraction
Fraction of the time on source (between 0. and 1.). Units: None.
on_off
If the observation involves on_off chopping, then the SNR degrades
by sqrt(2) because the signal difference includes the noise twice.
Returns
----------
F
Best-fit center frequencies from filter_transmission_csv.
Same as input if filter_transmission_csv isn't used. Units: Hz.
pwv
Same as input.
EL
Same as input
eta_atm
Atmospheric transmission within the FWHM of the channel. Units: None.
eta_atm_cont
Atmospheric transmission across the entire width of the filter. Units: None.
R
best-fit F/FWHM fitted from filter_transmission_csv
Equivalent bandwidth within F/R if filter_transmission_csv isn't used.
Units: None
W_F_spec
Best-fit equivalent bandwidth within the FWHM from filter_transmission_csv.
Equivalent bandwidth within F/R if filter_transmission_csv isn't used.
Units: Hz.
W_F_cont
Equivalent bandwidth of 1 channel including the power coupled
outside of the filter channel band. Units: Hz.
theta_maj
Same as input.
theta_min
Same as input.
eta_a
Aperture efficiency. Units: None.
See also: https://deshima.kibe.la/notes/324
eta_mb
Main beam efficiency. Units: None.
eta_forward
Forward efficiency within the FWHM of the channel. Units: None.
See also: https://deshima.kibe.la/notes/324
eta_forward_cont
Forward efficiency across the entire width of the filter. Units: None.
See also: https://deshima.kibe.la/notes/324
eta_sw
Coupling efficiency from a spectral point source to the cryostat window. Units: None.
eta_sw_cont
Coupling efficiency from a continuum point source to the cryostat window. Units: None.
eta_window
Transmission of the cryostat window within the FWHM of the channel.
Units: None.
eta_window_cont
Transmission of the cryostat window across the entire width of the filter.
Units: None.
eta_inst
Instrument optical efficiency within the FWHM of the channel. Units: None.
See also: https://arxiv.org/abs/1901.06934
eta_inst_cont
Instrument optical efficiency across the entire width of the filter. Units: None.
See also: https://arxiv.org/abs/1901.06934
eta_circuit
Equivalent efficiency of the Lorentzian fit from filter_transmission_csv.
Same as input if filter_transmission_csv isn't used. Units: None.
Tb_sky
Planck brightness temperature of the sky. Units: K.
Tb_M1
Planck brightness temperature looking into the telescope primary. Units: K.
Tb_M2
Planck brightness temperature looking into the telescope secondary,
including the spillover to the cold sky. Units: K.
Tb_wo
Planck brightness temperature looking into the warm optics. Units: K.
Tb_window
Planck brightness temperature looking into the window. Units: K.
Tb_co
Planck brightness temperature looking into the cold optics. Units: K.
Tb_filter
Planck brightness temperature looking into the lens from the filter. Units: K.
Tb_KID
Planck brightness temperature looking into the filter from the KID. Units: K.
Pkid
Power absorbed by the KID. Units: W.
Pkid_sky
Power of the sky loading to the KID. Units: W
Pkid_warm
Power of the warm optics loading to the KID. Units: W
Pkid_cold
Power of the cold optics and circuit loading to the KID. Units: W
n_ph
Photon occupation number within the FWHM of the channel. Units: None.
See also: http://adsabs.harvard.edu/abs/1999ASPC..180..671R
n_ph_cont
Photon occupation number across the entire width of the filter. Units: None.
See also: http://adsabs.harvard.edu/abs/1999ASPC..180..671R
NEPkid
Noise equivalent power at the KID with respect to the absorbed power.
Units: W Hz^0.5.
NEPinst
Instrument NEP within the FWHM of the channel. Units: W Hz^0.5.
See also: https://arxiv.org/abs/1901.06934
NEPinst_cont
Instrument NEP across the entire width of the filter. Units: W Hz^0.5.
See also: https://arxiv.org/abs/1901.06934
NEFD_line
Noise Equivalent Flux Density for coupling to a line that is not wider
than the filter bandwidth. Units: W/m^2/Hz * s^0.5.
NEFD_continuum
Noise Equivalent Flux Density for coupling to a continuum source.
Units: W/m^2/Hz * s^0.5.
NEF
Noise Equivalent Flux within the FWHM of the channel. Units: W/m^2 * s^0.5.
NEF_cont
Noise Equivalent Flux across the entire width of the filter. Units: W/m^2 * s^0.5.
MDLF
Minimum Detectable Line Flux. Units: W/m^2.
MS
Mapping Speed. Units: arcmin^2 mJy^-2 h^-1.
snr
Same as input.
obs_hours
Same as input.
on_source_fraction
Same as input.
on_source_hours
Observing hours on source. Units: hours.
equivalent_Trx
Equivalent receiver noise temperature within the FWHM of the channel. Units: K.
at the moment this assumes Rayleigh-Jeans!
equivalent_Trx_cont
Equivalent receiver noise temperature across the entire width of the filter.
Units: K.
at the moment this assumes Rayleigh-Jeans!
chi_sq
The Chi Squared value of the Lorentzian fit from filter_transmission_csv
Zero when filter_transmission_csv is not used. Units: None.
Notes
-----
The parameters to calculate the window transmission / reflection
are hard-coded in the function window_trans().
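Examples
--------
Minimal call sketch (arguments shown are the defaults; the output columns
are described above):
>>> result = spectrometer_sensitivity(F=350.0e9, pwv=0.5, EL=60.0)  # doctest: +SKIP
>>> result[["F", "MDLF"]]  # doctest: +SKIP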
"""
# Filter approximation or read from csv?
if filter_transmission_csv == "":
# Generate filter
(
eta_filter,
eta_inband,
F,
F_int,
W_F_int,
box_height,
box_width,
chi_sq,
) = eta_filter_lorentzian(F, F / R, eta_circuit, F_res, overflow)
R = F / box_width
else:
# Read from csv
(
eta_filter,
eta_inband,
F,
F_int,
W_F_int,
box_height,
box_width,
chi_sq,
) = eta_filter_csv(filter_transmission_csv)
# Equivalent Bandwidth of 1 channel, modelled as a box filter.
# Used for calculating loading and coupling to a continuum source
W_F_cont = box_width / eta_IBF
# Used for calculating coupling to a line source,
# with a linewidth not wider than the filter channel
W_F_spec = box_width
# Efficiency of filter channels
eta_circuit = box_height
# #############################################################
# 1. Calculating loading power absorbed by the KID, and the NEP
# #############################################################
# .......................................................
# Efficiencies for calculating sky coupling
# .......................................................
# Ohmic loss as a function of frequency, from skin effect scaling
eta_Al_ohmic = 1.0 - (1.0 - eta_Al_ohmic_850) * np.sqrt(F_int / 850.0e9)
eta_M1_ohmic = eta_Al_ohmic
eta_M2_ohmic = eta_Al_ohmic
# Collect efficiencies at the same temperature
eta_M1 = eta_M1_ohmic * eta_M1_spill
eta_wo = eta_Al_ohmic**n_wo_mirrors * eta_wo_spill
# Forward efficiency: does/should not include window loss
# because it is defined as how much power out of
# the cryostat window couples to the cold sky.
eta_forward_spec = weighted_average(
eta_M1 * eta_M2_ohmic * eta_M2_spill * eta_wo + (1.0 - eta_M2_spill) * eta_wo,
eta_inband,
)
eta_forward_cont = weighted_average(
eta_M1 * eta_M2_ohmic * eta_M2_spill * eta_wo + (1.0 - eta_M2_spill) * eta_wo,
eta_filter,
)
# Calculate eta at the center of each integration bin
eta_atm = eta_atm_func(F=F_int, pwv=pwv, EL=EL)
# Johnson-Nyquist Power Spectral Density (W/Hz)
# for the physical temperatures of each stage
psd_jn_cmb = johnson_nyquist_psd(F=F_int, T=Tb_cmb)
psd_jn_amb = johnson_nyquist_psd(F=F_int, T=Tp_amb)
psd_jn_cabin = johnson_nyquist_psd(F=F_int, T=Tp_cabin)
psd_jn_co = johnson_nyquist_psd(F=F_int, T=Tp_co)
psd_jn_chip = johnson_nyquist_psd(F=F_int, T=Tp_chip)
# Optical Chain
# Sequentially calculate the Power Spectral Density (W/Hz) at each stage.
# Uses only basic radiation transfer: rad_out = eta*rad_in + (1-eta)*medium
psd_sky = rad_trans(rad_in=psd_jn_cmb, medium=psd_jn_amb, eta=eta_atm)
psd_M1 = rad_trans(rad_in=psd_sky, medium=psd_jn_amb, eta=eta_M1)
psd_M2 = rad_trans(rad_in=psd_M1, medium=psd_jn_amb, eta=eta_M2_ohmic)
psd_M2_spill = rad_trans(rad_in=psd_M2, medium=psd_sky, eta=eta_M2_spill)
psd_wo = rad_trans(rad_in=psd_M2_spill, medium=psd_jn_cabin, eta=eta_wo)
[psd_window, eta_window] = window_trans(
F=F_int,
psd_in=psd_wo,
psd_cabin=psd_jn_cabin,
psd_co=psd_jn_co,
window_AR=window_AR,
)
psd_co = rad_trans(rad_in=psd_window, medium=psd_jn_co, eta=eta_co)
psd_filter = rad_trans(rad_in=psd_co, medium=psd_jn_chip, eta=eta_lens_antenna_rad)
# Instrument optical efficiency as in JATIS 2019
# (eta_inst can be calculated only after calculating eta_window)
eta_inst_spec = (
eta_lens_antenna_rad
* eta_co
* eta_circuit
* weighted_average(eta_window, eta_inband)
)
eta_inst_cont = (
eta_lens_antenna_rad
* eta_co
* eta_circuit
* weighted_average(eta_window, eta_filter)
)
# Calculating Sky loading, Warm loading and Cold loading individually for reference
# (Not required for calculating Pkid, but serves as a consistency check.)
# .................................................................................
# Sky loading
psd_KID_sky_1 = (
psd_sky
* eta_M1
* eta_M2_spill
* eta_M2_ohmic
* eta_wo
* eta_lens_antenna_rad
* eta_co
* eta_window
)
psd_KID_sky_2 = (
rad_trans(0, psd_sky, eta_M2_spill)
* eta_M2_ohmic
* eta_wo
* eta_lens_antenna_rad
* eta_co
* eta_window
)
psd_KID_sky = psd_KID_sky_1 + psd_KID_sky_2
skycoup = weighted_average(
psd_KID_sky / psd_sky, eta_filter
) # To compare with Jochem
# Warm loading
psd_KID_warm = (
window_trans(
F=F_int,
psd_in=rad_trans(
rad_trans(
rad_trans(
rad_trans(0, psd_jn_amb, eta_M1), 0, eta_M2_spill
), # sky spillover does not count for warm loading
psd_jn_amb,
eta_M2_ohmic,
),
psd_jn_cabin,
eta_wo,
),
psd_cabin=psd_jn_cabin,
psd_co=0,
window_AR=window_AR,
)[0]
* eta_co
* eta_lens_antenna_rad
)
# Cold loading
psd_KID_cold = rad_trans(
rad_trans(
window_trans(
F=F_int,
psd_in=0.0,
psd_cabin=0.0,
psd_co=psd_jn_co,
window_AR=window_AR,
)[0],
psd_jn_co,
eta_co,
),
psd_jn_chip,
eta_lens_antenna_rad,
)
# Loading power absorbed by the KID
# .............................................
""" if np.all(psd_filter != psd_KID_sky + psd_KID_warm + psd_KID_cold):
print("WARNING: psd_filter != psd_KID_sky + psd_KID_warm + psd_KID_cold")
"""
Pkid = np.sum(psd_filter * W_F_int * eta_filter, axis=1)
Pkid_sky = np.sum(psd_KID_sky * W_F_int * eta_filter, axis=1)
Pkid_warm = np.sum(psd_KID_warm * W_F_int * eta_filter, axis=1)
Pkid_cold = np.sum(psd_KID_cold * W_F_int * eta_filter, axis=1)
# Photon + R(ecombination) NEP of the KID
# .............................................
n_ph_spec = weighted_average(psd_filter / (h * F_int), eta_inband) * eta_circuit
n_ph_cont = weighted_average(psd_filter / (h * F_int), eta_filter) * eta_circuit
NEPkid = (
photon_NEP_kid(F_int, psd_filter * W_F_int * eta_filter, W_F_int)
* KID_excess_noise_factor
)
# Instrument NEP as in JATIS 2019
# .............................................
NEPinst_spec = NEPkid / eta_inst_spec # Instrument NEP
NEPinst_cont = NEPkid / eta_inst_cont # Instrument NEP
# ##############################################################
# 2. Calculating source coupling and sensitivtiy (MDLF and NEFD)
# ##############################################################
# Efficiencies
# .........................................................
Ag = np.pi * (telescope_diameter / 2.0) ** 2.0 # Geometric area of the telescope
omega_mb = np.pi * theta_maj * theta_min / np.log(2) / 4 # Main beam solid angle
omega_a = omega_mb / eta_mb # beam solid angle
Ae = (c / F) ** 2 / omega_a # Effective Aperture (m^2): lambda^2 / omega_a
eta_a = Ae / Ag # Aperture efficiency
# Coupling from the "S"ource to outside of "W"indow
eta_pol = 0.5 # Instrument is single polarization
eta_atm_spec = weighted_average(eta_atm, eta_inband)
eta_atm_cont = weighted_average(eta_atm, eta_filter)
eta_sw_spec = (
eta_pol * eta_a * eta_forward_spec * eta_atm_spec
) # Source-Window coupling
eta_sw_cont = (
eta_pol * eta_a * eta_forward_cont * eta_atm_cont
) # Source-Window coupling
# NESP: Noise Equivalent Source Power (an intermediate quantity)
# .........................................................
NESP_spec = NEPinst_spec / eta_sw_spec # Noise equivalent source power
NESP_cont = NEPinst_cont / eta_sw_cont # Noise equivalent source power
# NEF: Noise Equivalent Flux (an intermediate quantity)
# .........................................................
# From this point, units change from Hz^-0.5 to t^0.5
# sqrt(2) is because NEP is defined for 0.5 s integration.
NEF_spec = NESP_spec / Ag / np.sqrt(2) # Noise equivalent flux
NEF_cont = NESP_cont / Ag / np.sqrt(2) # Noise equivalent flux
# If the observation involves ON-OFF sky subtraction,
# subtraction of two noisy sources results in a sqrt(2) increase in noise.
if on_off:
NEF_spec = np.sqrt(2) * NEF_spec
NEF_cont = np.sqrt(2) * NEF_cont
# MDLF (Minimum Detectable Line Flux)
# .........................................................
# Note that eta_IBF does not matter for MDLF because it is flux.
MDLF = NEF_spec * snr / np.sqrt(obs_hours * on_source_fraction * 60.0 * 60.0)
# NEFD (Noise Equivalent Flux Density)
# .........................................................
continuum_NEFD = NEF_cont / W_F_cont
spectral_NEFD = NEF_spec / W_F_spec # = continuum_NEFD / eta_IBF > spectral_NEFD
# Mapping Speed (line, 1 channel) (arcmin^2 mJy^-2 h^-1)
# .........................................................
MS = (
60.0
* 60.0
* 1.0
* omega_mb
* (180.0 / np.pi * 60.0) ** 2.0
/ (np.sqrt(2) * spectral_NEFD * 1e29) ** 2.0
)
# Equivalent Trx
# .........................................................
Trx_spec = NEPinst_spec / k / np.sqrt(2 * W_F_cont) - T_from_psd(
F, weighted_average(psd_wo, eta_inband)
) # assumes RJ!
Trx_cont = NEPinst_cont / k / np.sqrt(2 * W_F_cont) - T_from_psd(
F, weighted_average(psd_wo, eta_filter)
) # assumes RJ!
# ############################################
# 3. Output results as Pandas DataFrame
# ############################################
result = pd.concat(
[
pd.Series(F, name="F"),
pd.Series(pwv, name="PWV"),
pd.Series(EL, name="EL"),
pd.Series(eta_atm_spec, name="eta_atm"),
pd.Series(eta_atm_cont, name="eta_atm_cont"),
pd.Series(R, name="R"),
pd.Series(W_F_spec, name="W_F_spec"),
pd.Series(W_F_cont, name="W_F_cont"),
pd.Series(theta_maj, name="theta_maj"),
pd.Series(theta_min, name="theta_min"),
pd.Series(eta_a, name="eta_a"),
pd.Series(eta_mb, name="eta_mb"),
pd.Series(eta_forward_spec, name="eta_forward"),
pd.Series(eta_forward_cont, name="eta_forward_cont"),
pd.Series(eta_sw_spec, name="eta_sw"),
pd.Series(eta_sw_cont, name="eta_sw_cont"),
pd.Series(weighted_average(eta_window, eta_inband), name="eta_window"),
pd.Series(weighted_average(eta_window, eta_filter), name="eta_window_cont"),
pd.Series(eta_inst_spec, name="eta_inst"),
pd.Series(eta_inst_cont, name="eta_inst_cont"),
pd.Series(eta_circuit, name="eta_circuit"),
pd.Series(
weighted_average(T_from_psd(F_int, psd_sky), eta_filter), name="Tb_sky"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_M1), eta_filter), name="Tb_M1"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_M2), eta_filter), name="Tb_M2"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_wo), eta_filter), name="Tb_wo"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_window), eta_filter),
name="Tb_window",
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_co), eta_filter), name="Tb_co"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_filter), eta_filter),
name="Tb_filter",
),
pd.Series(
T_from_psd(F, eta_circuit * weighted_average(psd_filter, eta_filter)),
name="Tb_KID",
),
pd.Series(Pkid, name="Pkid"),
pd.Series(Pkid_sky, name="Pkid_sky"),
pd.Series(Pkid_warm, name="Pkid_warm"),
| pd.Series(Pkid_cold, name="Pkid_cold") | pandas.Series |
from sklearn.covariance import EmpiricalCovariance, LedoitWolf, OAS
from sklearn.model_selection import GroupShuffleSplit
from scipy.spatial.distance import mahalanobis
import numpy as np
import pandas as pd
from multiprocessing import Pool
from contextlib import closing
from functools import partial
def similarity(X, y, group=None, n_splits = 1000, class_order=None,
return_raw=False, return_distance=False, normalize=False,
split_size=None, distance='mahalanobis',
cov_estimator='oas', cov_method='shared_split', n_jobs=1):
"""
Calculates similarity between points of each class.
Parameters
----------
X, y: arrays of same shape[0]
Respectively the features and labels.
group : array of shape equal to y, optional
Half of groups will make each split.
If None, y is used instead.
n_splits : int, default 1000
How many times to repeat the separation and calculation of distances.
class_order : list, optional
Class ordering. If None, np.unique(y) is used
split_size : int, optional
size of each set on each split.
if None, half of the groups are used in each (floor rounded)
distance : {'mahalanobis', 'euclidean'}
How to measure distance between split means.
cov_estimator : {'oas', 'lw', 'ml'}
Which method will decide regularization strength
Ignored if distance is 'euclidean'
cov_method : {'shared_split','shared_single', 'class_single', 'class_split'}
shared_single - only one covariance for whole dataset
shared_split - one covariance, recalculated in each split
class_single - one covariance per class
class_split - one covariance per class per split
Ignored if distance is 'euclidean'
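Examples
--------
Sketch of a typical call, with X of shape (n_samples, n_features) and y of
shape (n_samples, 1) as described above:
>>> dists = similarity(X, y, n_splits=100, distance='mahalanobis')  # doctest: +SKIP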
"""
assert cov_method in ['shared_split','shared_single', 'class_split', 'class_single']
assert y.shape[1]==1
y = y.ravel()
classes = np.unique(y) if class_order is None else class_order
groups = classes if group is None else np.unique(group)
split_size = len(groups)//2 if split_size is None else split_size
# sh = GroupShuffleSplit(n_splits, split_size, split_size)
if distance == 'mahalanobis':
clf = MahalanobisClassifier(classes=classes, estimator=cov_estimator,
shared_cov= ('shared' in cov_method),
assume_centered=False)
if 'split' not in cov_method:
clf.fit_cov(X, y)
elif distance == 'euclidean':
clf = EuclideanClassifier()
raise NotImplementedError
else:
raise NotImplementedError
with closing(Pool(n_jobs)) as p:
func = partial(one_split, clf=clf, split_size=split_size,
X=X, y=y, group=group, classes=classes,
cov_method=cov_method)
res = p.map(func, np.arange(n_splits))
results = | pd.concat(res) | pandas.concat |
# Copyright 2017-2021 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run: python3 -m unittest discover -s tests/ -p test_*.py -t . -v
import os
import unittest
from unittest.mock import patch
import glob
import pickle
import joblib
import inspect
import pandas as pd
import numpy as np
from moonshot import MoonshotML
from moonshot.cache import TMP_DIR
from moonshot.exceptions import MoonshotError
from sklearn.tree import DecisionTreeClassifier
class SKLearnMachineLearningTestCase(unittest.TestCase):
def setUp(self):
"""
Trains a scikit-learn model.
"""
self.model = DecisionTreeClassifier()
# Predict Y will be same as X
X = np.array([[1,1],[0,0]])
Y = np.array([1,0])
self.model.fit(X, Y)
self.pickle_path = "{0}/decision_tree_model.pkl".format(TMP_DIR)
self.joblib_path = "{0}/decision_tree_model.joblib".format(TMP_DIR)
def tearDown(self):
"""
Remove cached files.
"""
for file in glob.glob("{0}/moonshot*.pkl".format(TMP_DIR)):
os.remove(file)
for file in (self.pickle_path, self.joblib_path):
if os.path.exists(file):
os.remove(file)
def test_complain_if_mix_dataframe_and_series(self):
"""
Tests error handling when the features list contains a mix of
DataFrames and Series.
"""
# pickle model
with open(self.pickle_path, "wb") as f:
pickle.dump(self.model, f)
class DecisionTreeML1(MoonshotML):
MODEL = self.pickle_path
def prices_to_features(self, prices):
features = []
# DataFrame then Series
features.append(prices.loc["Close"] > 10)
features.append(prices.loc["Close"]["FI12345"] > 10)
return features, None
class DecisionTreeML2(MoonshotML):
MODEL = self.pickle_path
def prices_to_features(self, prices):
features = []
# Series then DataFrame
features.append(prices.loc["Close"]["FI12345"] > 10)
features.append(prices.loc["Close"] > 10)
return features, None
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
fields = ["Close"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
"FI12345": [
# Close
9,
11,
10.50,
9.99,
],
"FI23456": [
# Close
9.89,
11,
8.50,
10.50,
],
},
index=idx
)
prices.columns.name = "Sid"
return prices
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
"FI12345": [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
"FI23456": [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "Sid"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_prices", new=mock_get_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
with self.assertRaises(MoonshotError) as cm:
results = DecisionTreeML1().backtest()
self.assertIn(
"features should be either all DataFrames or all Series, not a mix of both",
repr(cm.exception))
# clear cache
for file in glob.glob("{0}/moonshot*.pkl".format(TMP_DIR)):
os.remove(file)
with self.assertRaises(MoonshotError) as cm:
results = DecisionTreeML2().backtest()
self.assertIn(
"features should be either all DataFrames or all Series, not a mix of both",
repr(cm.exception))
def test_complain_if_no_targets(self):
"""
Tests error handling when prices_to_features doesn't return a two-tuple.
"""
# pickle model
with open(self.pickle_path, "wb") as f:
pickle.dump(self.model, f)
class DecisionTreeML(MoonshotML):
MODEL = self.pickle_path
def prices_to_features(self, prices):
features = []
features.append(prices.loc["Close"] > 10)
features.append(prices.loc["Close"] > 100)
return features
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
fields = ["Close"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
"FI12345": [
# Close
9,
11,
10.50,
9.99,
],
"FI23456": [
# Close
9.89,
11,
8.50,
10.50,
],
},
index=idx
)
prices.columns.name = "Sid"
return prices
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
"FI12345": [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
"FI23456": [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "Sid"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_prices", new=mock_get_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
with self.assertRaises(MoonshotError) as cm:
results = DecisionTreeML().backtest()
self.assertIn(
"prices_to_features should return a tuple of (features, targets)", repr(cm.exception))
def test_backtest_from_pickle(self):
"""
Tests that the resulting DataFrames are correct after running a basic
machine learning strategy and loading the model from a pickle.
"""
# pickle model
with open(self.pickle_path, "wb") as f:
pickle.dump(self.model, f)
class DecisionTreeML(MoonshotML):
MODEL = self.pickle_path
def prices_to_features(self, prices):
features = {}
features["feature1"] = prices.loc["Close"] > 10
features["feature2"] = prices.loc["Close"] > 10 # silly, duplicate feature
return features, None
def predictions_to_signals(self, predictions, prices):
# Go long when price is predicted to be below 10
signals = predictions == 0
return signals.astype(int)
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
fields = ["Close"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
"FI12345": [
# Close
9,
11,
10.50,
9.99,
],
"FI23456": [
# Close
9.89,
11,
8.50,
10.50,
],
},
index=idx
)
prices.columns.name = "Sid"
return prices
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
"FI12345": [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
"FI23456": [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "Sid"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_prices", new=mock_get_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
results = DecisionTreeML().backtest()
self.assertSetEqual(
set(results.index.get_level_values("Field")),
{'Commission',
'AbsExposure',
'Signal',
'Return',
'Slippage',
'NetExposure',
'TotalHoldings',
'Turnover',
'AbsWeight',
'Weight'}
)
# replace nan with "nan" to allow equality comparisons
results = results.round(7)
results = results.where(results.notnull(), "nan")
signals = results.loc["Signal"].reset_index()
signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
signals.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [1.0,
0.0,
0.0,
1.0],
"FI23456": [1.0,
0.0,
1.0,
0.0]}
)
weights = results.loc["Weight"].reset_index()
weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
0.0,
0.0,
1.0],
"FI23456": [0.5,
0.0,
1.0,
0.0]}
)
abs_weights = results.loc["AbsWeight"].reset_index()
abs_weights.loc[:, "Date"] = abs_weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
0.0,
0.0,
1.0],
"FI23456": [0.5,
0.0,
1.0,
0.0]}
)
net_positions = results.loc["NetExposure"].reset_index()
net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
net_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.0,
0.0],
"FI23456": ["nan",
0.5,
0.0,
1.0]}
)
abs_positions = results.loc["AbsExposure"].reset_index()
abs_positions.loc[:, "Date"] = abs_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.0,
0.0],
"FI23456": ["nan",
0.5,
0.0,
1.0]}
)
total_holdings = results.loc["TotalHoldings"].reset_index()
total_holdings.loc[:, "Date"] = total_holdings.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
total_holdings.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0,
1.0,
0,
0],
"FI23456": [0,
1.0,
0,
1.0]}
)
turnover = results.loc["Turnover"].reset_index()
turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
turnover.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.5,
0.0],
"FI23456": ["nan",
0.5,
0.5,
1.0]}
)
commissions = results.loc["Commission"].reset_index()
commissions.loc[:, "Date"] = commissions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
commissions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
slippage = results.loc["Slippage"].reset_index()
slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
slippage.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
returns = results.loc["Return"]
returns = returns.reset_index()
returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
returns.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
-0.0227273, # (10.50 - 11)/11 * 0.5
-0.0],
"FI23456": [0.0,
0.0,
-0.1136364, # (8.50 - 11)/11 * 0.5
0.0]})
def test_backtest_from_joblib(self):
"""
Tests that the resulting DataFrames are correct after running a basic
machine learning strategy and loading the model from joblib.
"""
# save model
joblib.dump(self.model, self.joblib_path)
class DecisionTreeML(MoonshotML):
MODEL = self.joblib_path
def prices_to_features(self, prices):
features = {}
features["feature1"] = prices.loc["Close"] > 10
features["feature2"] = prices.loc["Close"] > 10 # silly, duplicate feature
return features, None
def predictions_to_signals(self, predictions, prices):
# Go long when price is predicted to be below 10
signals = predictions == 0
return signals.astype(int)
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
fields = ["Close"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
"FI12345": [
# Close
9,
11,
10.50,
9.99,
],
"FI23456": [
# Close
9.89,
11,
8.50,
10.50,
],
},
index=idx
)
prices.columns.name = "Sid"
return prices
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
"FI12345": [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
"FI23456": [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "Sid"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_prices", new=mock_get_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
results = DecisionTreeML().backtest()
self.assertSetEqual(
set(results.index.get_level_values("Field")),
{'Commission',
'AbsExposure',
'Signal',
'Return',
'Slippage',
'NetExposure',
'TotalHoldings',
'Turnover',
'AbsWeight',
'Weight'}
)
# replace nan with "nan" to allow equality comparisons
results = results.round(7)
results = results.where(results.notnull(), "nan")
signals = results.loc["Signal"].reset_index()
signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
signals.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [1.0,
0.0,
0.0,
1.0],
"FI23456": [1.0,
0.0,
1.0,
0.0]}
)
weights = results.loc["Weight"].reset_index()
weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
0.0,
0.0,
1.0],
"FI23456": [0.5,
0.0,
1.0,
0.0]}
)
abs_weights = results.loc["AbsWeight"].reset_index()
abs_weights.loc[:, "Date"] = abs_weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
0.0,
0.0,
1.0],
"FI23456": [0.5,
0.0,
1.0,
0.0]}
)
net_positions = results.loc["NetExposure"].reset_index()
net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
net_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.0,
0.0],
"FI23456": ["nan",
0.5,
0.0,
1.0]}
)
abs_positions = results.loc["AbsExposure"].reset_index()
abs_positions.loc[:, "Date"] = abs_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.0,
0.0],
"FI23456": ["nan",
0.5,
0.0,
1.0]}
)
total_holdings = results.loc["TotalHoldings"].reset_index()
total_holdings.loc[:, "Date"] = total_holdings.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
total_holdings.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0,
1.0,
0,
0],
"FI23456": [0,
1.0,
0,
1.0]}
)
turnover = results.loc["Turnover"].reset_index()
turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
turnover.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.5,
0.0],
"FI23456": ["nan",
0.5,
0.5,
1.0]}
)
commissions = results.loc["Commission"].reset_index()
commissions.loc[:, "Date"] = commissions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
commissions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
slippage = results.loc["Slippage"].reset_index()
slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
slippage.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
returns = results.loc["Return"]
returns = returns.reset_index()
returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
returns.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
-0.0227273, # (10.50 - 11)/11 * 0.5
-0.0],
"FI23456": [0.0,
0.0,
-0.1136364, # (8.50 - 11)/11 * 0.5
0.0]})
def test_predict_proba(self):
"""
Tests that the resulting DataFrames are correct after running a basic
machine learning strategy and using predict_proba instead of predict.
"""
# save model
joblib.dump(self.model, self.joblib_path)
class DecisionTreeML(MoonshotML):
MODEL = self.joblib_path
def prices_to_features(self, prices):
self.model.predict = self.model.predict_proba
features = {}
features["feature1"] = prices.loc["Close"] > 10
features["feature2"] = prices.loc["Close"] > 10 # silly, duplicate feature
return features, None
def predictions_to_signals(self, predictions, prices):
# Go long when <50% probability of being in class 1 (class 1 = price > 10)
signals = predictions < 0.5
return signals.astype(int)
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
fields = ["Close"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
"FI12345": [
# Close
9,
11,
10.50,
9.99,
],
"FI23456": [
# Close
9.89,
11,
8.50,
10.50,
],
},
index=idx
)
prices.columns.name = "Sid"
return prices
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
"FI12345": [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
"FI23456": [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "Sid"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_prices", new=mock_get_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
results = DecisionTreeML().backtest()
self.assertSetEqual(
set(results.index.get_level_values("Field")),
{'Commission',
'AbsExposure',
'Signal',
'Return',
'Slippage',
'NetExposure',
'TotalHoldings',
'Turnover',
'AbsWeight',
'Weight'}
)
# replace nan with "nan" to allow equality comparisons
results = results.round(7)
results = results.where(results.notnull(), "nan")
signals = results.loc["Signal"].reset_index()
signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
signals.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [1.0,
0.0,
0.0,
1.0],
"FI23456": [1.0,
0.0,
1.0,
0.0]}
)
weights = results.loc["Weight"].reset_index()
weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
0.0,
0.0,
1.0],
"FI23456": [0.5,
0.0,
1.0,
0.0]}
)
abs_weights = results.loc["AbsWeight"].reset_index()
abs_weights.loc[:, "Date"] = abs_weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
0.0,
0.0,
1.0],
"FI23456": [0.5,
0.0,
1.0,
0.0]}
)
net_positions = results.loc["NetExposure"].reset_index()
net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
net_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.0,
0.0],
"FI23456": ["nan",
0.5,
0.0,
1.0]}
)
abs_positions = results.loc["AbsExposure"].reset_index()
abs_positions.loc[:, "Date"] = abs_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.0,
0.0],
"FI23456": ["nan",
0.5,
0.0,
1.0]}
)
total_holdings = results.loc["TotalHoldings"].reset_index()
total_holdings.loc[:, "Date"] = total_holdings.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
total_holdings.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0,
1.0,
0,
0],
"FI23456": [0,
1.0,
0,
1.0]}
)
turnover = results.loc["Turnover"].reset_index()
turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
turnover.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.5,
0.0],
"FI23456": ["nan",
0.5,
0.5,
1.0]}
)
commissions = results.loc["Commission"].reset_index()
commissions.loc[:, "Date"] = commissions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
commissions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
slippage = results.loc["Slippage"].reset_index()
slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
slippage.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
returns = results.loc["Return"]
returns = returns.reset_index()
returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
returns.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
-0.0227273, # (10.50 - 11)/11 * 0.5
-0.0],
"FI23456": [0.0,
0.0,
-0.1136364, # (8.50 - 11)/11 * 0.5
0.0]})
def test_backtest_pass_model(self):
"""
Tests that the resulting DataFrames are correct after running a basic
machine learning strategy and passing the model directly.
"""
class DecisionTreeML(MoonshotML):
MODEL = "nosuchpath.pkl" # should be ignored
def prices_to_features(self, prices):
features = {}
features["feature1"] = prices.loc["Close"] > 10
features["feature2"] = prices.loc["Close"] > 10 # silly, duplicate feature
return features, None
def predictions_to_signals(self, predictions, prices):
# Go long when price is predicted to be below 10
signals = predictions == 0
return signals.astype(int)
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
fields = ["Close"]
idx = | pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"]) | pandas.MultiIndex.from_product |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 11 19:07:28 2018
@author: deadpool
"""
import pandas as pd
from sklearn.manifold import Isomap
from scipy import misc
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import os
plt.style.use('ggplot')
folder = 'Datasets/ALOI/32'
samples = []
colors = []
for imgname in os.listdir(folder):
img = misc.imread(os.path.join(folder, imgname))
samples.append((img/255.0).reshape(-1))
colors.append('b')
folder += 'i'
for imgname in os.listdir(folder):
img = misc.imread(os.path.join(folder, imgname))
samples.append((img/255.0).reshape(-1))
colors.append('r')
df = | pd.DataFrame(samples) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
module for implementation of indicator class, which is designed as MinIn for systems with netvalues
"""
import pandas as pd
from pyecharts.globals import CurrentConfig, NotebookType
from pyecharts import options as opts
from pyecharts.charts import Kline, Line, Bar, Grid
from pyecharts.commons.utils import JsCode
from xalpha.cons import line_opts, opendate, yesterdayobj, sqrt_days_in_year
def _upcount(ls):
"""
    count the ratio of up-move days in a given series of net values
"""
count = 0
for i in range(len(ls) - 1):
        # note: since pandas 0.23 (or 0.22?) the input arrives as a Series rather than a plain list, so positional .iloc indexing is used here
if ls.iloc[i + 1] > ls.iloc[i]:
count += 1
return count / (len(ls) - 1)
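# Illustrative example (not part of the original module): three of the four
# day-over-day moves below are up, so the up-move ratio is 0.75.
#   _upcount(pd.Series([1.0, 1.1, 1.05, 1.2, 1.3]))  # -> 0.75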
class indicator:
"""
    MixIn class providing a quant indicator toolbox, designed as an interface for the mulfix class as well
    as the info class, both of which are treated as a single fund with a price table of net values.
    Most of the quant indicators, their naming conventions, definitions and calculations are from
    `joinquant <https://www.joinquant.com/help/api/help?name=api#%E9%A3%8E%E9%99%A9%E6%8C%87%E6%A0%87>`_.
    Make sure to run obj.bcmkset() first before using the functions in this class.
"""
def bcmkset(self, infoobj, start=None, riskfree=0.0371724, name="基金组合"):
"""
        Before using the indicator toolbox for analysis, first run the bcmkset function to set
        the benchmark; otherwise most of the functions will raise an error.
        :param infoobj: info obj whose netvalue is used as the benchmark
        :param start: datetime obj, indicating the starting date of all analysis.
            Note that if the default start is used, there may be problems for some fundinfo objects, since many
            funds lack netvalues for several days in our API, resulting in unequal lengths between
            the benchmark and the fund net values.
        :param riskfree: float, annual rate in the unit of 100%; it is strongly suggested to keep this value
            consistent with the interest parameter used when instantiating the cashinfo() class
"""
self._pricegenerate(name)
if start is None:
self.start = self.price.iloc[0].date
elif isinstance(start, str):
self.start = pd.to_datetime(
start, format="%Y-%m-%d"
) # pd.Timestamp.strptime(start, "%Y-%m-%d")
self.benchmark = infoobj
self.riskfree = riskfree
self.bmprice = self.benchmark.price[self.benchmark.price["date"] >= self.start]
self.price = self.price[self.price["date"] >= self.start]
self.bmprice = self.bmprice[self.bmprice["date"].isin(self.price["date"])]
self.price = self.price[self.price["date"].isin(self.bmprice["date"])]
# the price data is removed from the infoobj before start date
def _pricegenerate(self, name):
"""
generate price table for mulfix class, the cinfo class has this attr by default
"""
        if getattr(self, "price", None) is None:  # fund portfolio class rather than fund info class
times = pd.date_range(self.totcftable.iloc[0].date, yesterdayobj())
netvalue = []
for date in times:
netvalue.append(self.unitvalue(date)) # may take a long time
self.price = pd.DataFrame(data={"date": times, "netvalue": netvalue})
self.price = self.price[self.price["date"].isin(opendate)]
self.name = name
def comparison(self, date=yesterdayobj()):
"""
        :returns: tuple of two pd.DataFrame, the first for the aim and the second for the benchmark index;
            all netvalues are normalized and set equal to 1.00 on the self.start date
"""
partp = self.price[self.price["date"] <= date]
partm = self.bmprice[self.bmprice["date"] <= date]
normp = partp.iloc[0].netvalue
normm = partm.iloc[0].netvalue
partp["netvalue"] = partp["netvalue"] / normp
partm["netvalue"] = partm["netvalue"] / normm
return (partp, partm)
def total_return(self, date=yesterdayobj()):
return round(
(
self.price[self.price["date"] <= date].iloc[-1].netvalue
- self.price.iloc[0].netvalue
)
/ self.price.iloc[0].netvalue,
4,
)
@staticmethod
def annualized_returns(price, start, date=yesterdayobj()):
"""
:param price: price table of info().price
:param start: datetime obj for starting date of calculation
:param date: datetime obj for ending date of calculation
:returns: float, annualized returns of the price table
"""
datediff = (price[price["date"] <= date].iloc[-1].date - start).days
totreturn = (
price[price["date"] <= date].iloc[-1].netvalue - price.iloc[0].netvalue
) / price.iloc[0].netvalue
return round((1 + totreturn) ** (365 / datediff) - 1, 4)
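        # Worked example (illustrative, not part of the original module): if the net value grows
        # from 1.0 to 1.2 over 182 days, then totreturn = 0.2, datediff = 182, and the
        # annualized return is (1 + 0.2) ** (365 / 182) - 1, roughly 0.4415 (about 44% a year).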
def total_annualized_returns(self, date=yesterdayobj()):
return indicator.annualized_returns(self.price, self.start, date)
def benchmark_annualized_returns(self, date=yesterdayobj()):
return indicator.annualized_returns(self.bmprice, self.start, date)
def pct_chg(self, freq="Y", benchmark=True):
"""
        Yearly, monthly and weekly percentage-change statistics
:param freq: str, default Y, could be M or W or anything pd.date_range accepts
:return: pd.DataFrame with columns date and pct_chg
"""
if getattr(self, "bmprice", None) is None:
benchmark = False
ydf = pd.merge_asof(
pd.DataFrame(
pd.date_range(
self.price["date"].iloc[0], self.price["date"].iloc[-1], freq=freq
),
columns=["date"],
),
self.price,
)
ydf["pct_chg"] = ydf["netvalue"].pct_change()
if benchmark:
ydf = pd.merge_asof(ydf, self.bmprice, on="date", suffixes=["", "_bc"])
ydf["pct_chg_benchmark"] = ydf["netvalue_bc"].pct_change()
ydf["pct_chg_difference"] = ydf["pct_chg"] - ydf["pct_chg_benchmark"]
return ydf[["date", "pct_chg", "pct_chg_benchmark", "pct_chg_difference"]]
return ydf[["date", "pct_chg"]]
def beta(self, date=yesterdayobj()):
bcmk = indicator.ratedaily(self.bmprice, date)
bt = indicator.ratedaily(self.price, date)
df = | pd.DataFrame(data={"bcmk": bcmk, "bt": bt}) | pandas.DataFrame |
import ast
import json
import os
from io import BytesIO
from sys import argv
import cv2
import numpy as np
import pandas as pd
import requests
from PIL import Image
def get_box_centers_all_emotions(sample_row):
"""
input: A dictionary of the format
{
emotion1: [{geometry_box1: [list of 4 corner coordinates],
                 geometry_box2: [list of 4 corner coordinates],
...}]
emotion2: [...],
...
emotionN: [...]
}
output: A dictionary of the format
{
     emotion1: [(x_center, y_center, box_width, box_height), (x_center, y_center, box_width, box_height), ...],
emotion2: [...],
...
emotionN: [...]
}
"""
lab = json.loads(sample_row)
centers = {}
for key, val in lab.items():
lab_df = pd.DataFrame(val)
centers[key] = lab_df.iloc[:, 0].apply(lambda s: get_box_center(s)).tolist()
return centers
def get_box_center(box_row):
"""
input: A list of 4 coordinates, representing the four corners of the bounding box.
Each coordinate is a dictionary of {'x': xcoord, 'y': ycoord}
output: A tuple representing the coordinates of the center of the bounding box, and its width and height.
"""
x_coords = []
y_coords = []
for item in box_row:
x_coords.append(item['x'])
y_coords.append(item['y'])
x_center = (np.max(x_coords) - np.min(x_coords)) / 2 + np.min(x_coords)
y_center = (np.max(y_coords) - np.min(y_coords)) / 2 + np.min(y_coords)
box_width = np.max(x_coords) - np.min(x_coords)
box_height = np.max(y_coords) - np.min(y_coords)
return x_center, y_center, box_width, box_height
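# Worked example (illustrative, not part of the original script): for a box with corners
# (10, 20), (30, 20), (30, 40) and (10, 40):
#   get_box_center([{'x': 10, 'y': 20}, {'x': 30, 'y': 20}, {'x': 30, 'y': 40}, {'x': 10, 'y': 40}])
#   # -> (20.0, 30.0, 20, 20), i.e. (x_center, y_center, box_width, box_height)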
def get_width_height_labelbox(sample_row):
response = requests.get(sample_row)
if response.status_code == 200: # !!!
img = Image.open(BytesIO(response.content))
return img.width, img.height
else:
return 0, 0
def get_width_height_local(file_name, image_folder_path):
file_path = os.path.join(image_folder_path, file_name)
img_width, img_height = 0, 0
img = cv2.imread(file_path)
if img is not None:
img_height, img_width, channels = img.shape
print('returned local dimensions for {}: ({}, {})'.format(file_name, img_width, img_height))
else:
print('could not load image for {}'.format(file_name))
return img_width, img_height
def get_yolo_formats(emotion_dict, total_width, total_height):
"""
Input: a dictionary of
{emotion: [
(box_x_center, box_y_center, box_width, box_height),
(box_x_center, box_y_center, box_width, box_height), ...]
    }
    and the total image width and height, which are used to normalize the box values.
    Output: a list of strings in YOLO format, "class_id x_center y_center width height",
    with all coordinates normalized to [0, 1] by the image dimensions.
    """
yolo_formats_to_write = []
if total_width == 0 or total_height == 0:
print('have 0 height or width. skipping file')
return yolo_formats_to_write
for emotion, boxes in emotion_dict.items():
for box in boxes:
if emotion == 'happy':
class_id = 0
elif emotion == 'neutral':
class_id = 1
elif emotion == 'surprised':
class_id = 2
elif emotion == 'sad':
class_id = 3
elif emotion == 'angry':
class_id = 4
else:
continue
box_x_center, box_y_center, box_width, box_height = box[0], box[1], box[2], box[3]
yolo_formats_to_write.append(' '.join([str(class_id),
str(box_x_center / total_width),
str(box_y_center / total_height),
str(box_width / total_width), str(box_height / total_height)]))
return yolo_formats_to_write
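# Usage sketch (illustrative values, not part of the original script): a single 'happy' face
# centered in a 200x100 image yields one normalized YOLO line with class_id 0.
#   get_yolo_formats({'happy': [(100, 50, 40, 20)]}, 200, 100)
#   # -> ['0 0.5 0.5 0.2 0.2']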
if __name__ == '__main__':
script_name, path_to_coords, image_folder_path = argv
# Read raw data into memory
raw = | pd.read_csv(path_to_coords) | pandas.read_csv |
import numpy as np
import pandas as pd
from bach import Series, DataFrame
from bach.operations.cut import CutOperation, QCutOperation
from sql_models.util import quote_identifier
from tests.functional.bach.test_data_and_utils import assert_equals_data
PD_TESTING_SETTINGS = {
'check_dtype': False,
'check_exact': False,
'atol': 1e-3,
}
def compare_boundaries(expected: pd.Series, result: Series) -> None:
for exp, res in zip(expected.to_numpy(), result.to_numpy()):
if not isinstance(exp, pd.Interval):
assert res is None or np.isnan(res)
continue
np.testing.assert_almost_equal(exp.left, float(res.left), decimal=2)
np.testing.assert_almost_equal(exp.right, float(res.right), decimal=2)
if exp.closed_left:
assert res.closed_left
if exp.closed_right:
assert res.closed_right
def test_cut_operation_pandas(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected = pd.cut(p_series, bins=10)
result = CutOperation(series=series, bins=10)()
compare_boundaries(expected, result)
expected_wo_right = pd.cut(p_series, bins=10, right=False)
result_wo_right = CutOperation(series, bins=10, right=False)()
compare_boundaries(expected_wo_right, result_wo_right)
def test_cut_operation_bach(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
ranges = [
pd.Interval(0, 9.9, closed='both'),
pd.Interval(9.9, 19.8, closed='right'),
pd.Interval(19.8, 29.7, closed='right'),
pd.Interval(29.7, 39.6, closed='right'),
| pd.Interval(39.6, 49.5, closed='right') | pandas.Interval |
'''This script runs a couple of loops to find the best-performing combinations of
symptoms for predicting COVID. It makes heavy use of the multiprocessing
module, and the second loop--triggered by the RUN_META parameter--is best run
on a scientific workstation or HPC cluster.
'''
import numpy as np
import pandas as pd
import xlsxwriter
import itertools
import time
from multiprocessing import Pool
from sklearn.metrics import f1_score
import tools
import multi
# Whether to calculate statistics for any-of-n combinations
RUN_SINGLE = False
# Whether to calculate statistics for combinations of combinations (metacombos)
RUN_META = True
# Whether to omit the 13 sero+/PCR- contacts from the primary analysis
OMIT_DISC = True
# Whether to write results to Excel workbooks
EXCEL = True
# Whether this is running on Windows
WINDOWS = True
# Reading in the data
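# NOTE: WINDOWS_FILE_DIR and UNIX_FILE_DIR are placeholders that are not defined in this
# script; point them at the local data directories before running.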
if WINDOWS:
file_dir = WINDOWS_FILE_DIR
else:
file_dir = UNIX_FILE_DIR
records = pd.read_csv(file_dir + 'records.csv')
# Optionally removing folks who are sero+ but PCR-
if OMIT_DISC:
no_disc = np.where([not (records.pcr_pos[i] == 0 and
records.sero_pos[i] == 1)
for i in range(records.shape[0])])[0]
records = records.iloc[no_disc, :]
# List of symptom names and case definitions
symptom_list = [
'wheeze', 'throat', 'sob', 'nausea', 'myalgia', 'headache',
'fatigue', 'discomf', 'diarrhea', 'cough', 'chestpain',
'abdpain', 'fever_chills', 'nasal_combo', 'tastesmell_combo'
]
# Pinning down the inputs and targets
X = np.array(records[symptom_list], dtype=np.uint8)
y = np.array(records.pcr_pos, dtype=np.uint8)
# Organizing by age group
kids = np.where(records.age_adult == 0)[0]
adults = np.where(records.age_adult == 1)[0]
# Making separate inputs and targets for kids, adults, and everyone
X_list = [X, X[adults], X[kids]]
y_list = [y, y[adults], y[kids]]
# Combining the two lists into a single list of tuples for easier looping
groups = [(X_list[i], y_list[i]) for i in range(len(X_list))]
group_idx = [np.array(list(range(X.shape[0]))), adults, kids]
group_names = ['all', 'adults', 'kids']
# Calculating performance for the any-of-n (single) and m-of-n [and/or]
# m-of-n (meta) combinations
p = Pool()
# Setting the maximum combination size
c_min = 1
c_max = 5
c_list = list(range(c_min, c_max+1))
# Generating the combos
n_symps = range(len(symptom_list))
combos = [[(list(group), k)
for group in itertools.combinations(n_symps, k)]
for k in list(range(c_min, c_max+1))]
combos = tools.flatten(combos)
nums = [combo[1] for combo in combos]
col_combos = [combo[0] for combo in combos]
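# Illustrative note (not part of the original script): each entry of `combos` pairs a list of
# symptom-column indices with its size, e.g. ([0], 1), ([0, 1], 2) or ([2, 5, 7], 3), so
# `col_combos` holds the index lists and `nums` holds the combination sizes.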
# Running the combo comparisons for the different age groups
cstat_list = []
cy_list = []
cnames_list = [[symptom_list[n] for n in combo] for combo in col_combos]
if RUN_SINGLE:
for i, tup in enumerate(groups):
# Pulling out the features and targets for each stratum
ftrs = tup[0]
tgts = tup[1]
# Getting the performance stats for each combo
combo_input = [ftrs[:, cols] for cols in col_combos]
combo_y = p.map(tools.rowsums, combo_input)
combo_stats = pd.concat(p.starmap(tools.clf_metrics,
[(tgts, preds) for preds in combo_y]),
axis=0)
combo_stats['rule'] = cnames_list
combo_stats['combo_size'] = nums
cstat_list.append(combo_stats)
cy_list.append(combo_y)
# Writing the combo stats to csv
if EXCEL:
writer = | pd.ExcelWriter(file_dir + 'combo_stats.xlsx') | pandas.ExcelWriter |
# The published output of this file currently lives here:
# http://share.streamlit.io/0.23.0-2EMF1/index.html?id=8hMSF5ZV3Wmbg5sA3UH3gW
import keras
import math
import numpy as np
import pandas as pd
import streamlit as st
from scipy.sparse.linalg import svds
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from streamlit.Chart import Chart
interactive_mode = False
rating_cols = ['user_id', 'item_id', 'rating', 'timestamp']
movie_cols = ['movie_id','movie_title','release_date', 'video_release_date','IMDb_URL','unknown','Action','Adventure','Animation','Childrens','Comedy','Crime','Documentary','Drama','Fantasy','Film-Noir','Horror','Musical','Mystery','Romance ','Sci-Fi','Thriller','War' ,'Western']
user_cols = ['user_id','age','gender','occupation','zip_code']
users = pd.read_csv('../data/ml-100k/u.user', sep='|', names=user_cols, encoding='latin-1')
movies = pd.read_csv('../data/ml-100k/u.item', sep='|', names=movie_cols, encoding='latin-1')
ratings = pd.read_csv('../data/ml-100k/u.data', sep='\t', names=rating_cols, encoding='latin-1')
ratings = ratings.drop(['timestamp'], axis=1)
n_users, n_movies = len(ratings.user_id.unique()), len(ratings.item_id.unique())
st.title('Iterating on our recommendation system')
st.write("""
In `week2_rec_v0.py`, we put together a very basic recommendation system that
we now want to improve. We identified that the main problem with our current
approach is that the ratings matrix is very sparse. Only 7% of the matrix is
filled. Can we fix this?
In order to construct a denser matrix, we need to predict a user's ratings for
movies they haven't seen. We try two different techniques for doing this - both
of which use low-dimensional representations of the ratings matrix to predict
the missing values. In both cases, we are basically learning an embedding for
movies and users, and then in order to predict a rating for a specific
(user, movie) pair - we take the dot product between the two.
1. The first technique uses SciPy’s singular value decomposition (SVD), and
is inspired by [<NAME>’s work](https://cambridgespark.com/content/tutorials/implementing-your-own-recommender-systems-in-Python/index.html).
2. The second approach uses matrix factorization in Keras (with an Adam
optimizer). This technique is inspired by [<NAME>’s work](https://nipunbatra.github.io/blog/2017/recommend-keras.html),
and has outperformed other ML algorithms for predicting ratings.
""")
st.header('A Tale of 2 Prediction Techniques')
st.subheader('Preparing the train & test data')
st.write("""
First we need to split our data into training and testing sets. We also capture the
true ratings for test data. We will use this later when we want to measure the
error of our predictions.
""")
if interactive_mode:
st.info("""
1. Uncomment the next section to see how we split the data into training and
testing datasets.
""")
# # -----------------------------------------------------------------------------
x_train, x_test = train_test_split(ratings, test_size=0.2)
y_true = x_test.rating
st.write('x_train:')
st.write(x_train)
st.write('x_test:')
st.write(x_test)
st.write("""
You'll notice that every time we rerun our code, `x_train` and `x_test` are
recalculated (and thus different).
""")
if interactive_mode:
st.info("""
2a. To help us reason about our code later on,
let's cache the result so that it stays static as we iterate on this code.
Comment this section. Uncomment the next.
""")
# # -----------------------------------------------------------------------------
st.write("""
In Streamlit, you can cache a function by adding `@st.cache` right above it.
This tells Streamlit to not recompute the result *unless* something changes (e.g.
the input to the function or the function body). Here we are using it to keep
our training and testing datasets fixed as we iterate on our code, but @st.cache
is also a great way to speed up your code!
""")
with st.echo():
@st.cache
def split(ratings):
return train_test_split(ratings, test_size=0.2)
x_train, x_test = split(ratings)
y_true = x_test.rating
st.write("""
If you ever need to clear the cache (e.g. to force a rerun of a function), run
this in your terminal:
``` streamlit clear_cache ```
""")
if interactive_mode:
st.info("2. Uncomment the next section to learn about SVD.")
# # -----------------------------------------------------------------------------
st.subheader('SVD')
st.write("""
Now that we've split our data, let's try out SVD. First, we convert `ratings`
into a matrix (`n_users` x `n_movies`). SVD then factorizes this matrix into
3 matrices (`u`, `s`, `vt`). Taking their dot product produces a fully
filled matrix of predicted ratings.
We can then use this matrix to make predictions about our test cases in
`x_test`.
""")
with st.echo():
@st.cache
def convert_to_matrix(r):
matrix = np.zeros((n_users, n_movies))
for line in r.itertuples():
matrix[line[1]-1, line[2]-1] = line[3]
return matrix
@st.cache
def svds_filled_matrix(train_data_matrix):
u, s, vt = svds(train_data_matrix, k = 20)
return np.dot(np.dot(u, np.diag(s)), vt)
@st.cache
def svds_predictions(x_train, x_test):
train_data_matrix = convert_to_matrix(x_train)
filled = svds_filled_matrix(train_data_matrix)
return x_test.apply(lambda row : np.round(filled[row['user_id']-1, row['item_id']-1], 0), axis=1)
    svds_preds = svds_predictions(x_train, x_test)
st.write('*SVDS Predictions for x_test*')
st.write(svds_preds)
if interactive_mode:
st.info("3. Uncomment the next section to see how we did.")
# # -----------------------------------------------------------------------------
st.write("Well, how did we do? Let's measure the mean squared error.")
st.write('**MSE for SVD**: %s' % mean_squared_error(y_true, svds_preds))
if interactive_mode:
st.info("4. Uncomment the next section to see how we do this with Keras.")
# # -----------------------------------------------------------------------------
st.subheader('Keras with Adam Optimizer')
st.write("""
Let's do the same thing, but this time using Keras, with the Adam Optimizer.
(Warning: this takes a while, so you'll have to wait...)
""")
with st.echo():
@st.cache
def adam_predictions(x_train, x_test):
n_latent_factors = 3
movie_input = keras.layers.Input(shape=[1],name='Item')
movie_embedding = keras.layers.Embedding(n_movies + 1, n_latent_factors, name='Movie-Embedding')(movie_input)
movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)
user_input = keras.layers.Input(shape=[1],name='User')
user_embedding = keras.layers.Embedding(n_users + 1, n_latent_factors,name='User-Embedding')(user_input)
user_vec = keras.layers.Flatten(name='FlattenUsers')(user_embedding)
prod = keras.layers.dot([movie_vec, user_vec], axes = 1)
model = keras.Model([user_input, movie_input], prod)
model.compile('adam', 'mean_squared_error', metrics=["accuracy", "mae", "mse"])
num_epochs = 10
model.fit([x_train.user_id, x_train.item_id], x_train.rating, validation_data=([x_test.user_id, x_test.item_id], x_test.rating) ,epochs=num_epochs, verbose=0)
return np.round(model.predict([x_test.user_id, x_test.item_id]),0), model
adam_preds, model = adam_predictions(x_train, x_test)
st.write('*Keras Adam Predictions for x_test*')
st.write(adam_preds)
st.write('**MSE for Keras Adam Prediction**: %s' % mean_squared_error(y_true, adam_preds))
st.write("""
Awesome. This does much better than the SVD algorithm above. But, wow, it's so
much slower! `@st.cache` helps, but what if we want to iterate on our model?
Perhaps we want to understand how the model is doing epoch-by-epoch? Do we
really need 10 epochs? Is it too many? Do we need more? Or maybe
we want to try out different values for `n_latent_factors`?
""")
if interactive_mode:
st.info("""
5a. To see how we can sprinkle a few lines of streamlit into the Keras callbacks to
get more insight into the training process, comment this section and uncomment
the next.
""")
# # -----------------------------------------------------------------------------
st.subheader('Keras with Adam Optimizer')
st.write("""
Here we have the same code as before for training the model, but we add some
callbacks to track the mean squared error through the training process, and
to look at a few sample predictions our model makes at the end of each epoch.
We pass this into the `model.fit()` function. Here is our callback code:
""")
with st.echo():
class MyCallback(keras.callbacks.Callback):
def __init__(self, x_test, num_epochs):
self._num_epochs = num_epochs
self._sample_tests = x_test[0:10]
def on_train_begin(self, logs=None):
st.header('Progress')
self._summary_chart = self._create_chart('area', 300)
st.header('Percentage Complete')
self._progress = st.empty()
self._progress.progress(0)
st.header('Current Epoch')
self._epoch_header = st.empty()
st.header('A Few Tests')
self._sample_test_results = st.empty()
self._sample_test_results.dataframe(self._sample_tests)
def on_epoch_begin(self, epoch, logs=None):
self._epoch = epoch
self._epoch_header.text(f'Epoch in progress: {epoch}')
def on_batch_end(self, batch, logs=None):
rows = pd.DataFrame([[logs['mean_squared_error']]],
columns=['mean_squared_error'])
if batch % 100 == 99:
self._summary_chart.add_rows(rows)
batch_percent = logs['batch'] * logs['size'] / self.params['samples']
percent = self._epoch / self._num_epochs + (batch_percent / self._num_epochs)
self._progress.progress(math.ceil(percent * 100))
def on_epoch_end(self, epoch, logs=None):
t = self._sample_tests
prediction = np.round(self.model.predict([t.user_id, t.item_id]),0)
self._sample_tests[f'epoch {epoch}'] = prediction
self._sample_test_results.dataframe(self._sample_tests)
def _create_chart(self, type='line', height=0):
empty_data = pd.DataFrame(columns=['mean_squared_error'])
epoch_chart = Chart(empty_data, f'{type}_chart', height=height)
epoch_chart.y_axis(type='number', orientation='right',
y_axis_id="mse_axis", allow_data_overflow="true")
epoch_chart.cartesian_grid(stroke_dasharray='3 3')
epoch_chart.legend()
getattr(epoch_chart, type)(type='monotone', data_key='mean_squared_error',
stroke='#82ca9d', fill='#82ca9d',
dot="false", y_axis_id='mse_axis')
return st.DeltaConnection.get_connection().get_delta_generator()._native_chart(epoch_chart)
#TODO: would be cool for the sample_test results to be visualized better than just a table
@st.cache
def adam_predictions_with_monitoring(x_train, x_test):
n_latent_factors = 3
movie_input = keras.layers.Input(shape=[1],name='Item')
movie_embedding = keras.layers.Embedding(n_movies + 1, n_latent_factors, name='Movie-Embedding')(movie_input)
movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)
user_input = keras.layers.Input(shape=[1],name='User')
user_embedding = keras.layers.Embedding(n_users + 1, n_latent_factors,name='User-Embedding')(user_input)
user_vec = keras.layers.Flatten(name='FlattenUsers')(user_embedding)
prod = keras.layers.dot([movie_vec, user_vec], axes = 1)
model = keras.Model([user_input, movie_input], prod)
model.compile('adam', 'mean_squared_error', metrics=["accuracy", "mae", "mse"])
num_epochs = 10
model.fit([x_train.user_id, x_train.item_id], x_train.rating, validation_data=([x_test.user_id, x_test.item_id], x_test.rating),epochs=num_epochs, verbose=0, callbacks=[MyCallback(x_test, num_epochs)])
return np.round(model.predict([x_test.user_id, x_test.item_id]),0), model
adam_preds, model = adam_predictions_with_monitoring(x_train, x_test)
st.write("""
Beautiful! These live charts will also be really helpful in Week 4 when we
try running our code on a dataset that's *200X* larger!
For now, we've determined that the Keras approach achieves a significantly
lower error, so let's proceed with filling the matrix with this algorithm.
""")
if interactive_mode:
st.info("5. To proceed, uncomment the next section.")
# # -----------------------------------------------------------------------------
st.subheader('Filling the Matrix')
st.write("""
We now fill the matrix with predictions using our model.
""")
with st.echo():
@st.cache
def filled_matrix():
n_movies = np.arange(1,1683,1)
n_users = np.arange(1,944,1)
user_movie_matrixA = np.repeat(n_users, len(n_movies))
user_movie_matrixB = np.tile(n_movies, len(n_users))
user_movie_matrix = np.array([user_movie_matrixA,user_movie_matrixB])
st.write('Starting matrix fill process ... ')
all_rating = model.predict([user_movie_matrixA[::],user_movie_matrixB[::]])
st.write('Finished.')
df_users = pd.DataFrame(user_movie_matrixA)
df_movies = pd.DataFrame(user_movie_matrixB)
df_ratings = | pd.DataFrame(all_rating) | pandas.DataFrame |
import random
from os import path
import pandas as pd
from IPython.core.display import display, clear_output
from ipywidgets import widgets, Button
from SmartAnno.gui.PreviousNextWidgets import PreviousNext
from SmartAnno.gui.Workflow import Step
import xml.etree.ElementTree as ET
from dateutil.parser import parse
class ReadFiles(PreviousNext):
    """Display a progress bar that shows the progress of importing the files selected via DirChooser"""
def __init__(self, name=str(Step.global_id + 1), show_previous=True, show_next=True):
super().__init__(name, show_previous, show_next)
self.next_button.disabled = True
self.dataset_name = ''
self.data = None
self.references = None
self.resetParameters()
self.start_import_btn = Button(description="Start Import")
self.progressbar = widgets.IntProgress(min=0, max=1, value=0, layout=widgets.Layout(width='50%'))
self.sample_num = widgets.Text(
value='',
placeholder='',
description='Number of files to sample',
disabled=False, style={'description_width': 'initial'}
)
pass
def resetParameters(self):
self.data = | pd.DataFrame(columns=['BUNCH_ID', 'DOC_NAME', 'TEXT', 'DATE', 'REF_DATE']) | pandas.DataFrame |
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas as pd
import numpy as np
import plotly.graph_objs as go
import plotly.tools as tools
from dash.dependencies import Input, Output, State
from dateutil.parser import parse
import squarify
import math
from datetime import datetime
from bisect import bisect_left
import grasia_dash_components as gdc
####################################################
### DASH SETUP CODE ###
####################################################
# Setup Dash's default CSS stylesheet
#external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# Setup the Dash application
#app = dash.Dash(__name__, external_stylesheets=external_stylesheets, static_folder='static')
app = dash.Dash(__name__, static_folder='static')
app.title = 'Bitcoin Booms and Busts'
server = app.server
####################################################
### DECENTRALIZED VIZ CODE ###
####################################################
tm_width = 100
tm_height = 100
tm_x = 0
tm_y = 0
# color pallette for the viz
cList = ['lightcyan', 'lightblue', 'deepskyblue', 'dodgerblue', 'steelblue',
'midnightblue']
table = [['Name', 'Estimated Balance'],
['Planktons', '0 to 1 Bitcoin'],
         ['Clownfishes', '1 to 10 Bitcoins'],
['Lionfishes', '10 to 100 Bitcoins'],
['Swordfishes', '100 to 1000 Bitcoins'],
['Sharks', '1000 to 10000 Bitcoins'],
['Whales', 'More than 10000 Bitcoins']]
df_table = pd.DataFrame(table)
df_table.columns = df_table.iloc[0]
df_table = df_table[1:]
df_val_per_month = pd.read_csv('change_bins_values_per_month.csv')
df_val_per_month.fillna(0, inplace=True)
df_val_per_month.loc[:,"_0_to_1":"More_10000"] = df_val_per_month.loc[:,"_0_to_1":"More_10000"].div(df_val_per_month.sum(axis=1), axis=0) * 100
df_val_per_month.columns = ["Month", "Planktons", "Clownfishes",
"Lionfishes", "Swordfishes", "Sharks", "Whales"]
df_val_per_month = df_val_per_month.sort_values(by='Month')
df_val_per_month.Month = | pd.to_datetime(df_val_per_month.Month) | pandas.to_datetime |
import unittest
import pandas as pd
import argopandas.path as path
class TestPath(unittest.TestCase):
def test_path_info(self):
info = path.info('R2901633_052.nc')
self.assertEqual(list(path.info(['R2901633_052.nc'])), [info])
self.assertIsInstance(path.info(pd.Series(['R2901633_052.nc'])), pd.DataFrame)
self.assertIsInstance(
path.info(pd.DataFrame({'file': ['R2901633_052.nc']})),
pd.DataFrame
)
def test_path_prof(self):
info = path.info('R2901633_052.nc')
self.assertEqual(info['type'], 'prof')
self.assertEqual(info['float'], 2901633)
self.assertEqual(info['cycle'], 52)
self.assertEqual(info['data_mode'], 'R')
self.assertIsNone(info['modifier'])
self.assertIsNone(info['descending'])
self.assertIsNone(info['aux'])
def test_path_not_prof(self):
info = path.info('2900313_Rtraj.nc')
self.assertEqual(info['type'], 'traj')
self.assertEqual(info['float'], 2900313)
self.assertEqual(info['data_mode'], 'R')
self.assertIsNone(info['modifier'])
self.assertIsNone(info['aux'])
def test_path_none(self):
info = path.info('not anything really')
self.assertIsNone(info['type'])
def test_search(self):
self.assertTrue(path.is_descending('R2901633_052D.nc'))
self.assertEqual([path.is_descending('a')], list(path.is_descending(['a'])))
self.assertIsInstance(path.is_descending(pd.Series(['a'])), pd.Series)
self.assertIsInstance(path.is_descending( | pd.DataFrame({'file': ['a']}) | pandas.DataFrame |
"""
TODO Pendletoon, doc this whole module
"""
import logging
import pandas as pd
import capture.devconfig as config
from utils.data_handling import update_sheet_column
from utils import globals
from utils.globals import lab_safeget
modlog = logging.getLogger('capture.prepare.interface')
def _get_reagent_header_cells(column: str):
    """Get all cells in the rows that start each reagent for a given column
:param column: (str) in {A, B, ..., Z, AA, AB, ...}
"""
startrow = lab_safeget(config.lab_vars, globals.get_lab(), 'reagent_interface_amount_startrow')
reagent_interface_step = int(lab_safeget(config.lab_vars, globals.get_lab(), 'maxreagentchemicals')) + 1
num_reagents = lab_safeget(config.lab_vars, globals.get_lab(), 'max_reagents')
stoprow = startrow + reagent_interface_step * num_reagents
result = [column + str(i) for i in range(startrow, stoprow, reagent_interface_step)]
return result
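# Illustrative sketch (assumed config values, not the real lab settings): with
# reagent_interface_amount_startrow=16, maxreagentchemicals=4 (so a step of 5) and
# max_reagents=3, _get_reagent_header_cells('B') would return ['B16', 'B21', 'B26'].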
def get_reagent_target_volumes(erdf, deadvolume):
"""Target volumes for reagent preparation as dictionary"""
reagent_target_volumes = {}
for reagent in erdf.columns:
reagent_volume = erdf[reagent].sum() + deadvolume
reagentname = reagent.split(' ')[0]
reagent_target_volumes[reagentname] = reagent_volume
return reagent_target_volumes
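# Usage sketch (illustrative column names and units, not part of the original module):
#   erdf = pd.DataFrame({"Reagent1 (ul)": [100, 200], "Reagent2 (ul)": [50, 50]})
#   get_reagent_target_volumes(erdf, deadvolume=300)
#   # -> {"Reagent1": 600, "Reagent2": 400}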
def build_nominals_df(rdict,
chemicalnamedf,
target_final_volume,
liquidlist,
maxreagentchemicals,
chemdf):
''' calculate the mass of each chemical return dataframe
TODO: write out nominal molarity to google sheets, see issue#52
:param chemdf: Chemical data frame from google drive.
:returns: a dataframe sized for export to version 2.x interface
'''
nominalsdf = pd.DataFrame()
itemcount = 1
chemicalnamedf.sort_index(inplace=True)
for index, row in chemicalnamedf.iterrows():
reagentname = row['reagentnames']
chemabbr = row['chemabbr']
if row['chemabbr'] == 'Final Volume = ':
formulavollist = []
formulavol = 'null'
itemcount = 1
finalvolindex = index
pass
else:
# stock solutions should be summed for final total volume
if chemabbr in liquidlist or chemabbr == 'FAH': # todo dejank
formulavol = (target_final_volume[reagentname]/1000).round(2)
formulavollist.append(formulavol)
nominalsdf.loc[index, "nominal_amount"] = formulavol
nominalsdf.loc[index, "Unit"] = 'milliliter'
itemcount+=1
elif chemabbr == 'null':
nominalsdf.loc[index, "nominal_amount"] = 'null'
nominalsdf.loc[index, "Unit"] = 'null'
nominalsdf.loc[index, "actualsnull"] = 'null'
itemcount+=1
pass
else:
#calculate reagent amounts from formula
reagentnum = str(reagentname.split('t')[1])
nominalamount = (target_final_volume[reagentname]/1000/1000 * \
rdict[reagentnum].concs['conc_item%s' %(itemcount)] * \
float(chemdf.loc["%s" %chemabbr, "Molecular Weight (g/mol)"])
).round(2)
nominalsdf.loc[index, "nominal_amount"] = nominalamount
nominalsdf.loc[index, "Unit"] = 'gram'
itemcount+=1
if itemcount == (maxreagentchemicals+1):
if len(formulavollist) > 0:
nominalsdf.loc[finalvolindex, "nominal_amount"] = sum(formulavollist)
nominalsdf.loc[finalvolindex, "Unit"] = 'milliliter'
else:
nominalsdf.loc[finalvolindex, "nominal_amount"] = formulavol
nominalsdf.loc[finalvolindex, "Unit"] = 'null'
nominalsdf.loc[finalvolindex, "actualsnull"] = 'null'
modlog.info((reagentname, "formula calculation complete"))
nominalsdf.sort_index(inplace=True)
return nominalsdf
def build_nominals_v1(rdict,
chemicalnamedf,
target_final_volume_dict,
liquidlist,
maxreagentchemicals,
chemdf):
''' calculate the mass of each chemical return dataframe
Uses model 1 of the density calculation to get a better approximation
for the contribution of solids to the final volume
TODO: write out nominal molarity to google sheets, see issue#52
TODO: ensure column integrity of read in chemical dataframe
:param chemdf: Chemical data frame from google drive.
:returns: a dataframe sized for export to version 2.x interface
'''
nominalsdf = | pd.DataFrame() | pandas.DataFrame |
import os
from os.path import join, isfile
import subprocess
import json
import pandas as pd
from abc import ABC, abstractmethod
from typing import List, Dict, Tuple, Optional, Union, Any
import random
from nerblackbox.modules.utils.util_functions import get_dataset_path
from nerblackbox.modules.utils.env_variable import env_variable
from nerblackbox.modules.datasets.formatter.util_functions import get_ner_tag_mapping
from nerblackbox.modules.datasets.analyzer import Analyzer
SEED_SHUFFLE = {
"train": 4,
"val": 5,
"test": 6,
}
SENTENCES_ROWS_PRETOKENIZED = List[List[List[str]]]
SENTENCES_ROWS_UNPRETOKENIZED = List[Dict[str, Any]]
SENTENCES_ROWS = Union[SENTENCES_ROWS_PRETOKENIZED, SENTENCES_ROWS_UNPRETOKENIZED]
class BaseFormatter(ABC):
def __init__(
self, ner_dataset: str, ner_tag_list: List[str], ner_dataset_subset: str = ""
):
"""
Args:
ner_dataset: 'swedish_ner_corpus' or 'suc'
ner_tag_list: e.g. ['PER', 'LOC', ..]
ner_dataset_subset: e.g. 'original_cased'
"""
self.ner_dataset: str = ner_dataset
self.ner_tag_list: List[str] = ner_tag_list
self.dataset_path: str = get_dataset_path(ner_dataset, ner_dataset_subset)
self.file_name: Dict[str, str] = {}
self.analyzer = Analyzer(self.ner_dataset, self.ner_tag_list, self.dataset_path)
####################################################################################################################
# ABSTRACT BASE METHODS
####################################################################################################################
@abstractmethod
def get_data(self, verbose: bool) -> None: # pragma: no cover
"""
I: get data
Args:
verbose: [bool]
"""
pass
@abstractmethod
def create_ner_tag_mapping(self) -> Dict[str, str]: # pragma: no cover
"""
II: customize ner_training tag mapping if wanted
Returns:
ner_tag_mapping: [dict] w/ keys = tags in original data, values = tags in formatted data
"""
pass
@abstractmethod
def format_data(
self, shuffle: bool = True, write_csv: bool = True
) -> Optional[SENTENCES_ROWS]: # pragma: no cover
"""
III: format data
Args:
shuffle: whether to shuffle rows of dataset
write_csv: whether to write dataset to csv (should always be True except for testing)
"""
pass
def set_original_file_paths(self) -> None: # pragma: no cover
"""
III: format data
Changed Attributes:
file_paths: [Dict[str, str]], e.g. {'train': <path_to_train_csv>, 'val': ..}
Returns: -
"""
pass
@abstractmethod
def _parse_row(self, _row: str) -> List[str]: # pragma: no cover
"""
III: format data
Args:
_row: e.g. "Det PER X B"
Returns:
_row_list: e.g. ["Det", "PER", "X", "B"]
"""
pass
def _format_original_file(
self, _row_list: List[str]
) -> Optional[List[str]]: # pragma: no cover
"""
III: format data
Args:
_row_list: e.g. ["test", "PER", "X", "B"]
Returns:
_row_list_formatted: e.g. ["test", "B-PER"]
"""
pass
@abstractmethod
def resplit_data(
self, val_fraction: float, write_csv: bool
) -> Optional[Tuple[pd.DataFrame, ...]]: # pragma: no cover
"""
IV: resplit data
Args:
val_fraction: [float], e.g. 0.3
write_csv: whether to write dataset to csv (should always be True except for testing)
"""
pass
####################################################################################################################
# BASE METHODS
####################################################################################################################
def create_directory(self) -> None: # pragma: no cover
"""
0: create directory for dataset
"""
directory_path = join(self.dataset_path, "analyze_data")
os.makedirs(directory_path, exist_ok=True)
bash_cmd = (
f'echo "*" > {env_variable("DIR_DATASETS")}/{self.ner_dataset}/.gitignore'
)
try:
subprocess.run(bash_cmd, shell=True, check=True)
except subprocess.CalledProcessError as e:
print(e)
def create_ner_tag_mapping_json(self, modify: bool) -> None: # pragma: no cover
"""
II: create customized ner_training tag mapping to map tags in original data to tags in formatted data
Args:
modify: [bool], if True: modify tags as specified in method modify_ner_tag_mapping()
Returns: -
"""
if modify:
ner_tag_mapping = self.create_ner_tag_mapping()
else:
ner_tag_mapping = dict()
json_path = join(self.dataset_path, "ner_tag_mapping.json")
with open(json_path, "w") as f:
json.dump(ner_tag_mapping, f)
print(f"> dumped the following dict to {json_path}:")
print(ner_tag_mapping)
####################################################################################################################
# HELPER: READ ORIGINAL
####################################################################################################################
def _read_original_file(self, phase: str) -> SENTENCES_ROWS_PRETOKENIZED:
"""
III: format data
Args:
phase: 'train', 'val', 'test'
Returns:
sentences_rows: e.g. (-pretokenized-)
[
[['Inger', 'PER'], ['säger', '0'], .., []],
[['Det', '0'], .., []]
]
"""
self.set_original_file_paths()
file_path_original = join(self.dataset_path, self.file_name[phase])
_sentences_rows = list()
if isfile(file_path_original):
_sentence = list()
with open(file_path_original) as f:
for row in f.readlines():
row_list = self._parse_row(row)
if len(row_list) > 0:
row_list_formatted = self._format_original_file(row_list)
if row_list_formatted is not None:
_sentence.append(row_list_formatted)
else:
if len(_sentence):
_sentences_rows.append(_sentence)
_sentence = list()
print(f"\n> read {file_path_original}")
else: # pragma: no cover
raise Exception(f"ERROR! could not find file {file_path_original}!")
return _sentences_rows
####################################################################################################################
# HELPER: WRITE FORMATTED
####################################################################################################################
def _write_formatted_csv(
self, phase: str, sentences_rows: SENTENCES_ROWS_PRETOKENIZED
) -> None: # pragma: no cover
"""
III: format data
Args:
phase: 'train', 'val', 'test'
sentences_rows: e.g. (-pretokenized-)
[
[['Inger', 'PER'], ['säger', '0'], .., []],
[['Det', '0'], .., []]
]
Returns: -
"""
sentences_rows_formatted = self._format_sentences_rows(sentences_rows)
df = pd.DataFrame(sentences_rows_formatted)
file_path = join(self.dataset_path, f"{phase}_formatted.csv")
df.to_csv(file_path, sep="\t", header=False, index=False)
print(f"> phase = {phase}: wrote {len(df)} sentences to {file_path}")
def _write_formatted_jsonl(
self, phase: str, sentences_rows: SENTENCES_ROWS_UNPRETOKENIZED
) -> None: # pragma: no cover
"""
save to jsonl file
Args:
phase: 'train', 'val', 'test'
sentences_rows: e.g. (-unpretokenized-)
[
{
'text': 'Inger säger ..',
'tags': [{'token': 'Inger', 'tag': 'PER', 'char_start': 0, 'char_end': 5}, ..],
},
{
'text': 'Det ..',
'tags': [{..}, ..]
}
]
Returns: -
"""
file_path = join(self.dataset_path, f"{phase}_formatted.jsonl")
with open(file_path, "w") as file:
for sentence_row in sentences_rows:
file.write(json.dumps(sentence_row, ensure_ascii=False) + "\n")
print(
f"> phase = {phase}: wrote {len(sentences_rows)} sentences to {file_path}"
)
def _format_sentences_rows(
self, sentences_rows: SENTENCES_ROWS_PRETOKENIZED
) -> List[Tuple[str, str]]:
"""
III: format data
Args:
sentences_rows: e.g. (-pretokenized-)
[
[['Inger', 'PER'], ['säger', '0'], .., []],
[['Det', '0'], .., []]
]
Returns:
sentences_rows_formatted, e.g. (-pretokenized-)
[
('PER O', 'Inger säger'),
('O', 'Det'),
]
"""
# ner tag mapping
ner_tag_mapping = get_ner_tag_mapping(
path=join(self.dataset_path, "ner_tag_mapping.json")
)
# processing
sentences_rows_formatted = list()
for sentence in sentences_rows:
text_list = list()
tags_list = list()
for row in sentence:
assert (
len(row) == 2
), f"ERROR! row with length = {len(row)} found (should be 2): {row}"
text_list.append(row[0])
tags_list.append(
ner_tag_mapping(row[1]) if row[1] != "0" else "O"
) # replace zeros by capital O (!)
sentences_rows_formatted.append((" ".join(tags_list), " ".join(text_list)))
return sentences_rows_formatted
@staticmethod
def _convert_iob1_to_iob2(
sentences_rows_iob1: SENTENCES_ROWS_PRETOKENIZED,
) -> SENTENCES_ROWS_PRETOKENIZED:
"""
III: format data
convert tags from IOB1 to IOB2 format
Args:
sentences_rows_iob1: e.g. (-pretokenized-)
[
[['Inger', 'I-PER'], ['säger', '0'], .., []],
]
Returns:
sentences_rows_iob2: e.g. (-pretokenized-)
[
[['Inger', 'B-PER'], ['säger', '0'], .., []],
]
"""
sentences_rows_iob2 = list()
for sentence in sentences_rows_iob1:
sentence_iob2 = list()
for i, row in enumerate(sentence):
                assert (
                    len(row) == 2
                ), f"ERROR! row = {row} should have length 2, not {len(row)}"
current_tag = row[1]
if (
current_tag == "O"
or "-" not in current_tag
or current_tag.startswith("B-")
):
sentence_iob2.append(row)
elif current_tag.startswith("I-"):
previous_tag = (
sentence[i - 1][1]
if (i > 0 and len(sentence[i - 1]) == 2)
else None
)
if previous_tag not in [
current_tag,
current_tag.replace("I-", "B-"),
]:
tag_iob2 = current_tag.replace("I-", "B-")
sentence_iob2.append([row[0], tag_iob2])
else:
sentence_iob2.append(row)
sentences_rows_iob2.append(sentence_iob2)
return sentences_rows_iob2
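    # Illustrative example (not part of the original class): how the IOB1 -> IOB2
    # conversion above behaves on a single sentence. An "I-" tag that starts a new
    # entity (its previous token carries no matching tag) is rewritten to "B-":
    #
    #   BaseFormatter._convert_iob1_to_iob2(
    #       [[["Inger", "I-PER"], ["Olsson", "I-PER"], ["bor", "O"], ["här", "I-LOC"]]]
    #   )
    #   # -> [[["Inger", "B-PER"], ["Olsson", "I-PER"], ["bor", "O"], ["här", "B-LOC"]]]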
@staticmethod
def _shuffle_dataset(_phase: str, _sentences_rows: List[Any]) -> List[Any]:
"""
III: format data
Args:
_phase: "train", "val", "test"
_sentences_rows: e.g. (-pretokenized-)
[
[['Inger', 'PER'], ['säger', '0'], .., []],
[['Det', '0'], .., []]
]
e.g. (-unpretokenized-)
[
{
'text': 'Inger säger ..',
'tags': [{'token': 'Inger', 'tag': 'PER', 'char_start': 0, 'char_end': 5}, ..],
},
{
'text': 'Det ..',
'tags': [{..}, ..]
}
]
Returns:
_sentences_rows_shuffled: e.g. (-pretokenized-)
[
                    [['Det', '0'], .., []],
[['Inger', 'PER'], ['säger', '0'], .., []]
]
e.g. (-unpretokenized-)
[
{
'text': 'Det ..',
'tags': [{..}, ..]
},
{
'text': 'Inger säger ..',
'tags': [{'token': 'Inger', 'tag': 'PER', 'char_start': 0, 'char_end': 5}, ..],
}
]
"""
# change _sentences_rows by shuffling sentences
random.Random(SEED_SHUFFLE[_phase]).shuffle(_sentences_rows)
return _sentences_rows
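    # Illustrative example (not part of the original class): the fixed per-phase seed in
    # SEED_SHUFFLE makes the shuffle deterministic, so repeated formatting runs produce
    # the same ordering for a given phase:
    #
    #   rows = [["a"], ["b"], ["c"], ["d"]]
    #   shuffled_1 = BaseFormatter._shuffle_dataset("train", list(rows))
    #   shuffled_2 = BaseFormatter._shuffle_dataset("train", list(rows))
    #   assert shuffled_1 == shuffled_2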
####################################################################################################################
# HELPER: READ FORMATTED
####################################################################################################################
def _read_formatted_csvs(self, phases: List[str]) -> pd.DataFrame:
"""
IV: resplit data
Args:
phases: .. to read formatted csvs from, e.g. ['val', 'test']
Returns:
df_phases: contains formatted csvs of phases
"""
df_phases = [self._read_formatted_csv(phase) for phase in phases]
return pd.concat(df_phases, ignore_index=True)
def _read_formatted_csv(self, phase: str):
"""
IV: resplit data
Args:
phase: .. to read formatted df from, e.g. 'val'
Returns:
df: formatted df
"""
formatted_file_path = join(self.dataset_path, f"{phase}_formatted.csv")
return | pd.read_csv(formatted_file_path, sep="\t", header=None) | pandas.read_csv |
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.backends.backend_pdf import PdfPages
import math
import time
from tqdm import tqdm
import os
import glob
import fnmatch
from src.data.config import site, dates, folders, fountain, surface
dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
start = time.time()
if __name__ == '__main__':
path = folders["data"]
all_files = glob.glob(
os.path.join(path, "TOA5__Flux_Notes*.dat"))
all_files_B = glob.glob(
os.path.join(path, "TOA5__FluxB_Notes*.dat"))
li = []
li_B = []
for filename in all_files:
df_in = pd.read_csv(filename, header=1)
        df_in = df_in[2:].reset_index(drop=True)  # drop the units/aggregation metadata rows below the TOA5 header
li.append(df_in)
for filename in all_files_B:
df_inB = pd.read_csv(filename, header=1)
df_inB = df_inB[2:].reset_index(drop=True)
df_inB = df_inB.drop(["RECORD"], axis=1)
li_B.append(df_inB)
df_A = pd.concat(li, axis=0, ignore_index=True)
df_B = | pd.concat(li_B, axis=0, ignore_index=True) | pandas.concat |
import os
import pandas as pd
UPLOAD_FOLDER = 'files'
INPUT_FILENAME = 'ab.xlsx'
OUTPUT_FILENAME = 'c.xlsx'
input_file_path = os.path.join(UPLOAD_FOLDER, INPUT_FILENAME)
output_file_path = os.path.join(UPLOAD_FOLDER, OUTPUT_FILENAME)
def test():
# read values from input file
df_ab = | pd.read_excel(input_file_path) | pandas.read_excel |
# -*- coding: UTF-8 -*-
# **********************************************************************************#
# File: Report file
# **********************************************************************************#
import pandas as pd
from copy import deepcopy, copy
from . risk_metrics import *
from .. utils.datetime_utils import get_trading_days
from .. universe.universe import UniverseService
from .. utils.pandas_utils import smart_concat
from .. instrument.asset_service import AssetType
from .. account import StockAccount, FuturesAccount, OTCFundAccount, IndexAccount
class StocksReport(object):
    """
    Stock backtest report, with the following attributes:
    * self.std_keys: names of the standard output fields
    * self.rec_keys: names of the fields recorded from the account
    * self.sup_keys: names of the extra fields added via observe
    * self.tradeDate: list of recorded trading dates
    * self.cash: list of recorded cash balances
    * self.security_position: list of recorded security positions
    * self.portfolio_value: list of recorded portfolio values
    * self.benchmark_return: list of recorded benchmark returns
    * self.blotter: list of recorded order blotters
    * self.buy_value: list of recorded total buy values
    * self.sell_value: list of recorded total sell values
    * self.initial_value: initial account value
    * self.len_universe: number of securities in the backtest universe
    """
    def __init__(self, data, sim_params, universe_service):
        """
        Initialize the report; all inputs must be defined under the same parameter set.
        Args:
            data (data): market data
            sim_params (SimulationParameters): SimulationParameters instance
            universe_service (Universe): UniverseService instance
        Examples:
            >> report = StocksReport(data, sim_params, universe_service)
        """
self.std_keys = ['tradeDate', 'cash', 'security_position',
'portfolio_value', 'benchmark_return', 'blotter']
self.rec_keys = ['current_date', 'position', 'benchmark', 'blotter']
self.sup_keys = []
self.tradeDate = []
self.cash = []
self.security_position = []
self.portfolio_value = []
self.benchmark_return = []
self.blotter = []
self.buy_value = []
self.sell_value = []
self.initial_value = sim_params.portfolio.cash
trading_days = get_trading_days(sim_params.start, sim_params.end)
for s, a in sim_params.portfolio.secpos.items():
if len(trading_days) > 0:
p = data['preClosePrice'].at[trading_days[0].strftime('%Y-%m-%d'), s]
self.initial_value += p * a
assert isinstance(universe_service, UniverseService)
self.len_universe = len(universe_service.view(with_init_universe=True))
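    # Illustrative note (not part of the original class): the initial portfolio value
    # computed above is cash plus the previous close of any starting positions, e.g.
    # with 100000 cash and 1000 shares whose preClosePrice is 12.5 on the first
    # trading day:
    #
    #   initial_value = 100000 + 12.5 * 1000  # == 112500.0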
    def _update_trade_value(self, account, data):
        """
        Update the position-related report data for one trading day from the backtest state.
        Args:
            account (Account): account object instance
            data (data): cached market data
        """
initial_position = dict([(k, {'amount': v}) for (k, v) in account.sim_params.security_base.iteritems()])
yesterday_positions = self.security_position[-2] if len(self.security_position) > 1 else initial_position
today_positions = self.security_position[-1]
for s, v in yesterday_positions.iteritems():
q_change = today_positions.get(s, {'amount': 0})['amount'] - v['amount']
if q_change > 0:
self.buy_value.append(q_change * data.at[s, 'openPrice'])
else:
self.sell_value.append(- q_change * data.at[s, 'openPrice'])
for s, v in today_positions.iteritems():
if s not in yesterday_positions:
self.buy_value.append(v['amount'] * data.at[s, 'openPrice'])
    def update(self, context, account_name):
        """
        Update the recorded metrics for the current trading day.
        Args:
            context (context): Environment object
            account_name (str): account name
        """
data = context.registered_accounts[account_name].broker.daily_data[context.current_date.strftime('%Y-%m-%d')]
record = context.registered_accounts[account_name].to_record()
self.tradeDate.append(record["current_date"])
self.blotter.append(record["blotter"])
self.cash.append(record["position"].cash)
self.portfolio_value.append(record["position"].evaluate(data))
self.security_position.append(record["position"].show())
major_benchmark = context.sim_params.major_benchmark
self.benchmark_return.append(
data.at[major_benchmark, 'closePrice'] / data.at[major_benchmark, 'preClosePrice'] - 1
if data.at[major_benchmark, 'preClosePrice'] != 0.0 else 0.0)
self._update_trade_value(context, data)
for k in record:
if k not in (self.std_keys + self.sup_keys + self.rec_keys):
self.sup_keys.append(k)
setattr(self, k, [])
for k in self.sup_keys:
getattr(self, k).append(record[k])
    def output(self):
        """
        Output the records as a pandas.DataFrame.
        Returns:
            DataFrame: backtest records
        """
output_dict = {k: getattr(self, k) for k in (self.std_keys + self.sup_keys)}
output_frame = pd.DataFrame(output_dict).loc[:, (self.std_keys + self.sup_keys)]
output_frame.index = output_frame.tradeDate
return output_frame
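# Illustrative usage sketch (not part of the original module), assuming a backtest engine
# that builds `data`, `sim_params`, `universe_service` and a `context` elsewhere; the
# account name 'stock_account' is hypothetical:
#
#   report = StocksReport(data, sim_params, universe_service)
#   for trading_day in trading_days:              # inside the backtest loop
#       report.update(context, 'stock_account')
#   records = report.output()                     # pandas.DataFrame indexed by tradeDate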
class FuturesReport(object):
    """
    Futures backtest report, with the following attributes:
    * self.trade_date: list of recorded trading dates
    * self.futures_blotter: list of recorded order blotters
    * self.futures_cash: list of recorded cash balances
    * self.futures_position: list of recorded futures positions
    * self.futures_position_detail: list of recorded futures position details
    * self.portfolio_value: list of recorded portfolio values
    """
def __init__(self, initial_value=None):
        # fields to be included in the output
self.keys = ['trade_date', 'futures_cash', 'futures_position', 'futures_blotter',
'portfolio_value']
self.trade_date = []
self.futures_blotter = []
self.futures_cash = []
self.futures_position = []
self.futures_position_detail = []
self.portfolio_value = []
self.trades = []
self.benchmark_return = []
self.initial_value = initial_value if initial_value else 10000000
    def output(self):
        """
        Output the records as a pandas.DataFrame.
        Returns:
            DataFrame: backtest records
        """
        # if the portfolio value ever drops to zero or below, zero out every record from that day onward
        for key, value in enumerate(self.portfolio_value):
if value <= 0:
break
try:
if key < len(self.portfolio_value) - 1:
self.portfolio_value = self.portfolio_value[:key] + [0]*(len(self.portfolio_value)-key)
self.futures_cash = self.futures_cash[:key] + [0]*(len(self.futures_cash)-key)
self.futures_blotter = self.futures_blotter[:key] + [[]]*(len(self.futures_blotter)-key)
self.futures_position = self.futures_position[:key] + [{}]*(len(self.futures_position)-key)
self.trades = self.trades[:key] + [[]]*(len(self.trades)-key)
except:
pass
df = pd.DataFrame({
'tradeDate': self.trade_date,
'futures_blotter': self.futures_blotter,
'futures_cash': self.futures_cash,
'futures_position': self.futures_position,
'portfolio_value': self.portfolio_value,
'futures_trades': self.trades,
'benchmark_return': self.benchmark_return
})
df.index = self.trade_date
return df
    def update(self, context, account_name):
        """
        Update the recorded metrics for the current trading day.
        Args:
            context (context): Environment object
            account_name (str): account name
        """
account = context.get_account(account_name)
self.trade_date.append(account.clock.current_date)
self.futures_blotter.append(deepcopy(account.broker.blotter.to_list()))
self.futures_cash.append(account.broker.portfolio.settle_cash)
self.futures_position.append(account.broker.portfolio.settle_position)
self.portfolio_value.append(account.broker.portfolio.pre_portfolio_value)
self.trades.append(deepcopy(account.get_trades()))
data = account.broker.daily_data[context.current_date.strftime('%Y-%m-%d')]
major_benchmark = context.sim_params.major_benchmark
self.benchmark_return.append(data.loc[major_benchmark, 'closePrice']
/ data.loc[major_benchmark, 'preClosePrice'] - 1)
class IndexReport(object):
    """
    Index account backtest report, with the following attributes:
    * self.trade_date: list of recorded trading dates
    * self.index_blotter: list of recorded order blotters
    * self.index_cash: list of recorded cash balances
    * self.index_position: list of recorded index positions
    * self.index_trades: list of recorded index trade details
    * self.benchmark_return: list of recorded benchmark returns
    * self.portfolio_value: list of recorded portfolio values
    """
def __init__(self, account):
self.account = account
self.trade_date = list()
self.index_cash = list()
self.index_blotter = list()
self.index_position = list()
self.index_trades = list()
self.portfolio_value = list()
self.benchmark_return = list()
    def output(self):
        """
        Output the records as a pandas.DataFrame.
        Returns:
            DataFrame: backtest records
        """
output_dict = {
'tradeDate': self.trade_date,
'index_blotter': self.index_blotter,
'index_cash': self.index_cash,
'index_position': self.index_position,
'index_trades': self.index_trades,
'benchmark_return': self.benchmark_return,
'portfolio_value': self.portfolio_value
}
return | pd.DataFrame(output_dict, index=self.trade_date) | pandas.DataFrame |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
# Assert 'labels' or 'columns' parameter is set
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# Non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
def _test_dropna(self, pdf, axis):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check multi index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check multi index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
# Assert approximate counts
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
pd.Series([100], index=["A"]),
)
# Assert unsupported axis value yet
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.nunique(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.nunique(), pdf.nunique())
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
def test_sort_values(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
for ascending in [True, False]:
for na_position in ["first", "last"]:
self.assert_eq(
psdf.sort_values("a", ascending=ascending, na_position=na_position),
pdf.sort_values("a", ascending=ascending, na_position=na_position),
)
self.assert_eq(psdf.sort_values(["a", "b"]), pdf.sort_values(["a", "b"]))
self.assert_eq(
psdf.sort_values(["a", "b"], ascending=[False, True]),
pdf.sort_values(["a", "b"], ascending=[False, True]),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], ascending=[False]))
self.assert_eq(
psdf.sort_values(["a", "b"], na_position="first"),
pdf.sort_values(["a", "b"], na_position="first"),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], na_position="invalid"))
pserA = pdf.a
psserA = psdf.a
self.assert_eq(psdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# multi-index columns
pdf = pd.DataFrame(
{("X", 10): [1, 2, 3, 4, 5, None, 7], ("X", 20): [7, 6, 5, 4, 3, 2, 1]},
index=np.random.rand(7),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(("X", 20)), pdf.sort_values(("X", 20)))
self.assert_eq(
psdf.sort_values([("X", 20), ("X", 10)]), pdf.sort_values([("X", 20), ("X", 10)])
)
self.assertRaisesRegex(
ValueError,
"For a multi-index, the label must be a tuple with elements",
lambda: psdf.sort_values(["X"]),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 5, None, 7], 20: [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(20), pdf.sort_values(20))
self.assert_eq(psdf.sort_values([20, 10]), pdf.sort_values([20, 10]))
def test_sort_index(self):
pdf = pd.DataFrame(
{"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan]
)
psdf = ps.from_pandas(pdf)
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psdf.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Assert sorting descending
self.assert_eq(psdf.sort_index(ascending=False), pdf.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psdf.sort_index(na_position="first"), pdf.sort_index(na_position="first"))
# Assert sorting descending and NA indices first
self.assert_eq(
psdf.sort_index(ascending=False, na_position="first"),
pdf.sort_index(ascending=False, na_position="first"),
)
# Assert sorting inplace
pserA = pdf.A
psserA = psdf.A
self.assertEqual(psdf.sort_index(inplace=True), pdf.sort_index(inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# Assert multi-indices
pdf = pd.DataFrame(
{"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0]))
self.assert_eq(psdf.reset_index().sort_index(), pdf.reset_index().sort_index())
# Assert with multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = | pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx) | pandas.DataFrame |
import math
import logging
from itertools import groupby
from datetime import date
from dateutil.relativedelta import relativedelta
from calendar import monthrange
import pandas as pd
from statsmodels.tsa.api import ExponentialSmoothing
from dispatch.incident_type.models import IncidentType
from .models import Incident
log = logging.getLogger(__name__)
def month_grouper(item):
"""Determines the last day of a given month."""
key = date(
item.reported_at.year,
item.reported_at.month,
monthrange(item.reported_at.year, item.reported_at.month)[-1],
)
return key
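# Illustrative sketch (not part of the original module): month_grouper keys an
# incident to the last calendar day of the month it was reported in. The
# `incident` stand-in below is hypothetical; any object with a `reported_at`
# datetime attribute behaves the same way.
#
#     >>> from types import SimpleNamespace
#     >>> from datetime import datetime
#     >>> incident = SimpleNamespace(reported_at=datetime(2021, 2, 10))
#     >>> month_grouper(incident)
#     datetime.date(2021, 2, 28)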
def get_incident_counts(db_session, months=2):
"""Counts the number of incident in the last n months."""
incidents = (
db_session.query(Incident)
.join(IncidentType)
.filter(IncidentType.exclude_from_metrics.isnot(True))
.filter(Incident.reported_at > date.today().replace(day=1) + relativedelta(months=-months))
.all()
)
counts = []
incidents_sorted = sorted(incidents, key=month_grouper)
for (key, items) in groupby(incidents_sorted, month_grouper):
items = list(items)
counts.append(len(items))
return counts
def make_forecast(
db_session, incident_type: str = None, periods: int = 24, grouping: str = "month"
):
"""Makes an incident forecast."""
query = db_session.query(Incident).join(IncidentType)
# exclude incident types
query = query.filter(IncidentType.exclude_from_metrics.isnot(True))
# exclude last two months
query = query.filter(
Incident.reported_at < date.today().replace(day=1) + relativedelta(months=-2)
)
if incident_type != "all":
if incident_type:
query = query.filter(IncidentType.name == incident_type)
if grouping == "month":
grouper = month_grouper
query = query.filter(Incident.reported_at > date.today() + relativedelta(months=-periods))
incidents = query.all()
incidents_sorted = sorted(incidents, key=grouper)
dataframe_dict = {"ds": [], "y": []}
for (last_day, items) in groupby(incidents_sorted, grouper):
dataframe_dict["ds"].append(str(last_day))
dataframe_dict["y"].append(len(list(items)))
dataframe = pd.DataFrame.from_dict(dataframe_dict)
if dataframe.empty:
return {
"categories": [],
"series": [{"name": "Predicted", "data": []}],
}
# index the frame by month and drop the ds column
dataframe.index = dataframe.ds
dataframe.index.freq = "M"
dataframe.drop("ds", inplace=True, axis=1)
# fill periods without incidents with 0
idx = | pd.date_range(dataframe.index[0], dataframe.index[-1], freq="M") | pandas.date_range |
import copy
import re
import pandas as pd
from num2words import num2words
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import nltk
from src import db
import src.param as pm
from pandas.api.types import CategoricalDtype
from src.param import no_of_movie_per_genre, no_imdb_genres
from .search_feature_extraction import FeatureExtraction
from surprise import dump
import os
nltk.download('stopwords')
nltk.download('punkt')
class Utils:
def __init__(self):
"""
Preprocesses document text queries
"""
self.porter = PorterStemmer()
self.stopwords = stopwords.words('english')
def get_movie_descs(self):
query = """SELECT id, name, description, url FROM movie"""
movie = db.session.execute(query)
return movie
def liked_movie(self,t,u):
query = """SELECT DISTINCT movie_id FROM movies_user_like WHERE movie_id='{}' AND username='{}'""".format(t,u)
title = db.session.execute(query).all()
#print(t,u,title)
if title == []:
return 'white'
return 'orange'
def similar_movies(self,t):
query = """SELECT DISTINCT movie_id FROM movies_user_like WHERE movie_id='{}'""".format(t)
title = db.session.execute(query).all()
if title == []:
return 'white'
return 'orange'
def process_single_document(self, doc):
"""
preprocess single document
:param doc: str
:return: list(str)
"""
alpn = re.sub(r'[^a-zA-Z0-9]', ' ', doc).lower()
tokens = nltk.word_tokenize(alpn)
filtered_words = [self.porter.stem(word) for word in tokens if word not in self.stopwords]
words_without_nums = []
for word in filtered_words:
try:
word = num2words(word).split('-')
words_without_nums.extend(word)
except:
words_without_nums.append(word)
return words_without_nums
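# Illustrative sketch (assumption, not part of the original class): the pipeline
# lowercases, strips non-alphanumerics, tokenizes, drops stopwords, stems, and
# spells out numeric tokens when num2words can parse them, e.g.
#
#     >>> Utils().process_single_document("The 2 fast movies")
#     # roughly ['two', 'fast', 'movi'] (exact tokens depend on the installed NLTK data)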
def process_documents(self, data):
data['text'] = data.apply(lambda x: self.process_single_document(str(x['title'])+' '+str(x['description'])), axis=1)
return data
def process_query(self, text):
tokens = self.process_single_document(text)
return tokens
def ranked_ids(self, query, data):
df = copy.deepcopy(data)
processed_resumes = self.process_documents(df)
featureExtraction = FeatureExtraction()
processed_query = self.process_query(query)
fe = featureExtraction.generate_features(processed_resumes, processed_query)
#return fe.id.values, fe.mean_tfidf.values, fe.bm25.values, self.normalize(fe.drop(['id', 'name', 'description'], axis=1).values)
x = [x for _,x in sorted(zip(list(fe.bm25.values) ,list(fe.id.values)), reverse=True)]
y = [y for y,_ in sorted(zip(list(fe.bm25.values) ,list(fe.id.values)), reverse=True)]
return x,y
def search_movies(self,query):
# get movie titles in genres
genres = """SELECT DISTINCT genre FROM genre"""
genres = [g[0] for g in db.session.execute(genres).all()][:pm.no_imdb_genres]
movie_all = {}
genres_query = lambda g: """SELECT DISTINCT movie_id FROM genre WHERE genre='{}'""".format(g)
movie_query = lambda m: """SELECT id, title, poster_address, url, duration, director, description FROM movies WHERE id='{}'""".format(m)
unique_titles = []
genre_scores = []
for genre in genres:
movies = [db.session.execute(movie_query(g[0])).all() for g in
db.session.execute(genres_query(genre)).all()]
new_movies = []
items = []
for movie in movies:
items.append(movie[0][0])
if movie[0][0] not in unique_titles:
unique_titles.append(movie[0][0])
new_movie = []
for i, m in enumerate(movie[0]):
if i == 2:
#new_movie.append(base64.b64encode(m).decode("utf-8"))
new_movie.append(m)
else:
new_movie.append(m)
new_movies.append(new_movie)
df = pd.DataFrame(new_movies, columns =['id', 'title', 'poster', 'url', 'duration', 'director', 'description'])
#ranking
rank_ids,rank_scores=self.ranked_ids(query, df)
genre_scores.append(sum(rank_scores))
id_order = | CategoricalDtype(rank_ids, ordered=True) | pandas.api.types.CategoricalDtype |
import os
import numpy as np
import pandas as pd
from numpy import abs
from numpy import log
from numpy import sign
from scipy.stats import rankdata
import scipy as sp
import statsmodels.api as sm
from data_source import local_source
from tqdm import tqdm as pb
# region Auxiliary functions
def ts_sum(df, window=10):
"""
Wrapper function to estimate rolling sum.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series sum over the past 'window' days.
"""
return df.rolling(window).sum()
def ts_prod(df, window=10):
"""
Wrapper function to estimate rolling product.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series product over the past 'window' days.
"""
return df.rolling(window).prod()
def sma(df, window=10): #simple moving average
"""
Wrapper function to estimate SMA.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series SMA over the past 'window' days.
"""
return df.rolling(window).mean()
def ema(df, n, m): #exponential moving average
"""
Wrapper function to estimate EMA.
:param df: a pandas DataFrame.
:return: ema_{t}=(m/n)*a_{t}+((n-m)/n)*ema_{t-1}
"""
result = df.copy()
for i in range(1,len(df)):
result.iloc[i] = (m * df.iloc[i] + (n - m) * result.iloc[i - 1]) / n  # ema_t = (m*a_t + (n-m)*ema_{t-1}) / n
return result
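# Worked sketch (illustrative): with n=3, m=1 each step keeps (n-m)/n = 2/3 of the
# previous EMA and blends in m/n = 1/3 of the new observation, per the docstring
# recursion ema_t = (m*a_t + (n-m)*ema_{t-1}) / n.
#
#     >>> ema(pd.Series([10.0, 13.0, 16.0]), n=3, m=1)
#     # -> [10.0, 11.0, 12.666...]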
def wma(df, n):
"""
Wrapper function to estimate WMA.
:param df: a pandas DataFrame.
:return: wma_{t}=0.9*a_{t}+1.8*a_{t-1}+...+0.9*n*a_{t-n+1}
"""
weights = pd.Series(0.9*np.flipud(np.arange(1,n+1)))
result = pd.Series(np.nan, index=df.index)
for i in range(n-1,len(df)):
result.iloc[i]= sum(df[i-n+1:i+1].reset_index(drop=True)*weights.reset_index(drop=True))
return result
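# Illustrative note (assumption): for n=3 the weight vector above is 0.9*[3, 2, 1],
# applied oldest-to-newest within each window, i.e. wma_t = 0.9*a_t + 1.8*a_{t-1}
# + 2.7*a_{t-2}, matching the docstring. The weights are not normalized (they sum
# to 0.9*n*(n+1)/2), and the first n-1 positions stay NaN because no full window
# exists yet.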
def stddev(df, window=10):
"""
Wrapper function to estimate rolling standard deviation.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series standard deviation over the past 'window' days.
"""
return df.rolling(window).std()
def correlation(x, y, window=10):
"""
Wrapper function to estimate rolling correlation.
:param x: a pandas DataFrame or Series.
:param y: a pandas DataFrame or Series.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series correlation of x and y over the past 'window' days.
"""
return x.rolling(window).corr(y)
def covariance(x, y, window=10):
"""
Wrapper function to estimate rolling covariance.
:param x: a pandas DataFrame or Series.
:param y: a pandas DataFrame or Series.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series covariance of x and y over the past 'window' days.
"""
return x.rolling(window).cov(y)
def rolling_rank(na):
"""
Auxiliary function to be used with Series.rolling(window).apply().
:param na: numpy array.
:return: The rank of the last value in the array.
"""
return rankdata(na)[-1]
def ts_rank(df, window=10):
"""
Wrapper function to estimate rolling rank.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series rank over the past window days.
"""
return df.rolling(window).apply(rolling_rank)
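# Illustrative sketch (assumption): ts_rank reports, for each day, the rank of that
# day's value within its trailing window (1 = smallest, window = largest).
#
#     >>> ts_rank(pd.Series([3.0, 1.0, 2.0, 4.0]), window=3)
#     # -> [NaN, NaN, 2.0, 3.0]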
def rolling_prod(na):
"""
Auxiliary function to be used with Series.rolling(window).apply().
:param na: numpy array.
:return: The product of the values in the array.
"""
return np.prod(na)
def product(df, window=10):
"""
Wrapper function to estimate rolling product.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series product over the past 'window' days.
"""
return df.rolling(window).apply(rolling_prod)
def ts_min(df, window=10):
"""
Wrapper function to estimate rolling min.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return df.rolling(window).min()
def ts_max(df, window=10):
"""
Wrapper function to estimate rolling max.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series max over the past 'window' days.
"""
return df.rolling(window).max()
def delta(df, period=1):
"""
Wrapper function to estimate difference.
:param df: a pandas DataFrame.
:param period: the difference grade.
:return: a pandas DataFrame with today’s value minus the value 'period' days ago.
"""
return df.diff(period)
def delay(df, period=1):
"""
Wrapper function to estimate lag.
:param df: a pandas DataFrame.
:param period: the lag grade.
:return: a pandas DataFrame with lagged time series
"""
return df.shift(period)
def rank(df):
"""
Cross sectional rank
:param df: a pandas DataFrame.
:return: a pandas DataFrame with rank along columns.
"""
#return df.rank(axis=1, pct=True)
return df.rank(pct=True)
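# Illustrative note (assumption): with pct=True the rank is returned as a fraction
# of the number of valid observations, e.g.
#
#     >>> rank(pd.Series([30.0, 10.0, 20.0]))
#     # -> [1.0, 0.333..., 0.666...]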
def scale(df, k=1):
"""
Scaling time serie.
:param df: a pandas DataFrame.
:param k: scaling factor.
:return: the input rescaled so that sum(abs(df)) = k
"""
return df.mul(k).div(np.abs(df).sum())
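# Illustrative sketch (assumption): scale rescales the series so its absolute
# values sum to k.
#
#     >>> scale(pd.Series([1.0, -3.0, 6.0]), k=1)
#     # -> [0.1, -0.3, 0.6]   (absolute values sum to 1)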
def ts_argmax(df, window=10):
"""
Wrapper function to estimate which day ts_max(df, window) occurred on
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return df.rolling(window).apply(np.argmax) + 1
def ts_argmin(df, window=10):
"""
Wrapper function to estimate which day ts_min(df, window) occurred on
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return df.rolling(window).apply(np.argmin) + 1
def decay_linear(df, period=10):
"""
Linear weighted moving average implementation.
:param df: a pandas DataFrame.
:param period: the LWMA period
:return: a pandas DataFrame with the LWMA.
"""
try:
df = df.to_frame() #Series is not supported for the calculations below.
except:
pass
# Clean data
if df.isnull().values.any():
df.fillna(method='ffill', inplace=True)
df.fillna(method='bfill', inplace=True)
df.fillna(value=0, inplace=True)
na_lwma = np.zeros_like(df)
na_lwma[:period, :] = df.iloc[:period, :]
na_series = df.values
divisor = period * (period + 1) / 2
y = (np.arange(period) + 1) * 1.0 / divisor
# Estimate the actual lwma with the actual close.
# The backtest engine should assure to be snooping bias free.
for row in range(period - 1, df.shape[0]):
x = na_series[row - period + 1: row + 1, :]
na_lwma[row, :] = (np.dot(x.T, y))
return pd.DataFrame(na_lwma, index=df.index, columns=['CLOSE'])
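# Illustrative note (assumption): the LWMA weights y above are
# (1, 2, ..., period) / (period*(period+1)/2), so the most recent observation in
# each window gets the largest weight and the weights sum to 1; for period=4 they
# are [0.1, 0.2, 0.3, 0.4].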
def highday(df, n):  # number of periods since the maximum of the previous n observations of df
result = pd.Series(np.nan, index=df.index)
for i in range(n,len(df)):
result.iloc[i]= i - df[i-n:i].idxmax()
return result
def lowday(df, n):  # number of periods since the minimum of the previous n observations of df
result = pd.Series(np.nan, index=df.index)
for i in range(n,len(df)):
result.iloc[i]= i - df[i-n:i].idxmin()
return result
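# Illustrative note (assumption): highday/lowday report how many periods ago the
# maximum/minimum of the previous n observations occurred. The window df[i-n:i]
# excludes the current row, and the subtraction mixes positions with idxmax()/
# idxmin() labels, so these helpers appear to assume a default RangeIndex.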
def daily_panel_csv_initializer(csv_name): #not used now
if os.path.exists(csv_name)==False:
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY')
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')
dataset=0
for date in date_list["TRADE_DATE"]:
stock_list[date]=stock_list["INDUSTRY"]
stock_list.drop("INDUSTRY",axis=1,inplace=True)
stock_list.set_index("TS_CODE", inplace=True)
dataset = pd.DataFrame(stock_list.stack())
dataset.reset_index(inplace=True)
dataset.columns=["TS_CODE","TRADE_DATE","INDUSTRY"]
dataset.to_csv(csv_name,encoding='utf-8-sig',index=False)
else:
dataset=pd.read_csv(csv_name)
return dataset
def IndustryAverage_vwap():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_vwap.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average vwap data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average vwap data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average vwap data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating per-stock (not yet industry-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = VWAP
result_unaveraged_piece.rename("VWAP_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#averaging the per-stock data within each industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["VWAP_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_vwap.csv",encoding='utf-8-sig')
return result_industryaveraged_df
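# Usage sketch (assumption): this and the IndustryAverage_* helpers that follow
# share one caching pattern -- load the per-factor CSV if it exists, compute only
# the missing trade dates for each industry, rewrite the CSV, and return a
# DataFrame indexed by TRADE_DATE with one column per industry, e.g.
#
#     >>> vwap_by_industry = IndustryAverage_vwap()
#     >>> vwap_by_industry.loc[20200102, "银行"]   # date and industry label are illustrative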
def IndustryAverage_close():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_close.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average close data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average close data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average close data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating per-stock (not yet industry-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = CLOSE
result_unaveraged_piece.rename("CLOSE_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#averaging the per-stock data within each industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["CLOSE_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_close.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_low():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_low.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average low data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average low data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average low data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating per-stock (not yet industry-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
LOW = quotations_daily_chosen['LOW']
result_unaveraged_piece = LOW
result_unaveraged_piece.rename("LOW_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#averaging the per-stock data within each industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["LOW_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_low.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_volume():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_volume.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average volume data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average volume data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average volume data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating per-stock (not yet industry-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = VOLUME
result_unaveraged_piece.rename("VOLUME_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#averaging the per-stock data within each industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["VOLUME_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_volume.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_adv(num):
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_adv{num}.csv".format(num=num))
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average adv{num} data needs not to be updated.".format(num=num))
return result_industryaveraged_df
else:
print("The corresponding industry average adv{num} data needs to be updated.".format(num=num))
first_date_update = date_list_update[0]
except:
print("The corresponding industry average adv{num} data is missing.".format(num=num))
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating per-stock (not yet industry-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = sma(VOLUME, num)
result_unaveraged_piece.rename("ADV{num}_UNAVERAGED".format(num=num),inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#averaging the per-stock data within each industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["ADV{num}_UNAVERAGED".format(num=num)].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_adv{num}.csv".format(num=num),encoding='utf-8-sig')
return result_industryaveraged_df
#(correlation(delta(close, 1), delta(delay(close, 1), 1), 250) *delta(close, 1)) / close
def IndustryAverage_PreparationForAlpha048():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha048.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha048 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha048 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha048 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating per-stock (not yet industry-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = (correlation(delta(CLOSE, 1), delta(delay(CLOSE, 1), 1), 250) *delta(CLOSE, 1)) / CLOSE
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA048_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#averaging the per-stock data within each industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA048_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha048.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(vwap * 0.728317) + (vwap *(1 - 0.728317))
def IndustryAverage_PreparationForAlpha059():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha059.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha059 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha059 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha059 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating per-stock (not yet industry-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = (VWAP * 0.728317) + (VWAP *(1 - 0.728317))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA059_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#averaging the per-stock data within each industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA059_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha059.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(close * 0.60733) + (open * (1 - 0.60733))
def IndustryAverage_PreparationForAlpha079():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha079.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha079 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha079 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha079 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating per-stock (not yet industry-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
OPEN = quotations_daily_chosen['OPEN']
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = (CLOSE * 0.60733) + (OPEN * (1 - 0.60733))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA079_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#averaging the per-stock data within each industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA079_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha079.csv",encoding='utf-8-sig')
return result_industryaveraged_df
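# Note on the IndustryAverage_PreparationForAlphaXXX functions above and below: each one
# maintains a per-industry average of the named expression in a local CSV cache
# ("IndustryAverage_Data_*.csv"). On every call it re-reads the cache, computes values only
# for the trade dates that are still missing, and rebuilds the whole file if the CSV is absent.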
#(open * 0.868128) + (high * (1 - 0.868128))
def IndustryAverage_PreparationForAlpha080():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha080.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha080 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha080 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha080 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unaveraged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
OPEN = quotations_daily_chosen['OPEN']
HIGH = quotations_daily_chosen['HIGH']
result_unaveraged_piece = (OPEN * 0.868128) + (HIGH * (1 - 0.868128))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA080_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging the data by industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA080_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha080.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(low * 0.721001) + (vwap * (1 - 0.721001))
def IndustryAverage_PreparationForAlpha097():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha097.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha097 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha097 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha097 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unaveraged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
LOW = quotations_daily_chosen['LOW']
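            # VWAP is proxied below as turnover divided by share volume; the scaling assumes
            # tushare-style units (AMOUNT in thousands of CNY, VOL in lots of 100 shares), and
            # the +1 in the denominator guards against division by zero on no-trade days.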
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = (LOW * 0.721001) + (VWAP * (1 - 0.721001))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA097_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging the data by industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA097_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha097.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#rank(((((close - low) - (high -close)) / (high - low)) * volume))
def IndustryAverage_PreparationForAlpha100_1():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha100_1.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha100_1 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha100_1 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha100_1 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unaveraged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
HIGH = quotations_daily_chosen['HIGH']
LOW = quotations_daily_chosen['LOW']
CLOSE = quotations_daily_chosen['CLOSE']
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = rank(((((CLOSE - LOW) - (HIGH -CLOSE)) / (HIGH - LOW)) * VOLUME))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA100_1_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging the data by industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA100_1_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha100_1.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(correlation(close, rank(adv20), 5) - rank(ts_argmin(close, 30)))
def IndustryAverage_PreparationForAlpha100_2():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha100_2.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha100_2 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha100_2 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha100_2 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unaveraged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0] -30
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
VOLUME = quotations_daily_chosen['VOL']*100
            adv20 = sma(VOLUME, 20)
result_unaveraged_piece = (correlation(CLOSE, rank(adv20), 5) - rank(ts_argmin(CLOSE, 30)))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA100_2_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging the data by industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA100_2_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha100_2.csv",encoding='utf-8-sig')
return result_industryaveraged_df
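
# A minimal convenience sketch, not part of the original pipeline: this hypothetical helper
# simply refreshes the cached industry-average CSVs produced by the preparation functions
# defined above, so that the industry-neutralized alphas in the class below can be computed
# against up-to-date caches.
def _refresh_industry_average_caches_079_to_100():
    IndustryAverage_PreparationForAlpha079()
    IndustryAverage_PreparationForAlpha080()
    IndustryAverage_PreparationForAlpha097()
    IndustryAverage_PreparationForAlpha100_1()
    IndustryAverage_PreparationForAlpha100_2()
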
class Alphas(object):
def __init__(self, ts_code="000001.SZ",start_date=20210101,end_date=20211231):
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition="TS_CODE = " + "'" + ts_code + "'").sort_values(by="TRADE_DATE", ascending=True)
stock_indicators_daily_chosen=local_source.get_stock_indicators_daily(cols='TRADE_DATE,TS_CODE,TOTAL_SHARE',condition="TS_CODE = " + "'" + ts_code + "'").sort_values(by="TRADE_DATE", ascending=True)
stock_data_chosen=pd.merge(quotations_daily_chosen,stock_indicators_daily_chosen,on=['TRADE_DATE','TS_CODE'],how="left")
stock_data_chosen["TOTAL_MV"]=stock_data_chosen["TOTAL_SHARE"]*stock_data_chosen["CLOSE"]
stock_data_chosen=stock_data_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
stock_data_chosen["TRADE_DATE"]=stock_data_chosen["TRADE_DATE"].astype(int)
self.open = stock_data_chosen['OPEN']
self.high = stock_data_chosen['HIGH']
self.low = stock_data_chosen['LOW']
self.close = stock_data_chosen['CLOSE']
self.volume = stock_data_chosen['VOL']*100
self.returns = stock_data_chosen['CHANGE'] / stock_data_chosen['OPEN']
self.vwap = (stock_data_chosen['AMOUNT']*1000)/(stock_data_chosen['VOL']*100+1)
self.cap = stock_data_chosen['TOTAL_MV']
self.industry = local_source.get_stock_list(cols='TS_CODE,INDUSTRY', condition='TS_CODE = '+'"'+ts_code+'"')['INDUSTRY'].iloc[0]
self.available_dates = stock_data_chosen["TRADE_DATE"]
output_dates = stock_data_chosen[(stock_data_chosen["TRADE_DATE"]>=start_date)*(stock_data_chosen["TRADE_DATE"]<=end_date)]["TRADE_DATE"]
start_available_date = output_dates.iloc[0]
end_available_date = output_dates.iloc[-1]
self.start_date_index = stock_data_chosen["TRADE_DATE"][stock_data_chosen["TRADE_DATE"].values == start_available_date].index[0]
self.end_date_index = stock_data_chosen["TRADE_DATE"][stock_data_chosen["TRADE_DATE"].values == end_available_date].index[0] +1
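    # Data conventions used throughout this class: volume is VOL*100 (lots converted to
    # shares, assuming tushare-style units); returns are CHANGE/OPEN; vwap is proxied by
    # AMOUNT*1000/(VOL*100+1); cap is TOTAL_SHARE*CLOSE; every alpha is computed over the
    # full available history and then sliced to [start_date_index:end_date_index]. The
    # fractional day counts quoted in the formula comments are rounded to whole days.
    #
    # Usage sketch (assumes local_source and the helper operators rank/delta/correlation/...
    # are defined earlier in this module):
    #   factors = Alphas(ts_code="000001.SZ", start_date=20210101, end_date=20211231)
    #   print(factors.alpha001())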
# Alpha#1 (rank(Ts_ArgMax(SignedPower(((returns < 0) ? stddev(returns, 20) : close), 2.), 5)) -0.5)
def alpha001(self):
inner = self.close
inner[self.returns < 0] = stddev(self.returns, 20)
alpha = rank(ts_argmax(inner ** 2, 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#2 (-1 * correlation(rank(delta(log(volume), 2)), rank(((close - open) / open)), 6))
def alpha002(self):
df = -1 * correlation(rank(delta(log(self.volume), 2)), rank((self.close - self.open) / self.open), 6)
alpha = df.replace([-np.inf, np.inf], 0).fillna(value=0)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#3 (-1 * correlation(rank(open), rank(volume), 10))
def alpha003(self):
df = -1 * correlation(rank(self.open), rank(self.volume), 10)
alpha = df.replace([-np.inf, np.inf], 0).fillna(value=0)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#4 (-1 * Ts_Rank(rank(low), 9))
def alpha004(self):
alpha = -1 * ts_rank(rank(self.low), 9)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#5 (rank((open - (sum(vwap, 10) / 10))) * (-1 * abs(rank((close - vwap)))))
def alpha005(self):
alpha = (rank((self.open - (sum(self.vwap, 10) / 10))) * (-1 * abs(rank((self.close - self.vwap)))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#6 (-1 * correlation(open, volume, 10))
def alpha006(self):
df = -1 * correlation(self.open, self.volume, 10)
alpha = df.replace([-np.inf, np.inf], 0).fillna(value=0)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#7 ((adv20 < volume) ? ((-1 * ts_rank(abs(delta(close, 7)), 60)) * sign(delta(close, 7))) : (-1* 1))
def alpha007(self):
adv20 = sma(self.volume, 20)
alpha = -1 * ts_rank(abs(delta(self.close, 7)), 60) * sign(delta(self.close, 7))
alpha[adv20 >= self.volume] = -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#8 (-1 * rank(((sum(open, 5) * sum(returns, 5)) - delay((sum(open, 5) * sum(returns, 5)),10))))
def alpha008(self):
alpha = -1 * (rank(((ts_sum(self.open, 5) * ts_sum(self.returns, 5)) - delay((ts_sum(self.open, 5) * ts_sum(self.returns, 5)), 10))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#9 ((0 < ts_min(delta(close, 1), 5)) ? delta(close, 1) : ((ts_max(delta(close, 1), 5) < 0) ?delta(close, 1) : (-1 * delta(close, 1))))
def alpha009(self):
delta_close = delta(self.close, 1)
cond_1 = ts_min(delta_close, 5) > 0
cond_2 = ts_max(delta_close, 5) < 0
alpha = -1 * delta_close
alpha[cond_1 | cond_2] = delta_close
return alpha[self.start_date_index:self.end_date_index]
# Alpha#10 rank(((0 < ts_min(delta(close, 1), 4)) ? delta(close, 1) : ((ts_max(delta(close, 1), 4) < 0)? delta(close, 1) : (-1 * delta(close, 1)))))
def alpha010(self):
delta_close = delta(self.close, 1)
cond_1 = ts_min(delta_close, 4) > 0
cond_2 = ts_max(delta_close, 4) < 0
alpha = -1 * delta_close
alpha[cond_1 | cond_2] = delta_close
return alpha[self.start_date_index:self.end_date_index]
# Alpha#11 ((rank(ts_max((vwap - close), 3)) + rank(ts_min((vwap - close), 3))) *rank(delta(volume, 3)))
def alpha011(self):
alpha = ((rank(ts_max((self.vwap - self.close), 3)) + rank(ts_min((self.vwap - self.close), 3))) *rank(delta(self.volume, 3)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#12 (sign(delta(volume, 1)) * (-1 * delta(close, 1)))
def alpha012(self):
alpha = sign(delta(self.volume, 1)) * (-1 * delta(self.close, 1))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#13 (-1 * rank(covariance(rank(close), rank(volume), 5)))
def alpha013(self):
alpha = -1 * rank(covariance(rank(self.close), rank(self.volume), 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#14 ((-1 * rank(delta(returns, 3))) * correlation(open, volume, 10))
def alpha014(self):
df = correlation(self.open, self.volume, 10)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * rank(delta(self.returns, 3)) * df
return alpha[self.start_date_index:self.end_date_index]
# Alpha#15 (-1 * sum(rank(correlation(rank(high), rank(volume), 3)), 3))
def alpha015(self):
df = correlation(rank(self.high), rank(self.volume), 3)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * ts_sum(rank(df), 3)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#16 (-1 * rank(covariance(rank(high), rank(volume), 5)))
def alpha016(self):
alpha = -1 * rank(covariance(rank(self.high), rank(self.volume), 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#17 (((-1 * rank(ts_rank(close, 10))) * rank(delta(delta(close, 1), 1))) *rank(ts_rank((volume / adv20), 5)))
def alpha017(self):
adv20 = sma(self.volume, 20)
alpha = -1 * (rank(ts_rank(self.close, 10)) * rank(delta(delta(self.close, 1), 1)) * rank(ts_rank((self.volume / adv20), 5)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#18 (-1 * rank(((stddev(abs((close - open)), 5) + (close - open)) + correlation(close, open,10))))
def alpha018(self):
df = correlation(self.close, self.open, 10)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * (rank((stddev(abs((self.close - self.open)), 5) + (self.close - self.open)) + df))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#19 ((-1 * sign(((close - delay(close, 7)) + delta(close, 7)))) * (1 + rank((1 + sum(returns,250)))))
def alpha019(self):
alpha = ((-1 * sign((self.close - delay(self.close, 7)) + delta(self.close, 7))) * (1 + rank(1 + ts_sum(self.returns, 250))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#20 (((-1 * rank((open - delay(high, 1)))) * rank((open - delay(close, 1)))) * rank((open -delay(low, 1))))
def alpha020(self):
alpha = -1 * (rank(self.open - delay(self.high, 1)) * rank(self.open - delay(self.close, 1)) * rank(self.open - delay(self.low, 1)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#21 ((((sum(close, 8) / 8) + stddev(close, 8)) < (sum(close, 2) / 2)) ? (-1 * 1) : (((sum(close,2) / 2) < ((sum(close, 8) / 8) - stddev(close, 8))) ? 1 : (((1 < (volume / adv20)) || ((volume /adv20) == 1)) ? 1 : (-1 * 1))))
def alpha021(self):
cond_1 = sma(self.close, 8) + stddev(self.close, 8) < sma(self.close, 2)
        cond_2 = sma(self.volume, 20) / self.volume > 1  # volume/adv20 < 1, the final -1 branch of the formula
alpha = pd.DataFrame(np.ones_like(self.close), index=self.close.index)
#alpha = pd.DataFrame(np.ones_like(self.close), index=self.close.index, columns=self.close.columns)
alpha[cond_1 | cond_2] = -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#22 (-1 * (delta(correlation(high, volume, 5), 5) * rank(stddev(close, 20))))
def alpha022(self):
df = correlation(self.high, self.volume, 5)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * delta(df, 5) * rank(stddev(self.close, 20))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#23 (((sum(high, 20) / 20) < high) ? (-1 * delta(high, 2)) : 0)
def alpha023(self):
cond = sma(self.high, 20) < self.high
alpha = pd.DataFrame(np.zeros_like(self.close),index=self.close.index,columns=['close'])
        alpha.loc[cond,'close'] = -1 * delta(self.high, 2).fillna(value=0)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#24 ((((delta((sum(close, 100) / 100), 100) / delay(close, 100)) < 0.05) ||((delta((sum(close, 100) / 100), 100) / delay(close, 100)) == 0.05)) ? (-1 * (close - ts_min(close,100))) : (-1 * delta(close, 3)))
def alpha024(self):
cond = delta(sma(self.close, 100), 100) / delay(self.close, 100) <= 0.05
alpha = -1 * delta(self.close, 3)
alpha[cond] = -1 * (self.close - ts_min(self.close, 100))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#25 rank(((((-1 * returns) * adv20) * vwap) * (high - close)))
def alpha025(self):
adv20 = sma(self.volume, 20)
alpha = rank(((((-1 * self.returns) * adv20) * self.vwap) * (self.high - self.close)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#26 (-1 * ts_max(correlation(ts_rank(volume, 5), ts_rank(high, 5), 5), 3))
def alpha026(self):
df = correlation(ts_rank(self.volume, 5), ts_rank(self.high, 5), 5)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * ts_max(df, 3)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#27 ((0.5 < rank((sum(correlation(rank(volume), rank(vwap), 6), 2) / 2.0))) ? (-1 * 1) : 1)
    def alpha027(self): #note: there may be problems with this simplification
        ranked = rank((sma(correlation(rank(self.volume), rank(self.vwap), 6), 2) / 2.0))
        alpha = ranked.copy()
        alpha[ranked > 0.5] = -1
        alpha[ranked <= 0.5] = 1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#28 scale(((correlation(adv20, low, 5) + ((high + low) / 2)) - close))
def alpha028(self):
adv20 = sma(self.volume, 20)
df = correlation(adv20, self.low, 5)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = scale(((df + ((self.high + self.low) / 2)) - self.close))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#29 (min(product(rank(rank(scale(log(sum(ts_min(rank(rank((-1 * rank(delta((close - 1),5))))), 2), 1))))), 1), 5) + ts_rank(delay((-1 * returns), 6), 5))
def alpha029(self):
alpha = (ts_min(rank(rank(scale(log(ts_sum(rank(rank(-1 * rank(delta((self.close - 1), 5)))), 2))))), 5) + ts_rank(delay((-1 * self.returns), 6), 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#30 (((1.0 - rank(((sign((close - delay(close, 1))) + sign((delay(close, 1) - delay(close, 2)))) +sign((delay(close, 2) - delay(close, 3)))))) * sum(volume, 5)) / sum(volume, 20))
def alpha030(self):
delta_close = delta(self.close, 1)
inner = sign(delta_close) + sign(delay(delta_close, 1)) + sign(delay(delta_close, 2))
alpha = ((1.0 - rank(inner)) * ts_sum(self.volume, 5)) / ts_sum(self.volume, 20)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#31 ((rank(rank(rank(decay_linear((-1 * rank(rank(delta(close, 10)))), 10)))) + rank((-1 *delta(close, 3)))) + sign(scale(correlation(adv20, low, 12))))
def alpha031(self):
adv20 = sma(self.volume, 20)
df = correlation(adv20, self.low, 12).replace([-np.inf, np.inf], 0).fillna(value=0)
p1=rank(rank(rank(decay_linear((-1 * rank(rank(delta(self.close, 10)))), 10))))
p2=rank((-1 * delta(self.close, 3)))
p3=sign(scale(df))
alpha = p1.CLOSE+p2+p3
return alpha[self.start_date_index:self.end_date_index]
# Alpha#32 (scale(((sum(close, 7) / 7) - close)) + (20 * scale(correlation(vwap, delay(close, 5),230))))
def alpha032(self):
alpha = scale(((sma(self.close, 7) / 7) - self.close)) + (20 * scale(correlation(self.vwap, delay(self.close, 5),230)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#33 rank((-1 * ((1 - (open / close))^1)))
def alpha033(self):
alpha = rank(-1 + (self.open / self.close))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#34 rank(((1 - rank((stddev(returns, 2) / stddev(returns, 5)))) + (1 - rank(delta(close, 1)))))
def alpha034(self):
inner = stddev(self.returns, 2) / stddev(self.returns, 5)
inner = inner.replace([-np.inf, np.inf], 1).fillna(value=1)
alpha = rank(2 - rank(inner) - rank(delta(self.close, 1)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#35 ((Ts_Rank(volume, 32) * (1 - Ts_Rank(((close + high) - low), 16))) * (1 -Ts_Rank(returns, 32)))
def alpha035(self):
alpha = ((ts_rank(self.volume, 32) * (1 - ts_rank(self.close + self.high - self.low, 16))) * (1 - ts_rank(self.returns, 32)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#36 (((((2.21 * rank(correlation((close - open), delay(volume, 1), 15))) + (0.7 * rank((open- close)))) + (0.73 * rank(Ts_Rank(delay((-1 * returns), 6), 5)))) + rank(abs(correlation(vwap,adv20, 6)))) + (0.6 * rank((((sum(close, 200) / 200) - open) * (close - open)))))
def alpha036(self):
adv20 = sma(self.volume, 20)
alpha = (((((2.21 * rank(correlation((self.close - self.open), delay(self.volume, 1), 15))) + (0.7 * rank((self.open- self.close)))) + (0.73 * rank(ts_rank(delay((-1 * self.returns), 6), 5)))) + rank(abs(correlation(self.vwap,adv20, 6)))) + (0.6 * rank((((sma(self.close, 200) / 200) - self.open) * (self.close - self.open)))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#37 (rank(correlation(delay((open - close), 1), close, 200)) + rank((open - close)))
def alpha037(self):
alpha = rank(correlation(delay(self.open - self.close, 1), self.close, 200)) + rank(self.open - self.close)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#38 ((-1 * rank(Ts_Rank(close, 10))) * rank((close / open)))
def alpha038(self):
inner = self.close / self.open
inner = inner.replace([-np.inf, np.inf], 1).fillna(value=1)
        alpha = -1 * rank(ts_rank(self.close, 10)) * rank(inner)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#39 ((-1 * rank((delta(close, 7) * (1 - rank(decay_linear((volume / adv20), 9)))))) * (1 +rank(sum(returns, 250))))
def alpha039(self):
adv20 = sma(self.volume, 20)
alpha = ((-1 * rank(delta(self.close, 7) * (1 - rank(decay_linear((self.volume / adv20), 9).CLOSE)))) * (1 + rank(sma(self.returns, 250))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#40 ((-1 * rank(stddev(high, 10))) * correlation(high, volume, 10))
def alpha040(self):
alpha = -1 * rank(stddev(self.high, 10)) * correlation(self.high, self.volume, 10)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#41 (((high * low)^0.5) - vwap)
def alpha041(self):
alpha = pow((self.high * self.low),0.5) - self.vwap
return alpha[self.start_date_index:self.end_date_index]
# Alpha#42 (rank((vwap - close)) / rank((vwap + close)))
def alpha042(self):
alpha = rank((self.vwap - self.close)) / rank((self.vwap + self.close))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#43 (ts_rank((volume / adv20), 20) * ts_rank((-1 * delta(close, 7)), 8))
def alpha043(self):
adv20 = sma(self.volume, 20)
alpha = ts_rank(self.volume / adv20, 20) * ts_rank((-1 * delta(self.close, 7)), 8)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#44 (-1 * correlation(high, rank(volume), 5))
def alpha044(self):
df = correlation(self.high, rank(self.volume), 5)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * df
return alpha[self.start_date_index:self.end_date_index]
# Alpha#45 (-1 * ((rank((sum(delay(close, 5), 20) / 20)) * correlation(close, volume, 2)) *rank(correlation(sum(close, 5), sum(close, 20), 2))))
def alpha045(self):
df = correlation(self.close, self.volume, 2)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * (rank(sma(delay(self.close, 5), 20)) * df * rank(correlation(ts_sum(self.close, 5), ts_sum(self.close, 20), 2)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#46 ((0.25 < (((delay(close, 20) - delay(close, 10)) / 10) - ((delay(close, 10) - close) / 10))) ?(-1 * 1) : (((((delay(close, 20) - delay(close, 10)) / 10) - ((delay(close, 10) - close) / 10)) < 0) ? 1 :((-1 * 1) * (close - delay(close, 1)))))
def alpha046(self):
inner = ((delay(self.close, 20) - delay(self.close, 10)) / 10) - ((delay(self.close, 10) - self.close) / 10)
alpha = (-1 * delta(self.close))
alpha[inner < 0] = 1
alpha[inner > 0.25] = -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#47 ((((rank((1 / close)) * volume) / adv20) * ((high * rank((high - close))) / (sum(high, 5) /5))) - rank((vwap - delay(vwap, 5))))
def alpha047(self):
adv20 = sma(self.volume, 20)
alpha = ((((rank((1 / self.close)) * self.volume) / adv20) * ((self.high * rank((self.high - self.close))) / (sma(self.high, 5) /5))) - rank((self.vwap - delay(self.vwap, 5))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#48 (indneutralize(((correlation(delta(close, 1), delta(delay(close, 1), 1), 250) *delta(close, 1)) / close), IndClass.subindustry) / sum(((delta(close, 1) / delay(close, 1))^2), 250))
def alpha048(self):
indaverage_data = IndustryAverage_PreparationForAlpha048()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (correlation(delta(self.close, 1), delta(delay(self.close, 1), 1), 250) *delta(self.close, 1)) / self.close
indneutralized_data = unindneutralized_data - indaverage_data
alpha = indneutralized_data / sma(((delta(self.close, 1) / delay(self.close, 1))**2), 250)
return alpha[self.start_date_index:self.end_date_index]
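    # Note: the paper's IndNeutralize(x, IndClass.*) is approximated in this class by
    # demeaning against the cached industry average (x minus the corresponding
    # IndustryAverage_* series for this stock's industry), not by a regression-based
    # cross-sectional neutralization.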
# Alpha#49 (((((delay(close, 20) - delay(close, 10)) / 10) - ((delay(close, 10) - close) / 10)) < (-1 *0.1)) ? 1 : ((-1 * 1) * (close - delay(close, 1))))
def alpha049(self):
inner = (((delay(self.close, 20) - delay(self.close, 10)) / 10) - ((delay(self.close, 10) - self.close) / 10))
alpha = (-1 * delta(self.close))
alpha[inner < -0.1] = 1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#50 (-1 * ts_max(rank(correlation(rank(volume), rank(vwap), 5)), 5))
def alpha050(self):
alpha = (-1 * ts_max(rank(correlation(rank(self.volume), rank(self.vwap), 5)), 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#51 (((((delay(close, 20) - delay(close, 10)) / 10) - ((delay(close, 10) - close) / 10)) < (-1 *0.05)) ? 1 : ((-1 * 1) * (close - delay(close, 1))))
def alpha051(self):
inner = (((delay(self.close, 20) - delay(self.close, 10)) / 10) - ((delay(self.close, 10) - self.close) / 10))
alpha = (-1 * delta(self.close))
alpha[inner < -0.05] = 1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#52 ((((-1 * ts_min(low, 5)) + delay(ts_min(low, 5), 5)) * rank(((sum(returns, 240) -sum(returns, 20)) / 220))) * ts_rank(volume, 5))
def alpha052(self):
alpha = (((-1 * delta(ts_min(self.low, 5), 5)) * rank(((ts_sum(self.returns, 240) - ts_sum(self.returns, 20)) / 220))) * ts_rank(self.volume, 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#53 (-1 * delta((((close - low) - (high - close)) / (close - low)), 9))
def alpha053(self):
inner = (self.close - self.low).replace(0, 0.0001)
alpha = -1 * delta((((self.close - self.low) - (self.high - self.close)) / inner), 9)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#54 ((-1 * ((low - close) * (open^5))) / ((low - high) * (close^5)))
def alpha054(self):
inner = (self.low - self.high).replace(0, -0.0001)
alpha = -1 * (self.low - self.close) * (self.open ** 5) / (inner * (self.close ** 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#55 (-1 * correlation(rank(((close - ts_min(low, 12)) / (ts_max(high, 12) - ts_min(low,12)))), rank(volume), 6))
def alpha055(self):
divisor = (ts_max(self.high, 12) - ts_min(self.low, 12)).replace(0, 0.0001)
inner = (self.close - ts_min(self.low, 12)) / (divisor)
df = correlation(rank(inner), rank(self.volume), 6)
alpha = -1 * df.replace([-np.inf, np.inf], 0).fillna(value=0)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#56 (0 - (1 * (rank((sum(returns, 10) / sum(sum(returns, 2), 3))) * rank((returns * cap)))))
def alpha056(self):
alpha = (0 - (1 * (rank((sma(self.returns, 10) / sma(sma(self.returns, 2), 3))) * rank((self.returns * self.cap)))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#57 (0 - (1 * ((close - vwap) / decay_linear(rank(ts_argmax(close, 30)), 2))))
def alpha057(self):
alpha = (0 - (1 * ((self.close - self.vwap) / decay_linear(rank(ts_argmax(self.close, 30)), 2).CLOSE)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#58 (-1 * Ts_Rank(decay_linear(correlation(IndNeutralize(vwap, IndClass.sector), volume,3.92795), 7.89291), 5.50322))
def alpha058(self):
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
alpha = (-1 * ts_rank(decay_linear(correlation(indneutralized_vwap, self.volume, 4), 8), 6))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#59 (-1 * Ts_Rank(decay_linear(correlation(IndNeutralize(((vwap * 0.728317) + (vwap *(1 - 0.728317))), IndClass.industry), volume, 4.25197), 16.2289), 8.19648))
def alpha059(self):
indaverage_data = IndustryAverage_PreparationForAlpha059()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (self.vwap * 0.728317) + (self.vwap *(1 - 0.728317))
indneutralized_data = unindneutralized_data - indaverage_data
alpha = (-1 * ts_rank(decay_linear(correlation(indneutralized_data, self.volume, 4), 16), 8))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#60 (0 - (1 * ((2 * scale(rank(((((close - low) - (high - close)) / (high - low)) * volume)))) -scale(rank(ts_argmax(close, 10))))))
def alpha060(self):
divisor = (self.high - self.low).replace(0, 0.0001)
inner = ((self.close - self.low) - (self.high - self.close)) * self.volume / divisor
alpha = - ((2 * scale(rank(inner))) - scale(rank(ts_argmax(self.close, 10))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#61 (rank((vwap - ts_min(vwap, 16.1219))) < rank(correlation(vwap, adv180, 17.9282)))
def alpha061(self):
adv180 = sma(self.volume, 180)
alpha = (rank((self.vwap - ts_min(self.vwap, 16))) < rank(correlation(self.vwap, adv180, 18)))
return alpha[self.start_date_index:self.end_date_index]
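    # Comparison-style alphas such as Alpha#61 evaluate to a boolean Series (or to 0/-1 once
    # multiplied by -1, as in several alphas below); downstream code presumably treats these
    # as numeric long/short signals.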
# Alpha#62 ((rank(correlation(vwap, sum(adv20, 22.4101), 9.91009)) < rank(((rank(open) +rank(open)) < (rank(((high + low) / 2)) + rank(high))))) * -1)
def alpha062(self):
adv20 = sma(self.volume, 20)
alpha = ((rank(correlation(self.vwap, sma(adv20, 22), 10)) < rank(((rank(self.open) +rank(self.open)) < (rank(((self.high + self.low) / 2)) + rank(self.high))))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#63 ((rank(decay_linear(delta(IndNeutralize(close, IndClass.industry), 2.25164), 8.22237))- rank(decay_linear(correlation(((vwap * 0.318108) + (open * (1 - 0.318108))), sum(adv180,37.2467), 13.557), 12.2883))) * -1)
def alpha063(self):
indaverage_close = IndustryAverage_close()
indaverage_close = indaverage_close[indaverage_close.index.isin(self.available_dates)]
indaverage_close = indaverage_close[self.industry]
indaverage_close = indaverage_close.reset_index(drop=True)
indneutralized_close = self.close - indaverage_close
adv180 = sma(self.volume, 180)
alpha = ((rank(decay_linear(delta(indneutralized_close, 2), 8))- rank(decay_linear(correlation(((self.vwap * 0.318108) + (self.open * (1 - 0.318108))), sma(adv180, 38), 14), 12))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#64 ((rank(correlation(sum(((open * 0.178404) + (low * (1 - 0.178404))), 12.7054),sum(adv120, 12.7054), 16.6208)) < rank(delta(((((high + low) / 2) * 0.178404) + (vwap * (1 -0.178404))), 3.69741))) * -1)
def alpha064(self):
adv120 = sma(self.volume, 120)
        alpha = ((rank(correlation(sma(((self.open * 0.178404) + (self.low * (1 - 0.178404))), 13), sma(adv120, 13), 17)) < rank(delta(((((self.high + self.low) / 2) * 0.178404) + (self.vwap * (1 - 0.178404))), 4))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#65 ((rank(correlation(((open * 0.00817205) + (vwap * (1 - 0.00817205))), sum(adv60,8.6911), 6.40374)) < rank((open - ts_min(open, 13.635)))) * -1)
def alpha065(self):
adv60 = sma(self.volume, 60)
alpha = ((rank(correlation(((self.open * 0.00817205) + (self.vwap * (1 - 0.00817205))), sma(adv60,9), 6)) < rank((self.open - ts_min(self.open, 14)))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#66 ((rank(decay_linear(delta(vwap, 3.51013), 7.23052)) + Ts_Rank(decay_linear(((((low* 0.96633) + (low * (1 - 0.96633))) - vwap) / (open - ((high + low) / 2))), 11.4157), 6.72611)) * -1)
def alpha066(self):
alpha = ((rank(decay_linear(delta(self.vwap, 4), 7).CLOSE) + ts_rank(decay_linear(((((self.low* 0.96633) + (self.low * (1 - 0.96633))) - self.vwap) / (self.open - ((self.high + self.low) / 2))), 11).CLOSE, 7)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#67 ((rank((high - ts_min(high, 2.14593)))^rank(correlation(IndNeutralize(vwap,IndClass.sector), IndNeutralize(adv20, IndClass.subindustry), 6.02936))) * -1)
def alpha067(self):
indaverage_adv20 = IndustryAverage_adv(20)
indaverage_adv20 = indaverage_adv20[indaverage_adv20.index.isin(self.available_dates)]
indaverage_adv20 = indaverage_adv20[self.industry]
indaverage_adv20 = indaverage_adv20.reset_index(drop=True)
adv20 = sma(self.volume, 20)
indneutralized_adv20 = adv20 - indaverage_adv20
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
alpha = rank((self.high - ts_min(self.high, 2))) ** rank(correlation(indneutralized_vwap, indneutralized_adv20, 6)) * -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#68 ((Ts_Rank(correlation(rank(high), rank(adv15), 8.91644), 13.9333) <rank(delta(((close * 0.518371) + (low * (1 - 0.518371))), 1.06157))) * -1)
def alpha068(self):
adv15 = sma(self.volume, 15)
        alpha = ((ts_rank(correlation(rank(self.high), rank(adv15), 9), 14) < rank(delta(((self.close * 0.518371) + (self.low * (1 - 0.518371))), 1))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#69 ((rank(ts_max(delta(IndNeutralize(vwap, IndClass.industry), 2.72412),4.79344))^Ts_Rank(correlation(((close * 0.490655) + (vwap * (1 - 0.490655))), adv20, 4.92416),9.0615)) * -1)
def alpha069(self):
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
adv20 = sma(self.volume, 20)
alpha = ((rank(ts_max(delta(indneutralized_vwap, 3),5)) ** ts_rank(correlation(((self.close * 0.490655) + (self.vwap * (1 - 0.490655))), adv20, 5),9)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#70 ((rank(delta(vwap, 1.29456))^Ts_Rank(correlation(IndNeutralize(close,IndClass.industry), adv50, 17.8256), 17.9171)) * -1)
def alpha070(self):
indaverage_close = IndustryAverage_close()
indaverage_close = indaverage_close[indaverage_close.index.isin(self.available_dates)]
indaverage_close = indaverage_close[self.industry]
indaverage_close = indaverage_close.reset_index(drop=True)
indneutralized_close = self.close - indaverage_close
adv50 = sma(self.volume, 50)
alpha = (rank(delta(self.vwap, 1)) ** ts_rank(correlation(indneutralized_close, adv50, 18), 18)) * -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#71 max(Ts_Rank(decay_linear(correlation(Ts_Rank(close, 3.43976), Ts_Rank(adv180,12.0647), 18.0175), 4.20501), 15.6948), Ts_Rank(decay_linear((rank(((low + open) - (vwap +vwap)))^2), 16.4662), 4.4388))
def alpha071(self):
adv180 = sma(self.volume, 180)
p1=ts_rank(decay_linear(correlation(ts_rank(self.close, 3), ts_rank(adv180,12), 18), 4).CLOSE, 16)
p2=ts_rank(decay_linear((rank(((self.low + self.open) - (self.vwap +self.vwap))).pow(2)), 16).CLOSE, 4)
df=pd.DataFrame({'p1':p1,'p2':p2})
        df.loc[df['p1']>=df['p2'],'max']=df['p1']
        df.loc[df['p2']>=df['p1'],'max']=df['p2']
alpha = df['max']
#alpha = max(ts_rank(decay_linear(correlation(ts_rank(self.close, 3), ts_rank(adv180,12), 18).to_frame(), 4).CLOSE, 16), ts_rank(decay_linear((rank(((self.low + self.open) - (self.vwap +self.vwap))).pow(2)).to_frame(), 16).CLOSE, 4))
return alpha[self.start_date_index:self.end_date_index]
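    # The p1/p2 DataFrame with a 'max' (or 'min') column used here and in several alphas
    # below is just an element-wise maximum/minimum of the two series, essentially
    # np.maximum(p1, p2) / np.minimum(p1, p2); the explicit form is kept to preserve the
    # original structure.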
# Alpha#72 (rank(decay_linear(correlation(((high + low) / 2), adv40, 8.93345), 10.1519)) /rank(decay_linear(correlation(Ts_Rank(vwap, 3.72469), Ts_Rank(volume, 18.5188), 6.86671),2.95011)))
def alpha072(self):
adv40 = sma(self.volume, 40)
alpha = (rank(decay_linear(correlation(((self.high + self.low) / 2), adv40, 9).to_frame(), 10).CLOSE) /rank(decay_linear(correlation(ts_rank(self.vwap, 4), ts_rank(self.volume, 19), 7).to_frame(),3).CLOSE))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#73 (max(rank(decay_linear(delta(vwap, 4.72775), 2.91864)),Ts_Rank(decay_linear(((delta(((open * 0.147155) + (low * (1 - 0.147155))), 2.03608) / ((open *0.147155) + (low * (1 - 0.147155)))) * -1), 3.33829), 16.7411)) * -1)
def alpha073(self):
p1=rank(decay_linear(delta(self.vwap, 5).to_frame(), 3).CLOSE)
p2=ts_rank(decay_linear(((delta(((self.open * 0.147155) + (self.low * (1 - 0.147155))), 2) / ((self.open *0.147155) + (self.low * (1 - 0.147155)))) * -1).to_frame(), 3).CLOSE, 17)
df=pd.DataFrame({'p1':p1,'p2':p2})
        df.loc[df['p1']>=df['p2'],'max']=df['p1']
        df.loc[df['p2']>=df['p1'],'max']=df['p2']
alpha = -1 * df['max']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#74 ((rank(correlation(close, sum(adv30, 37.4843), 15.1365)) <rank(correlation(rank(((high * 0.0261661) + (vwap * (1 - 0.0261661)))), rank(volume), 11.4791)))* -1)
def alpha074(self):
adv30 = sma(self.volume, 30)
alpha = ((rank(correlation(self.close, sma(adv30, 37), 15)) <rank(correlation(rank(((self.high * 0.0261661) + (self.vwap * (1 - 0.0261661)))), rank(self.volume), 11)))* -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#75 (rank(correlation(vwap, volume, 4.24304)) < rank(correlation(rank(low), rank(adv50),12.4413)))
def alpha075(self):
adv50 = sma(self.volume, 50)
alpha = (rank(correlation(self.vwap, self.volume, 4)) < rank(correlation(rank(self.low), rank(adv50),12)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#76 (max(rank(decay_linear(delta(vwap, 1.24383), 11.8259)),Ts_Rank(decay_linear(Ts_Rank(correlation(IndNeutralize(low, IndClass.sector), adv81,8.14941), 19.569), 17.1543), 19.383)) * -1)
def alpha076(self):
indaverage_low = IndustryAverage_low()
indaverage_low = indaverage_low[indaverage_low.index.isin(self.available_dates)]
indaverage_low = indaverage_low[self.industry]
indaverage_low = indaverage_low.reset_index(drop=True)
indneutralized_low = self.low - indaverage_low
adv81 = sma(self.volume, 81)
p1 = rank(decay_linear(delta(self.vwap.to_frame(), 1), 12))
p2 = ts_rank(decay_linear(ts_rank(correlation(indneutralized_low, adv81, 8).to_frame(), 20), 17), 19)
p1=p1.iloc[:,0]
p2=p2.iloc[:,0]
df=pd.DataFrame({'p1':p1,'p2':p2})
        df.loc[df['p1']>=df['p2'],'max']=df['p1']
        df.loc[df['p2']>=df['p1'],'max']=df['p2']
alpha = -1 * df['max']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#77 min(rank(decay_linear(((((high + low) / 2) + high) - (vwap + high)), 20.0451)),rank(decay_linear(correlation(((high + low) / 2), adv40, 3.1614), 5.64125)))
def alpha077(self):
adv40 = sma(self.volume, 40)
p1=rank(decay_linear(((((self.high + self.low) / 2) + self.high) - (self.vwap + self.high)).to_frame(), 20).CLOSE)
p2=rank(decay_linear(correlation(((self.high + self.low) / 2), adv40, 3).to_frame(), 6).CLOSE)
df=pd.DataFrame({'p1':p1,'p2':p2})
        df.loc[df['p1']>=df['p2'],'min']=df['p2']
        df.loc[df['p2']>=df['p1'],'min']=df['p1']
alpha = df['min']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#78 (rank(correlation(sum(((low * 0.352233) + (vwap * (1 - 0.352233))), 19.7428),sum(adv40, 19.7428), 6.83313))^rank(correlation(rank(vwap), rank(volume), 5.77492)))
def alpha078(self):
adv40 = sma(self.volume, 40)
alpha = (rank(correlation(ts_sum(((self.low * 0.352233) + (self.vwap * (1 - 0.352233))), 20),ts_sum(adv40,20), 7)).pow(rank(correlation(rank(self.vwap), rank(self.volume), 6))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#79 (rank(delta(IndNeutralize(((close * 0.60733) + (open * (1 - 0.60733))),IndClass.sector), 1.23438)) < rank(correlation(Ts_Rank(vwap, 3.60973), Ts_Rank(adv150,9.18637), 14.6644)))
def alpha079(self):
indaverage_data = IndustryAverage_PreparationForAlpha079()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (self.close * 0.60733) + (self.open * (1 - 0.60733))
indneutralized_data = unindneutralized_data - indaverage_data
adv150 = sma(self.volume, 150)
alpha = (rank(delta(indneutralized_data, 1)) < rank(correlation(ts_rank(self.vwap, 4), ts_rank(adv150, 9), 15))) *1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#80 ((rank(Sign(delta(IndNeutralize(((open * 0.868128) + (high * (1 - 0.868128))),IndClass.industry), 4.04545)))^Ts_Rank(correlation(high, adv10, 5.11456), 5.53756)) * -1)
def alpha080(self):
indaverage_data = IndustryAverage_PreparationForAlpha080()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (self.open * 0.868128) + (self.high * (1 - 0.868128))
indneutralized_data = unindneutralized_data - indaverage_data
adv10 = sma(self.volume, 10)
alpha = rank(sign(delta(indneutralized_data, 4))) ** (ts_rank(correlation(self.high, adv10, 5), 6)) * -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#81 ((rank(Log(product(rank((rank(correlation(vwap, sum(adv10, 49.6054),8.47743))^4)), 14.9655))) < rank(correlation(rank(vwap), rank(volume), 5.07914))) * -1)
def alpha081(self):
adv10 = sma(self.volume, 10)
alpha = ((rank(log(product(rank((rank(correlation(self.vwap, ts_sum(adv10, 50),8)).pow(4))), 15))) < rank(correlation(rank(self.vwap), rank(self.volume), 5))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#82 (min(rank(decay_linear(delta(open, 1.46063), 14.8717)),Ts_Rank(decay_linear(correlation(IndNeutralize(volume, IndClass.sector), ((open * 0.634196) +(open * (1 - 0.634196))), 17.4842), 6.92131), 13.4283)) * -1)
def alpha082(self):
indaverage_volume = IndustryAverage_volume()
indaverage_volume = indaverage_volume[indaverage_volume.index.isin(self.available_dates)]
indaverage_volume = indaverage_volume[self.industry]
indaverage_volume = indaverage_volume.reset_index(drop=True)
indneutralized_volume = self.volume - indaverage_volume
p1 = rank(decay_linear(delta(self.open, 1).to_frame(), 15))
p2 = ts_rank(decay_linear(correlation(indneutralized_volume, ((self.open * 0.634196)+(self.open * (1 - 0.634196))), 17).to_frame(), 7), 13)
p1=p1.iloc[:,0]
p2=p2.iloc[:,0]
df=pd.DataFrame({'p1':p1,'p2':p2})
        df.loc[df['p1']>=df['p2'],'min']=df['p2']
        df.loc[df['p2']>=df['p1'],'min']=df['p1']
alpha = -1 * df['min']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#83 ((rank(delay(((high - low) / (sum(close, 5) / 5)), 2)) * rank(rank(volume))) / (((high -low) / (sum(close, 5) / 5)) / (vwap - close)))
def alpha083(self):
alpha = ((rank(delay(((self.high - self.low) / (ts_sum(self.close, 5) / 5)), 2)) * rank(rank(self.volume))) / (((self.high -self.low) / (ts_sum(self.close, 5) / 5)) / (self.vwap - self.close)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#84 SignedPower(Ts_Rank((vwap - ts_max(vwap, 15.3217)), 20.7127), delta(close,4.96796))
def alpha084(self):
alpha = pow(ts_rank((self.vwap - ts_max(self.vwap, 15)), 21), delta(self.close,5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#85 (rank(correlation(((high * 0.876703) + (close * (1 - 0.876703))), adv30,9.61331))^rank(correlation(Ts_Rank(((high + low) / 2), 3.70596), Ts_Rank(volume, 10.1595),7.11408)))
def alpha085(self):
adv30 = sma(self.volume, 30)
alpha = (rank(correlation(((self.high * 0.876703) + (self.close * (1 - 0.876703))), adv30,10)).pow(rank(correlation(ts_rank(((self.high + self.low) / 2), 4), ts_rank(self.volume, 10),7))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#86 ((Ts_Rank(correlation(close, sum(adv20, 14.7444), 6.00049), 20.4195) < rank(((open+ close) - (vwap + open)))) * -1)
def alpha086(self):
adv20 = sma(self.volume, 20)
alpha = ((ts_rank(correlation(self.close, sma(adv20, 15), 6), 20) < rank(((self.open+ self.close) - (self.vwap +self.open)))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#87 (max(rank(decay_linear(delta(((close * 0.369701) + (vwap * (1 - 0.369701))),1.91233), 2.65461)), Ts_Rank(decay_linear(abs(correlation(IndNeutralize(adv81,IndClass.industry), close, 13.4132)), 4.89768), 14.4535)) * -1)
def alpha087(self):
indaverage_adv81 = IndustryAverage_adv(81)
indaverage_adv81 = indaverage_adv81[indaverage_adv81.index.isin(self.available_dates)]
indaverage_adv81 = indaverage_adv81[self.industry]
indaverage_adv81 = indaverage_adv81.reset_index(drop=True)
adv81 = sma(self.volume, 81)
indneutralized_adv81 = adv81 - indaverage_adv81
p1 = rank(decay_linear(delta(((self.close * 0.369701) + (self.vwap * (1 - 0.369701))),2).to_frame(), 3))
p2 = ts_rank(decay_linear(abs(correlation(indneutralized_adv81, self.close, 13)), 5), 14)
p1=p1.iloc[:,0]
p2=p2.iloc[:,0]
df=pd.DataFrame({'p1':p1,'p2':p2})
        df.loc[df['p1']>=df['p2'],'max']=df['p1']
        df.loc[df['p2']>=df['p1'],'max']=df['p2']
alpha = -1 * df['max']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#88 min(rank(decay_linear(((rank(open) + rank(low)) - (rank(high) + rank(close))),8.06882)), Ts_Rank(decay_linear(correlation(Ts_Rank(close, 8.44728), Ts_Rank(adv60,20.6966), 8.01266), 6.65053), 2.61957))
def alpha088(self):
adv60 = sma(self.volume, 60)
p1=rank(decay_linear(((rank(self.open) + rank(self.low)) - (rank(self.high) + rank(self.close))).to_frame(),8).CLOSE)
p2=ts_rank(decay_linear(correlation(ts_rank(self.close, 8), ts_rank(adv60,21), 8).to_frame(), 7).CLOSE, 3)
df=pd.DataFrame({'p1':p1,'p2':p2})
        df.loc[df['p1']>=df['p2'],'min']=df['p2']
        df.loc[df['p2']>=df['p1'],'min']=df['p1']
alpha = df['min']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#89 (Ts_Rank(decay_linear(correlation(((low * 0.967285) + (low * (1 - 0.967285))), adv10,6.94279), 5.51607), 3.79744) - Ts_Rank(decay_linear(delta(IndNeutralize(vwap,IndClass.industry), 3.48158), 10.1466), 15.3012))
def alpha089(self):
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
adv10 = sma(self.volume, 10)
        alpha = ts_rank(decay_linear(correlation(((self.low * 0.967285) + (self.low * (1 - 0.967285))), adv10, 7), 6), 4) - ts_rank(decay_linear(delta(indneutralized_vwap, 3), 10), 15)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#90 ((rank((close - ts_max(close, 4.66719)))^Ts_Rank(correlation(IndNeutralize(adv40,IndClass.subindustry), low, 5.38375), 3.21856)) * -1)
def alpha090(self):
indaverage_adv40 = IndustryAverage_adv(40)
indaverage_adv40 = indaverage_adv40[indaverage_adv40.index.isin(self.available_dates)]
indaverage_adv40 = indaverage_adv40[self.industry]
indaverage_adv40 = indaverage_adv40.reset_index(drop=True)
adv40 = sma(self.volume, 40)
indneutralized_adv40 = adv40 - indaverage_adv40
alpha = ((rank((self.close - ts_max(self.close, 5))) ** ts_rank(correlation(indneutralized_adv40, self.low, 5), 3)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#91 ((Ts_Rank(decay_linear(decay_linear(correlation(IndNeutralize(close,IndClass.industry), volume, 9.74928), 16.398), 3.83219), 4.8667) -rank(decay_linear(correlation(vwap, adv30, 4.01303), 2.6809))) * -1)
def alpha091(self):
indaverage_close = IndustryAverage_close()
indaverage_close = indaverage_close[indaverage_close.index.isin(self.available_dates)]
indaverage_close = indaverage_close[self.industry]
indaverage_close = indaverage_close.reset_index(drop=True)
indneutralized_close = self.close - indaverage_close
adv30 = sma(self.volume, 30)
alpha = ((ts_rank(decay_linear(decay_linear(correlation(indneutralized_close, self.volume, 10), 16), 4), 5) -rank(decay_linear(correlation(self.vwap, adv30, 4), 3))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#92 min(Ts_Rank(decay_linear(((((high + low) / 2) + close) < (low + open)), 14.7221),18.8683), Ts_Rank(decay_linear(correlation(rank(low), rank(adv30), 7.58555), 6.94024),6.80584))
def alpha092(self):
adv30 = sma(self.volume, 30)
p1=ts_rank(decay_linear(((((self.high + self.low) / 2) + self.close) < (self.low + self.open)).to_frame(), 15).CLOSE,19)
p2=ts_rank(decay_linear(correlation(rank(self.low), rank(adv30), 8).to_frame(), 7).CLOSE,7)
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'min']=df['p2']
df.at[df['p2']>=df['p1'],'min']=df['p1']
alpha = df['min']
return alpha[self.start_date_index:self.end_date_index]
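# decay_linear(x, d), used throughout these alphas, is the weighted moving average over the
# past d days with linearly decaying weights d, d-1, ..., 1 rescaled to sum to 1 (the
# "101 Formulaic Alphas" definition). A minimal sketch of that weighting, assuming x is a
# pandas Series and the helper follows this convention:
#   w = np.arange(1, d + 1); w = w / w.sum()
#   decayed = x.rolling(d).apply(lambda a: np.dot(a, w), raw=True)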
# Alpha#93 (Ts_Rank(decay_linear(correlation(IndNeutralize(vwap, IndClass.industry), adv81,17.4193), 19.848), 7.54455) / rank(decay_linear(delta(((close * 0.524434) + (vwap * (1 -0.524434))), 2.77377), 16.2664)))
def alpha093(self):
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
adv81 = sma(self.volume, 81)
alpha = (ts_rank(decay_linear(correlation(indneutralized_vwap, adv81, 17).to_frame(), 20), 8) / rank(decay_linear(delta(((self.close * 0.524434) + (self.vwap * (1 -0.524434))), 3).to_frame(), 16)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#94 ((rank((vwap - ts_min(vwap, 11.5783)))^Ts_Rank(correlation(Ts_Rank(vwap,19.6462), Ts_Rank(adv60, 4.02992), 18.0926), 2.70756)) * -1)
def alpha094(self):
adv60 = sma(self.volume, 60)
alpha = ((rank((self.vwap - ts_min(self.vwap, 12))).pow(ts_rank(correlation(ts_rank(self.vwap,20), ts_rank(adv60, 4), 18), 3)) * -1))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#95 (rank((open - ts_min(open, 12.4105))) < Ts_Rank((rank(correlation(sum(((high + low)/ 2), 19.1351), sum(adv40, 19.1351), 12.8742))^5), 11.7584))
def alpha095(self):
adv40 = sma(self.volume, 40)
alpha = (rank((self.open - ts_min(self.open, 12))) < ts_rank((rank(correlation(sma(((self.high + self.low)/ 2), 19), sma(adv40, 19), 13)).pow(5)), 12))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#96 (max(Ts_Rank(decay_linear(correlation(rank(vwap), rank(volume), 3.83878),4.16783), 8.38151), Ts_Rank(decay_linear(Ts_ArgMax(correlation(Ts_Rank(close, 7.45404),Ts_Rank(adv60, 4.13242), 3.65459), 12.6556), 14.0365), 13.4143)) * -1)
def alpha096(self):
adv60 = sma(self.volume, 60)
p1=ts_rank(decay_linear(correlation(rank(self.vwap), rank(self.volume).to_frame(), 4),4).CLOSE, 8)
p2=ts_rank(decay_linear(ts_argmax(correlation(ts_rank(self.close, 7),ts_rank(adv60, 4), 4), 13).to_frame(), 14).CLOSE, 13)
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'max']=df['p1']
df.at[df['p2']>=df['p1'],'max']=df['p2']
alpha = -1*df['max']
#alpha = (max(ts_rank(decay_linear(correlation(rank(self.vwap), rank(self.volume).to_frame(), 4),4).CLOSE, 8), ts_rank(decay_linear(ts_argmax(correlation(ts_rank(self.close, 7),ts_rank(adv60, 4), 4), 13).to_frame(), 14).CLOSE, 13)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#97 ((rank(decay_linear(delta(IndNeutralize(((low * 0.721001) + (vwap * (1 - 0.721001))),IndClass.industry), 3.3705), 20.4523)) - Ts_Rank(decay_linear(Ts_Rank(correlation(Ts_Rank(low,7.87871), Ts_Rank(adv60, 17.255), 4.97547), 18.5925), 15.7152), 6.71659)) * -1)
def alpha097(self):
indaverage_data = IndustryAverage_PreparationForAlpha097()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (self.low * 0.721001) + (self.vwap * (1 - 0.721001))
indneutralized_data = unindneutralized_data - indaverage_data
adv60 = sma(self.volume, 60)
alpha = ((rank(decay_linear(delta(indneutralized_data, 3).to_frame(), 20)) - ts_rank(decay_linear(ts_rank(correlation(ts_rank(self.low,8), ts_rank(adv60, 17), 5), 19).to_frame(), 16), 7)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#98 (rank(decay_linear(correlation(vwap, sum(adv5, 26.4719), 4.58418), 7.18088)) -rank(decay_linear(Ts_Rank(Ts_ArgMin(correlation(rank(open), rank(adv15), 20.8187), 8.62571),6.95668), 8.07206)))
def alpha098(self):
adv5 = sma(self.volume, 5)
adv15 = sma(self.volume, 15)
alpha = (rank(decay_linear(correlation(self.vwap, sma(adv5, 26), 5).to_frame(), 7).CLOSE) -rank(decay_linear(ts_rank(ts_argmin(correlation(rank(self.open), rank(adv15), 21), 9),7).to_frame(), 8).CLOSE))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#99 ((rank(correlation(sum(((high + low) / 2), 19.8975), sum(adv60, 19.8975), 8.8136)) <rank(correlation(low, volume, 6.28259))) * -1)
def alpha099(self):
adv60 = sma(self.volume, 60)
alpha = ((rank(correlation(ts_sum(((self.high + self.low) / 2), 20), ts_sum(adv60, 20), 9)) <rank(correlation(self.low, self.volume, 6))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#100 (0 - (1 * (((1.5 * scale(indneutralize(indneutralize(rank(((((close - low) - (high -close)) / (high - low)) * volume)), IndClass.subindustry), IndClass.subindustry))) -scale(indneutralize((correlation(close, rank(adv20), 5) - rank(ts_argmin(close, 30))),IndClass.subindustry))) * (volume / adv20))))
def alpha100(self):
indaverage_data_1 = IndustryAverage_PreparationForAlpha100_1()
indaverage_data_1 = indaverage_data_1[indaverage_data_1.index.isin(self.available_dates)]
indaverage_data_1 = indaverage_data_1[self.industry]
indaverage_data_1 = indaverage_data_1.reset_index(drop=True)
unindneutralized_data_1 = rank(((((self.close - self.low) - (self.high - self.close)) / (self.high - self.low)) * self.volume))
indneutralized_data_1 = unindneutralized_data_1 - indaverage_data_1 #there's a problem in calculation here.
indaverage_data_2 = IndustryAverage_PreparationForAlpha100_2()
indaverage_data_2 = indaverage_data_2[indaverage_data_2.index.isin(self.available_dates)]
indaverage_data_2 = indaverage_data_2[self.industry]
indaverage_data_2 = indaverage_data_2.reset_index(drop=True)
adv20 = sma(self.volume, 20)
unindneutralized_data_2 = (correlation(self.close, rank(adv20), 5) - rank(ts_argmin(self.close, 30)))
indneutralized_data_2 = unindneutralized_data_2 - indaverage_data_2
alpha = (0 - (1 * (((1.5 * scale(indneutralized_data_1))-scale(indneutralized_data_2)) * (self.volume / adv20))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#101 ((close - open) / ((high - low) + .001))
def alpha101(self):
alpha = (self.close - self.open) /((self.high - self.low) + 0.001)
return alpha[self.start_date_index:self.end_date_index]
class GTJAalphas(object):
def __init__(self, ts_code="000001.SZ",start_date=20210101,end_date=20211231):
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition="TS_CODE = " + "'" + ts_code + "'").sort_values(by="TRADE_DATE", ascending=True)
stock_indicators_daily_chosen=local_source.get_stock_indicators_daily(cols='TRADE_DATE,TS_CODE,TOTAL_SHARE',condition="TS_CODE = " + "'" + ts_code + "'").sort_values(by="TRADE_DATE", ascending=True)
stock_data_chosen=pd.merge(quotations_daily_chosen,stock_indicators_daily_chosen,on=['TRADE_DATE','TS_CODE'],how="left")
stock_data_chosen["TOTAL_MV"]=stock_data_chosen["TOTAL_SHARE"]*stock_data_chosen["CLOSE"]
stock_data_chosen=stock_data_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
stock_data_chosen["TRADE_DATE"]=stock_data_chosen["TRADE_DATE"].astype(int)
self.open = stock_data_chosen['OPEN']
self.high = stock_data_chosen['HIGH']
self.low = stock_data_chosen['LOW']
self.close = stock_data_chosen['CLOSE']
self.volume = stock_data_chosen['VOL']*100
self.amount = stock_data_chosen['AMOUNT']*1000
self.returns = stock_data_chosen['CHANGE'] / stock_data_chosen['OPEN']
self.vwap = (stock_data_chosen['AMOUNT']*1000)/(stock_data_chosen['VOL']*100+1)
self.cap = stock_data_chosen['TOTAL_MV']
self.industry = local_source.get_stock_list(cols='TS_CODE,INDUSTRY', condition='TS_CODE = '+'"'+ts_code+'"')['INDUSTRY'].iloc[0]
self.available_dates = stock_data_chosen["TRADE_DATE"]
if ts_code[-2:]=='SZ': index_code = "399001.SZ"
else: index_code = "000001.SH"
indices_daily_chosen=local_source.get_indices_daily(cols='TRADE_DATE,INDEX_CODE,OPEN,CLOSE',condition='INDEX_CODE = '+'"'+index_code+'"').sort_values(by="TRADE_DATE", ascending=True)
indices_daily_chosen=indices_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
indices_daily_chosen = pd.merge(stock_data_chosen["TRADE_DATE"], indices_daily_chosen, on=['TRADE_DATE'], how="left")
self.benchmarkindexopen = indices_daily_chosen['OPEN']
self.benchmarkindexclose = indices_daily_chosen['CLOSE']
output_dates = stock_data_chosen[(stock_data_chosen["TRADE_DATE"]>=start_date) & (stock_data_chosen["TRADE_DATE"]<=end_date)]["TRADE_DATE"]
start_available_date = output_dates.iloc[0]
end_available_date = output_dates.iloc[-1]
self.start_date_index = stock_data_chosen["TRADE_DATE"][stock_data_chosen["TRADE_DATE"].values == start_available_date].index[0]
self.end_date_index = stock_data_chosen["TRADE_DATE"][stock_data_chosen["TRADE_DATE"].values == end_available_date].index[0] +1
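# Minimal usage sketch (hypothetical parameters; assumes local_source and the helper
# functions used below are importable in this module):
#   factors = GTJAalphas(ts_code="000001.SZ", start_date=20210101, end_date=20211231)
#   a1 = factors.GTJAalpha001()   # pandas Series sliced to the requested date range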
#Alpha1 (-1 * CORR(RANK(DELTA(LOG(VOLUME), 1)), RANK(((CLOSE - OPEN) / OPEN)), 6))
def GTJAalpha001(self):
alpha = -1 * correlation(rank(delta(np.log(self.volume),1)),rank(((self.close-self.open)/self.open)), 6)
return alpha[self.start_date_index:self.end_date_index]
#Alpha2 (-1 * DELTA((((CLOSE - LOW) - (HIGH - CLOSE)) / (HIGH - LOW)), 1))
def GTJAalpha002(self):
alpha = -1 * delta((((self.close - self.low) - (self.high - self.close)) / (self.high - self.low)), 1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha3 SUM((CLOSE=DELAY(CLOSE,1)?0:CLOSE-(CLOSE>DELAY(CLOSE,1)?MIN(LOW,DELAY(CLOSE,1)):MAX(HIGH,DELAY(CLOSE,1)))),6)
def GTJAalpha003(self):
delay1 = self.close.shift()
condition1 = (self.close > delay1)
inner1_true = np.minimum(self.low, delay1)
inner1_false = np.maximum(self.high, delay1)
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close == delay1)
inner2_true = pd.Series(np.zeros(len(condition2)))
inner2_false = self.close - inner1
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ts_sum(inner2, 6)
return alpha[self.start_date_index:self.end_date_index]
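# Translation pattern used here and in the later alphas: a nested ternary
# COND1 ? A : (COND2 ? B : C) is evaluated innermost-first with np.where, e.g.
#   inner = pd.Series(np.where(cond2, B, C))
#   alpha = pd.Series(np.where(cond1, A, inner))
# so each "?:" level of the GTJA formula becomes one np.where call.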
#Alpha4 ((((SUM(CLOSE, 8) / 8) + STD(CLOSE, 8)) < (SUM(CLOSE, 2) / 2)) ? (-1 * 1) : (((SUM(CLOSE, 2) / 2) < ((SUM(CLOSE, 8) / 8) - STD(CLOSE, 8))) ? 1 : (((1 < (VOLUME / MEAN(VOLUME,20))) || ((VOLUME / MEAN(VOLUME,20)) == 1)) ? 1 : (-1 * 1))))
def GTJAalpha004(self):
condition1 = ((1 < (self.volume / sma(self.volume,20))) | ((self.volume / sma(self.volume,20)) == 1))
inner1_true = pd.Series(np.ones(len(condition1)))
inner1_false = -1 * pd.Series(np.ones(len(condition1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false)) #innermost branch: volume condition -> 1 else -1
condition2 = ((ts_sum(self.close, 2) / 2) < ((ts_sum(self.close, 8) / 8) - stddev(self.close, 8)))
inner2 = pd.Series(np.where(condition2, 1, inner1)) #middle branch returns 1
condition3 = (((ts_sum(self.close, 8) / 8) + stddev(self.close, 8)) < (ts_sum(self.close, 2) / 2))
alpha = pd.Series(np.where(condition3, -1, inner2)) #outermost branch returns -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha5 (-1 * TSMAX(CORR(TSRANK(VOLUME, 5), TSRANK(HIGH, 5), 5), 3))
def GTJAalpha005(self):
alpha = -1 * ts_max(correlation(ts_rank(self.volume,5), ts_rank(self.high,5), 5) ,3)
return alpha[self.start_date_index:self.end_date_index]
#Alpha6 (RANK(SIGN(DELTA((((OPEN * 0.85) + (HIGH * 0.15))), 4)))* -1)
def GTJAalpha006(self):
alpha = rolling_rank(sign(delta((((self.open * 0.85) + (self.high * 0.15))), 4)))* -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha7 ((RANK(MAX((VWAP - CLOSE), 3)) + RANK(MIN((VWAP - CLOSE), 3))) * RANK(DELTA(VOLUME, 3)))
def GTJAalpha007(self):
alpha = (rolling_rank(np.maximum((self.vwap - self.close), 3)) + rolling_rank(np.minimum((self.vwap - self.close), 3))) * rolling_rank(delta(self.volume, 3))
return alpha[self.start_date_index:self.end_date_index]
#Alpha8 RANK(DELTA(((((HIGH + LOW) / 2) * 0.2) + (VWAP * 0.8)), 4) * -1)
def GTJAalpha008(self):
alpha = rolling_rank(delta(((((self.high + self.low) / 2) * 0.2) + (self.vwap * 0.8)), 4) * -1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha9 SMA(((HIGH+LOW)/2-(DELAY(HIGH,1)+DELAY(LOW,1))/2)*(HIGH-LOW)/VOLUME,7,2)
def GTJAalpha009(self):
alpha = ema(((self.high+self.low)/2-(delay(self.high,1)+delay(self.low,1))/2)*(self.high-self.low)/self.volume,7,2)
return alpha[self.start_date_index:self.end_date_index]
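# SMA(A, n, m) in the GTJA formulas is the recursive smoother
#   Y_t = (m * A_t + (n - m) * Y_{t-1}) / n,
# i.e. an EMA with smoothing factor m/n; ema(x, 7, 2) above is assumed to implement
# exactly this recursion (smoothing factor 2/7).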
#Alpha10 (RANK(MAX(((RET < 0) ? STD(RET, 20) : CLOSE)^2),5))
def GTJAalpha010(self):
condition1 = (self.returns < 0)
inner1_true = stddev(self.returns, 20)
inner1_false = self.close
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
alpha = rolling_rank(np.maximum(inner1**2, 5))
return alpha[self.start_date_index:self.end_date_index]
#Alpha11 SUM(((CLOSE-LOW)-(HIGH-CLOSE))./(HIGH-LOW).*VOLUME,6)
def GTJAalpha011(self):
alpha = ts_sum(((self.close-self.low)-(self.high-self.close))/(self.high-self.low)*self.volume,6)
return alpha[self.start_date_index:self.end_date_index]
#Alpha12 (RANK((OPEN - (SUM(VWAP, 10) / 10)))) * (-1 * (RANK(ABS((CLOSE - VWAP)))))
def GTJAalpha012(self):
alpha = rolling_rank((self.open - (ts_sum(self.vwap, 10) / 10))) * -1 * (rolling_rank(abs((self.close - self.vwap))))
return alpha[self.start_date_index:self.end_date_index]
#Alpha13 (((HIGH * LOW)^0.5) - VWAP)
def GTJAalpha013(self):
alpha = ((self.high * self.low)**0.5) - self.vwap
return alpha[self.start_date_index:self.end_date_index]
#Alpha14 CLOSE-DELAY(CLOSE,5)
def GTJAalpha014(self):
alpha = self.close - delay(self.close,5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha15 OPEN/DELAY(CLOSE,1)-1
def GTJAalpha015(self):
alpha = (self.open/delay(self.close, 1)) -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha16 (-1 * TSMAX(RANK(CORR(RANK(VOLUME), RANK(VWAP), 5)), 5))
def GTJAalpha016(self):
alpha = -1 * ts_max(rolling_rank(correlation(rolling_rank(self.volume), rolling_rank(self.vwap), 5)), 5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha17 RANK((VWAP - MAX(VWAP, 15)))^DELTA(CLOSE, 5)
def GTJAalpha017(self):
alpha = rolling_rank((self.vwap - np.maximum(self.vwap, 15)))**delta(self.close, 5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha18 CLOSE/DELAY(CLOSE,5)
def GTJAalpha018(self):
alpha = self.close / delay(self.close, 5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha19 (CLOSE<DELAY(CLOSE,5)?(CLOSE-DELAY(CLOSE,5))/DELAY(CLOSE,5):(CLOSE=DELAY(CLOSE,5)?0:(CLOSE-DELAY(CLOSE,5))/CLOSE))
def GTJAalpha019(self):
condition1 = (self.close == delay(self.close,5))
inner1_true=pd.Series(np.zeros(len(condition1)))
inner1_false=(self.close-delay(self.close,5))/self.close
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close<delay(self.close,5))
inner2_true = (self.close-delay(self.close,5))/delay(self.close,5)
inner2_false = inner1
alpha = pd.Series(np.where(condition2, inner2_true, inner2_false))
return alpha[self.start_date_index:self.end_date_index]
#Alpha20 (CLOSE-DELAY(CLOSE,6))/DELAY(CLOSE,6)*100
def GTJAalpha020(self):
alpha = (self.close-delay(self.close,6)) / delay(self.close,6) *100
return alpha[self.start_date_index:self.end_date_index]
#Alpha21 REGBETA(MEAN(CLOSE,6),SEQUENCE(6))
def GTJAalpha021(self): #I'm not sure if I've understood the formula correctly.
y = sma(self.close, 6)
alpha = pd.Series(np.nan, index=self.close.index)
for i in range(6-1,len(y)):
alpha.iloc[i]=sp.stats.linregress(pd.Series(np.arange(1,7)), y[i-6+1:i+1])[0]
return alpha[self.start_date_index:self.end_date_index]
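# REGBETA(MEAN(CLOSE,6), SEQUENCE(6)) is read here as the slope of a rolling OLS of the
# 6-day mean close against the sequence 1..6; linregress(...)[0] above is that slope.
# An equivalent sketch for one window, assuming y_win holds the 6 values:
#   slope = np.polyfit(np.arange(1, 7), y_win, 1)[0]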
#Alpha22 SMEAN(((CLOSE-MEAN(CLOSE,6))/MEAN(CLOSE,6)-DELAY((CLOSE-MEAN(CLOSE,6))/MEAN(CLOSE,6),3)),12,1)
def GTJAalpha022(self):
alpha = ema(((self.close-sma(self.close,6))/sma(self.close,6)-delay((self.close-sma(self.close,6))/sma(self.close,6),3)),12,1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha23 SMA((CLOSE>DELAY(CLOSE,1)?STD(CLOSE,20):0),20,1) / (SMA((CLOSE>DELAY(CLOSE,1)?STD(CLOSE,20):0),20,1)+SMA((CLOSE<=DELAY(CLOSE,1)?STD(CLOSE,20):0),20,1))*100
def GTJAalpha023(self):
condition1 = (self.close > delay(self.close,1))
inner1_true= stddev(self.close, 20)
inner1_false = pd.Series(np.zeros(len(condition1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close <= delay(self.close,1))
inner2_true= stddev(self.close, 20)
inner2_false = pd.Series(np.zeros(len(condition2)))
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ema(inner1,20,1) / (ema(inner1,20,1)+ema(inner2,20,1))*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha24 SMA(CLOSE-DELAY(CLOSE,5),5,1)
def GTJAalpha024(self):
alpha = ema(self.close-delay(self.close,5),5,1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha25 ((-1 * RANK((DELTA(CLOSE, 7) * (1 - RANK(DECAYLINEAR((VOLUME / MEAN(VOLUME,20)), 9)))))) * (1 + RANK(SUM(RET, 250))))
def GTJAalpha025(self):
alpha = ((-1 * rolling_rank((delta(self.close, 7) * (1 - rolling_rank(decay_linear((self.volume / sma(self.volume,20)), 9)))))) * (1 + rolling_rank(ts_sum(self.returns, 250))))
return alpha[self.start_date_index:self.end_date_index]
#Alpha26 ((((SUM(CLOSE, 7) / 7) - CLOSE)) + ((CORR(VWAP, DELAY(CLOSE, 5), 230))))
def GTJAalpha026(self):
alpha = (((ts_sum(self.close, 7) / 7) - self.close)) + ((correlation(self.vwap, delay(self.close, 5), 230)))
return alpha[self.start_date_index:self.end_date_index]
#Alpha27 WMA((CLOSE-DELAY(CLOSE,3))/DELAY(CLOSE,3)*100+(CLOSE-DELAY(CLOSE,6))/DELAY(CLOSE,6)*100,12)
def GTJAalpha027(self):
alpha = wma(( self.close-delay(self.close,3))/delay(self.close,3)*100 + (self.close-delay(self.close,6))/delay(self.close,6)*100 ,12)
return alpha[self.start_date_index:self.end_date_index]
#Alpha28 3*SMA((CLOSE-TSMIN(LOW,9))/(TSMAX(HIGH,9)-TSMIN(LOW,9))*100,3,1)-2*SMA(SMA((CLOSE-TSMIN(LOW,9))/(MAX(HIGH,9)-TSMAX(LOW,9))*100,3,1),3,1)
def GTJAalpha028(self):
alpha = 3*ema((self.close-ts_min(self.low,9))/(ts_max(self.high,9)-ts_min(self.low,9))*100,3,1)-2*ema(ema((self.close-ts_min(self.low,9))/(ts_max(self.high,9)-ts_min(self.low,9))*100,3,1),3,1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha29 (CLOSE-DELAY(CLOSE,6))/DELAY(CLOSE,6)*VOLUME
def GTJAalpha029(self):
alpha = (self.close-delay(self.close,6))/delay(self.close,6)*self.volume
return alpha[self.start_date_index:self.end_date_index]
#Alpha30 WMA((REGRESI(CLOSE/DELAY(CLOSE)-1,MKT,SMB,HML,60))^2,20)
def GTJAalpha030(self):
y = (self.close/delay(self.close)) -1
y.rename("y",inplace=True)
y = pd.concat([self.available_dates, y],axis=1)
MKT = self.benchmarkindexclose.pct_change()
MKT.rename("MKT", inplace=True)
MKT = pd.concat([self.available_dates,MKT],axis=1)
FFfactor_data=pd.read_csv("FFfactors_daily.csv")
FFfactor_data_needed = FFfactor_data[["TRADE_DATE","SMB","HML"]]
dt = pd.merge(y, MKT, on=['TRADE_DATE'], how="left")
dt = pd.merge(dt, FFfactor_data_needed, on=['TRADE_DATE'], how="left")
dt["const"]=1
result = pd.Series(np.nan, index=dt.index)
for i in range(60-1,len(dt)):
dt_piece = dt[i-60+1:i+1]
dt_piece = dt_piece.dropna()
y_piece = dt_piece["y"] #separate name so the outer y is not shadowed inside the loop
x_piece = dt_piece[["MKT","SMB","HML","const"]]
if len(y_piece)!=0:
model = sm.OLS(y_piece, x_piece)
result.iloc[i] = model.fit().params.loc["const"]
alpha = wma(result**2,20)
return alpha[self.start_date_index:self.end_date_index]
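# Note: REGRESI in the GTJA document usually denotes the regression residual, while the loop
# above stores the fitted intercept (params["const"]). If the residual is intended, a hedged
# alternative for the last observation of each window would be
#   result.iloc[i] = model.fit().resid.iloc[-1]
# (statsmodels OLS exposes residuals via .resid).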
#Alpha31 (CLOSE-MEAN(CLOSE,12))/MEAN(CLOSE,12)*100
def GTJAalpha031(self):
alpha = (self.close-sma(self.close,12))/sma(self.close,12)*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha32 (-1 * SUM(RANK(CORR(RANK(HIGH), RANK(VOLUME), 3)), 3))
def GTJAalpha032(self):
alpha = -1 * ts_sum(rolling_rank(correlation(rolling_rank(self.high), rolling_rank(self.volume), 3)), 3)
return alpha[self.start_date_index:self.end_date_index]
#Alpha33 ((((-1 * TSMIN(LOW, 5)) + DELAY(TSMIN(LOW, 5), 5)) * RANK(((SUM(RET, 240) - SUM(RET, 20)) / 220))) * TSRANK(VOLUME, 5))
def GTJAalpha033(self):
alpha = (((-1 * ts_min(self.low, 5)) + delay(ts_min(self.low, 5), 5)) * rolling_rank(((ts_sum(self.returns, 240) - ts_sum(self.returns, 20)) / 220))) * ts_rank(self.volume, 5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha34 MEAN(CLOSE,12)/CLOSE
def GTJAalpha034(self):
alpha = sma(self.close,12) / self.close
return alpha[self.start_date_index:self.end_date_index]
#Alpha35 (MIN(RANK(DECAYLINEAR(DELTA(OPEN, 1), 15)), RANK(DECAYLINEAR(CORR((VOLUME), ((OPEN * 0.65) + (OPEN *0.35)), 17),7))) * -1)
def GTJAalpha035(self):
alpha = np.minimum(rolling_rank(decay_linear(delta(self.open, 1), 15)), rolling_rank(decay_linear(correlation((self.volume), ((self.open * 0.65) + (self.open *0.35)), 17),7))) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha36 RANK(SUM(CORR(RANK(VOLUME), RANK(VWAP)), 6), 2)
def GTJAalpha036(self):
alpha = rolling_rank(ts_sum(correlation(rolling_rank(self.volume), rolling_rank(self.vwap)), 6), 2)
return alpha[self.start_date_index:self.end_date_index]
#Alpha37 (-1 * RANK(((SUM(OPEN, 5) * SUM(RET, 5)) - DELAY((SUM(OPEN, 5) * SUM(RET, 5)), 10))))
def GTJAalpha037(self):
alpha = -1 * rolling_rank(((ts_sum(self.open, 5) * ts_sum(self.returns, 5)) - delay((ts_sum(self.open, 5) * ts_sum(self.returns, 5)), 10)))
return alpha[self.start_date_index:self.end_date_index]
#Alpha38 (((SUM(HIGH, 20) / 20) < HIGH) ? (-1 * DELTA(HIGH, 2)) : 0)
def GTJAalpha038(self):
condition1 = ((ts_sum(self.high, 20) / 20) < self.high)
inner1_true= -1 * delta(self.high, 2)
inner1_false = pd.Series(np.zeros(len(condition1)))
alpha = pd.Series(np.where(condition1, inner1_true, inner1_false))
return alpha[self.start_date_index:self.end_date_index]
#Alpha39 ((RANK(DECAYLINEAR(DELTA((CLOSE), 2),8)) - RANK(DECAYLINEAR(CORR(((VWAP * 0.3) + (OPEN * 0.7)), SUM(MEAN(VOLUME,180), 37), 14), 12))) * -1)
def GTJAalpha039(self):
alpha = ((rolling_rank(decay_linear(delta((self.close), 2),8)) - rolling_rank(decay_linear(correlation(((self.vwap * 0.3) + (self.open * 0.7)), ts_sum(sma(self.volume,180), 37), 14), 12))) * -1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha40 SUM((CLOSE>DELAY(CLOSE,1)?VOLUME:0),26)/SUM((CLOSE<=DELAY(CLOSE,1)?VOLUME:0),26)*100
def GTJAalpha040(self):
condition1 = (self.close > delay(self.close,1))
inner1_true= self.volume
inner1_false = pd.Series(np.zeros(len(condition1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close <= delay(self.close,1))
inner2_true= self.volume
inner2_false = pd.Series(np.zeros(len(condition2)))
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ts_sum(inner1,26) / ts_sum(inner2,26)*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha41 (RANK(MAX(DELTA((VWAP), 3), 5))* -1)
def GTJAalpha041(self):
alpha = rolling_rank(np.maximum(delta((self.vwap), 3), 5))* -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha42 ((-1 * RANK(STD(HIGH, 10))) * CORR(HIGH, VOLUME, 10))
def GTJAalpha042(self):
alpha = (-1 * rolling_rank(stddev(self.high, 10))) * correlation(self.high, self.volume, 10)
return alpha[self.start_date_index:self.end_date_index]
#Alpha43 SUM((CLOSE>DELAY(CLOSE,1)?VOLUME:(CLOSE<DELAY(CLOSE,1)?-VOLUME:0)),6)
def GTJAalpha043(self):
condition1 = (self.close < delay(self.close,1))
inner1_true = -1* self.volume
inner1_false = pd.Series(np.zeros(len(condition1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close > delay(self.close,1))
inner2_true = self.volume
inner2_false = inner1
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ts_sum(inner2,6)
return alpha[self.start_date_index:self.end_date_index]
#Alpha44 (TSRANK(DECAYLINEAR(CORR(((LOW )), MEAN(VOLUME,10), 7), 6),4) + TSRANK(DECAYLINEAR(DELTA((VWAP), 3), 10), 15))
def GTJAalpha044(self):
alpha = ts_rank(decay_linear(correlation(self.low, sma(self.volume,10), 7), 6),4) + ts_rank(decay_linear(delta((self.vwap), 3), 10), 15)
return alpha[self.start_date_index:self.end_date_index]
#Alpha45 (RANK(DELTA((((CLOSE * 0.6) + (OPEN *0.4))), 1)) * RANK(CORR(VWAP, MEAN(VOLUME,150), 15)))
def GTJAalpha045(self):
alpha = rolling_rank(delta((((self.close * 0.6) + (self.open *0.4))), 1)) * rolling_rank(correlation(self.vwap, sma(self.volume,150), 15))
return alpha[self.start_date_index:self.end_date_index]
#Alpha46 (MEAN(CLOSE,3)+MEAN(CLOSE,6)+MEAN(CLOSE,12)+MEAN(CLOSE,24))/(4*CLOSE)
def GTJAalpha046(self):
alpha = (sma(self.close,3)+sma(self.close,6)+sma(self.close,12)+sma(self.close,24))/(4*self.close)
return alpha[self.start_date_index:self.end_date_index]
#Alpha47 SMA((TSMAX(HIGH,6)-CLOSE)/(TSMAX(HIGH,6)-TSMIN(LOW,6))*100,9,1)
def GTJAalpha047(self):
alpha = ema((ts_max(self.high,6)-self.close)/(ts_max(self.high,6)-ts_min(self.low,6))*100,9,1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha48 (-1*((RANK(((SIGN((CLOSE - DELAY(CLOSE, 1))) + SIGN((DELAY(CLOSE, 1) - DELAY(CLOSE, 2)))) + SIGN((DELAY(CLOSE, 2) - DELAY(CLOSE, 3)))))) * SUM(VOLUME, 5)) / SUM(VOLUME, 20))
def GTJAalpha048(self):
alpha = -1*((rolling_rank(((sign((self.close - delay(self.close, 1))) + sign((delay(self.close, 1) - delay(self.close, 2)))) + sign((delay(self.close, 2) - delay(self.close, 3)))))) * ts_sum(self.volume, 5)) / ts_sum(self.volume, 20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha49 SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)/(SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)+SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12))
def GTJAalpha049(self):
condition1 = ((self.high+self.low)>=(delay(self.high,1)+delay(self.low,1)))
inner1_true = pd.Series(np.zeros(len(condition1)))
inner1_false = np.maximum(abs(self.high-delay(self.high,1)),abs(self.low-delay(self.low,1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = ((self.high+self.low)<=(delay(self.high,1)+delay(self.low,1)))
inner2 = pd.Series(np.where(condition2, inner1_true, inner1_false))
alpha = ts_sum(inner1,12) / (ts_sum(inner1,12)+ts_sum(inner2,12))
return alpha[self.start_date_index:self.end_date_index]
#Alpha50 SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)/(SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)+SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HI GH,1)),ABS(LOW-DELAY(LOW,1)))),12))-SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HI GH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)/(SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0: MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)+SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELA Y(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12))
def GTJAalpha050(self):
condition1 = ((self.high+self.low) >= (delay(self.high,1)+delay(self.low,1)))
inner1_true = pd.Series(np.zeros(len(condition1)))
inner1_false = np.maximum(abs(self.high-delay(self.high,1)),abs(self.low-delay(self.low,1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = ((self.high+self.low) <= (delay(self.high,1)+delay(self.low,1)))
inner2 = pd.Series(np.where(condition2, inner1_true, inner1_false))
alpha = (ts_sum(inner2,12)-ts_sum(inner1,12))/(ts_sum(inner2,12)+ts_sum(inner1,12))
return alpha[self.start_date_index:self.end_date_index]
#Alpha51 SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)/(SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)+SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HI GH,1)),ABS(LOW-DELAY(LOW,1)))),12))
def GTJAalpha051(self):
condition1 = ((self.high+self.low) >= (delay(self.high,1)+delay(self.low,1)))
inner1_true = pd.Series(np.zeros(len(condition1)))
inner1_false = np.maximum(abs(self.high-delay(self.high,1)),abs(self.low-delay(self.low,1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = ((self.high+self.low) <= (delay(self.high,1)+delay(self.low,1)))
inner2 = pd.Series(np.where(condition2, inner1_true, inner1_false))
alpha = ts_sum(inner2,12) / (ts_sum(inner1,12)+ts_sum(inner2,12))
return alpha[self.start_date_index:self.end_date_index]
#Alpha52 SUM(MAX(0,HIGH-DELAY((HIGH+LOW+CLOSE)/3,1)),26)/SUM(MAX(0,DELAY((HIGH+LOW+CLOSE)/3,1)-LOW),26)* 100
def GTJAalpha052(self):
alpha = ts_sum(np.maximum(0,self.high-delay((self.high+self.low+self.close)/3,1)),26)/ts_sum(np.maximum(0,delay((self.high+self.low+self.close)/3,1)-self.low),26)* 100
return alpha[self.start_date_index:self.end_date_index]
#Alpha53 COUNT(CLOSE>DELAY(CLOSE,1),12)/12*100
def GTJAalpha053(self):
condition = (self.close>delay(self.close,1))
count = pd.Series(np.nan, index=self.close.index)
for i in range(12-1,len(condition)):
count.iloc[i]=condition[i-12+1:i+1].sum()
alpha = count / 12 * 100
return alpha[self.start_date_index:self.end_date_index]
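# COUNT(cond, n) above is just a rolling sum of a boolean series; the explicit loop can be
# replaced by a vectorized sketch over the same `condition` Series:
#   count = condition.rolling(12).sum()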
#Alpha54 (-1 * RANK((STD(ABS(CLOSE - OPEN)) + (CLOSE - OPEN)) + CORR(CLOSE, OPEN,10)))
def GTJAalpha054(self):
alpha = -1 * rolling_rank((stddev(abs(self.close - self.open)) + (self.close - self.open)) + correlation(self.close,self.open,10))
return alpha[self.start_date_index:self.end_date_index]
#Alpha55 SUM(16*(CLOSE-DELAY(CLOSE,1)+(CLOSE-OPEN)/2+DELAY(CLOSE,1)-DELAY(OPEN,1))/((ABS(HIGH-DELAY(CLOSE,1))>ABS(LOW-DELAY(CLOSE,1)) & ABS(HIGH-DELAY(CLOSE,1))>ABS(HIGH-DELAY(LOW,1))?ABS(HIGH-DELAY(CLOSE,1))+ABS(LOW-DELAY(CLOSE,1))/2+ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4:(ABS(LOW-DELAY(CLOSE,1))>ABS(HIGH-DELAY(LOW,1)) & ABS(LOW-DELAY(CLOSE,1))>ABS(HIGH-DELAY(CLOSE,1))?ABS(LOW-DELAY(CLOSE,1))+ABS(HIGH-DELAY(CLO SE,1))/2+ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4:ABS(HIGH-DELAY(LOW,1))+ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4)))*MAX(ABS(HIGH-DELAY(CLOSE,1)),ABS(LOW-DELAY(CLOSE,1))),20)
def GTJAalpha055(self):
condition1 = (abs(self.low-delay(self.close,1))>abs(self.high-delay(self.low,1))) & (abs(self.low-delay(self.close,1))>abs(self.high-delay(self.close,1)))
inner1_true = abs(self.low-delay(self.close,1)) + abs(self.high-delay(self.close,1))/2 + abs(delay(self.close,1)-delay(self.open,1))/4
inner1_false = abs(self.high-delay(self.low,1)) + abs(delay(self.close,1)-delay(self.open,1))/4
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (abs(self.high-delay(self.close,1))>abs(self.low-delay(self.close,1))) & (abs(self.high-delay(self.close,1))>abs(self.high-delay(self.low,1)))
inner2_true = abs(self.high-delay(self.close,1))+abs(self.low-delay(self.close,1))/2+abs(delay(self.close,1)-delay(self.open,1))/4
inner2_false = inner1
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ts_sum(16*(self.close-delay(self.close,1)+(self.close-self.open)/2+delay(self.close,1)-delay(self.open,1))/(inner2)*np.maximum(abs(self.high-delay(self.close,1)),abs(self.low-delay(self.close,1))),20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha56 (RANK((OPEN - TSMIN(OPEN, 12))) < RANK((RANK(CORR(SUM(((HIGH + LOW) / 2), 19), SUM(MEAN(VOLUME,40), 19), 13))^5)))
def GTJAalpha056(self):
alpha = rolling_rank((self.open - ts_min(self.open, 12))) < rolling_rank((rolling_rank(correlation(ts_sum(((self.high + self.low) / 2), 19), ts_sum(sma(self.volume,40), 19), 13))**5))
return alpha[self.start_date_index:self.end_date_index]
#Alpha57 SMA((CLOSE-TSMIN(LOW,9))/(TSMAX(HIGH,9)-TSMIN(LOW,9))*100,3,1)
def GTJAalpha057(self):
alpha = ema((self.close-ts_min(self.low,9))/(ts_max(self.high,9)-ts_min(self.low,9))*100,3,1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha58 COUNT(CLOSE>DELAY(CLOSE,1),20)/20*10
def GTJAalpha058(self):
condition = (self.close>delay(self.close,1))
count = pd.Series(np.nan, index=self.close.index)
for i in range(20-1,len(condition)):
count.iloc[i]=condition[i-20+1:i+1].sum()
alpha = count / 20 * 10
return alpha[self.start_date_index:self.end_date_index]
#Alpha59 SUM((CLOSE=DELAY(CLOSE,1)?0:CLOSE-(CLOSE>DELAY(CLOSE,1)?MIN(LOW,DELAY(CLOSE,1)):MAX(HIGH,DELAY(CLOSE,1)))),20)
def GTJAalpha059(self):
condition1 = self.close > delay(self.close,1)
inner1_true = np.minimum(self.low,delay(self.close,1))
inner1_false = np.maximum(self.high,delay(self.close,1))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close == delay(self.close,1))
inner2_true = pd.Series(np.zeros(len(condition2)))
inner2_false = self.close-inner1
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ts_sum(inner2, 20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha60 SUM(((CLOSE-LOW)-(HIGH-CLOSE))./(HIGH-LOW).*VOLUME,20)
def GTJAalpha060(self):
alpha = ts_sum(((self.close-self.low)-(self.high-self.close))/(self.high-self.low)*self.volume,20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha61 (MAX(RANK(DECAYLINEAR(DELTA(VWAP, 1), 12)), RANK(DECAYLINEAR(RANK(CORR((LOW),MEAN(VOLUME,80), 8)), 17))) * -1)
def GTJAalpha061(self):
alpha = np.maximum(rolling_rank(decay_linear(delta(self.vwap, 1), 12)), rolling_rank(decay_linear(rolling_rank(correlation(self.low,sma(self.volume,80), 8)), 17))) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha62 (-1 * CORR(HIGH, RANK(VOLUME), 5))
def GTJAalpha062(self):
alpha = -1 * correlation(self.high, rolling_rank(self.volume), 5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha63 SMA(MAX(CLOSE-DELAY(CLOSE,1),0),6,1)/SMA(ABS(CLOSE-DELAY(CLOSE,1)),6,1)*100
def GTJAalpha063(self):
alpha = ema(np.maximum(self.close-delay(self.close,1),0),6,1) / ema(abs(self.close-delay(self.close,1)),6,1)*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha64 (MAX(RANK(DECAYLINEAR(CORR(RANK(VWAP), RANK(VOLUME), 4), 4)), RANK(DECAYLINEAR(MAX(CORR(RANK(CLOSE), RANK(MEAN(VOLUME,60)), 4), 13), 14))) * -1)
def GTJAalpha064(self):
alpha = np.maximum(rolling_rank(decay_linear(correlation(rolling_rank(self.vwap), rolling_rank(self.volume), 4), 4)), rolling_rank(decay_linear(np.maximum(correlation(rolling_rank(self.close), rolling_rank(sma(self.volume,60)), 4), 13), 14))) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha65 MEAN(CLOSE,6)/CLOSE
def GTJAalpha065(self):
alpha = sma(self.close,6)/self.close
return alpha[self.start_date_index:self.end_date_index]
#Alpha66 (CLOSE-MEAN(CLOSE,6))/MEAN(CLOSE,6)*100
def GTJAalpha066(self):
alpha = (self.close-sma(self.close,6)) / sma(self.close,6) *100
return alpha[self.start_date_index:self.end_date_index]
#Alpha67 SMA(MAX(CLOSE-DELAY(CLOSE,1),0),24,1)/SMA(ABS(CLOSE-DELAY(CLOSE,1)),24,1)*100
def GTJAalpha067(self):
alpha = ema(np.maximum(self.close-delay(self.close,1),0),24,1) / ema(abs(self.close-delay(self.close,1)),24,1) *100
return alpha[self.start_date_index:self.end_date_index]
#Alpha68 SMA(((HIGH+LOW)/2-(DELAY(HIGH,1)+DELAY(LOW,1))/2)*(HIGH-LOW)/VOLUME,15,2)
def GTJAalpha068(self):
alpha = ema(((self.high+self.low)/2-(delay(self.high,1)+delay(self.low,1))/2)*(self.high-self.low)/self.volume, 15, 2)
return alpha[self.start_date_index:self.end_date_index]
#Alpha69 (SUM(DTM,20)>SUM(DBM,20)?(SUM(DTM,20)-SUM(DBM,20))/SUM(DTM,20):(SUM(DTM,20)=SUM(DBM,20)?0:(SUM(DTM,20)-SUM(DBM,20))/SUM(DBM,20)))
def GTJAalpha069(self):
condition_dtm = (self.open<=delay(self.open,1))
dtm_true = pd.Series(np.zeros(len(condition_dtm)))
dtm_false = np.maximum(self.high-self.open, self.open-delay(self.open,1))
dtm = pd.Series(np.where(condition_dtm, dtm_true, dtm_false))
condition_dbm = (self.open>=delay(self.open,1))
dbm_true = pd.Series(np.zeros(len(condition_dbm)))
dbm_false = np.maximum(self.open-self.low, self.open-delay(self.open,1))
dbm = pd.Series(np.where(condition_dbm, dbm_true, dbm_false))
condition1 = (ts_sum(dtm,20) == ts_sum(dbm,20))
inner1_true = pd.Series(np.zeros(len(condition1)))
inner1_false = (ts_sum(dtm,20)-ts_sum(dbm,20)) / ts_sum(dbm,20)
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (ts_sum(dtm,20) > ts_sum(dbm,20))
inner2_true = (ts_sum(dtm,20)-ts_sum(dbm,20)) / ts_sum(dtm,20)
inner2_false = inner1
alpha = pd.Series(np.where(condition2, inner2_true, inner2_false))
return alpha[self.start_date_index:self.end_date_index]
#Alpha70 STD(AMOUNT,6)
def GTJAalpha070(self):
alpha = stddev(self.amount, 6)
return alpha[self.start_date_index:self.end_date_index]
#Alpha71 (CLOSE-MEAN(CLOSE,24))/MEAN(CLOSE,24)*100
def GTJAalpha071(self):
alpha = (self.close-sma(self.close,24))/sma(self.close,24)*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha72 SMA((TSMAX(HIGH,6)-CLOSE)/(TSMAX(HIGH,6)-TSMIN(LOW,6))*100,15,1)
def GTJAalpha072(self):
alpha = ema((ts_max(self.high,6)-self.close)/(ts_max(self.high,6)-ts_min(self.low,6))*100, 15, 1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha73 ((TSRANK(DECAYLINEAR(DECAYLINEAR(CORR((CLOSE), VOLUME, 10), 16), 4), 5) - RANK(DECAYLINEAR(CORR(VWAP, MEAN(VOLUME,30), 4),3))) * -1)
def GTJAalpha073(self):
alpha = (ts_rank(decay_linear(decay_linear(correlation(self.close, self.volume, 10), 16), 4), 5) - rolling_rank(decay_linear(correlation(self.vwap, sma(self.volume,30), 4),3))) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha74 (RANK(CORR(SUM(((LOW * 0.35) + (VWAP * 0.65)), 20), SUM(MEAN(VOLUME,40), 20), 7)) + RANK(CORR(RANK(VWAP), RANK(VOLUME), 6)))
def GTJAalpha074(self):
alpha = rolling_rank(correlation(ts_sum(((self.low * 0.35) + (self.vwap * 0.65)), 20), ts_sum(sma(self.volume,40), 20), 7)) + rolling_rank(correlation(rolling_rank(self.vwap), rolling_rank(self.volume), 6))
return alpha[self.start_date_index:self.end_date_index]
#Alpha75 COUNT(CLOSE>OPEN & BANCHMARKINDEXCLOSE<BANCHMARKINDEXOPEN,50)/COUNT(BANCHMARKINDEXCLOSE<BANCHMARKINDEXOPEN,50)
def GTJAalpha075(self):
condition_count1 = ((self.close>self.open) & (self.benchmarkindexclose<self.benchmarkindexopen))
count1 = pd.Series(np.nan, index=condition_count1.index)
for i in range(50-1,len(condition_count1)):
count1.iloc[i]=condition_count1[i-50+1:i+1].sum()
condition_count2 = (self.benchmarkindexclose < self.benchmarkindexopen)
count2 = pd.Series(np.nan, index=condition_count2.index)
for i in range(50-1,len(condition_count2)):
count2.iloc[i]=condition_count2[i-50+1:i+1].sum()
alpha = count1 / count2
return alpha[self.start_date_index:self.end_date_index]
#Alpha76 STD(ABS((CLOSE/DELAY(CLOSE,1)-1))/VOLUME,20)/MEAN(ABS((CLOSE/DELAY(CLOSE,1)-1))/VOLUME,20)
def GTJAalpha076(self):
alpha = stddev(abs((self.close/delay(self.close,1)-1))/self.volume,20)/sma(abs((self.close/delay(self.close,1)-1))/self.volume, 20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha77 MIN(RANK(DECAYLINEAR(((((HIGH + LOW) / 2) + HIGH) - (VWAP + HIGH)), 20)), RANK(DECAYLINEAR(CORR(((HIGH + LOW) / 2), MEAN(VOLUME,40), 3), 6)))
def GTJAalpha077(self):
alpha = np.minimum(rolling_rank(decay_linear(((((self.high + self.low) / 2) + self.high) - (self.vwap + self.high)), 20)), rolling_rank(decay_linear(correlation(((self.high + self.low) / 2), sma(self.volume,40), 3), 6)))
return alpha[self.start_date_index:self.end_date_index]
#Alpha78 ((HIGH+LOW+CLOSE)/3-MA((HIGH+LOW+CLOSE)/3,12))/(0.015*MEAN(ABS(CLOSE-MEAN((HIGH+LOW+CLOSE)/3,12)),12))
def GTJAalpha078(self):
alpha = ((self.high+self.low+self.close)/3-sma((self.high+self.low+self.close)/3,12)) / (0.015*sma(abs(self.close-sma((self.high+self.low+self.close)/3,12)),12))
return alpha[self.start_date_index:self.end_date_index]
#Alpha79 SMA(MAX(CLOSE-DELAY(CLOSE,1),0),12,1)/SMA(ABS(CLOSE-DELAY(CLOSE,1)),12,1)*100
def GTJAalpha079(self):
alpha = ema(np.maximum(self.close-delay(self.close,1),0),12,1) / ema(abs(self.close-delay(self.close,1)),12,1)*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha80 (VOLUME-DELAY(VOLUME,5))/DELAY(VOLUME,5)*100
def GTJAalpha080(self):
alpha = (self.volume-delay(self.volume,5)) / delay(self.volume,5) * 100
return alpha[self.start_date_index:self.end_date_index]
#Alpha81 SMA(VOLUME,21,2)
def GTJAalpha081(self):
alpha = ema(self.volume, 21, 2)
return alpha[self.start_date_index:self.end_date_index]
#Alpha82 SMA((TSMAX(HIGH,6)-CLOSE)/(TSMAX(HIGH,6)-TSMIN(LOW,6))*100,20,1)
def GTJAalpha082(self):
alpha = ema((ts_max(self.high,6)-self.close)/(ts_max(self.high,6)-ts_min(self.low,6))*100, 20, 1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha83 (-1 * RANK(COVIANCE(RANK(HIGH), RANK(VOLUME), 5)))
def GTJAalpha083(self):
alpha = -1 * rolling_rank(covariance(rolling_rank(self.high), rolling_rank(self.volume), 5))
return alpha[self.start_date_index:self.end_date_index]
#Alpha84 SUM((CLOSE>DELAY(CLOSE,1)?VOLUME:(CLOSE<DELAY(CLOSE,1)?-VOLUME:0)),20)
def GTJAalpha084(self):
condition1 = (self.close < delay(self.close,1))
inner1_true = -1 * self.volume
inner1_false = pd.Series(np.zeros(len(condition1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close > delay(self.close,1))
inner2_true = self.volume
inner2_false = inner1
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ts_sum(inner2, 20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha85 (TSRANK((VOLUME / MEAN(VOLUME,20)), 20) * TSRANK((-1 * DELTA(CLOSE, 7)), 8))
def GTJAalpha085(self):
alpha = ts_rank((self.volume / sma(self.volume,20)), 20) * ts_rank((-1 * delta(self.close, 7)), 8)
return alpha[self.start_date_index:self.end_date_index]
#Alpha86 ((0.25 < (((DELAY(CLOSE, 20) - DELAY(CLOSE, 10)) / 10) - ((DELAY(CLOSE, 10) - CLOSE) / 10))) ? (-1 * 1) : (((((DELAY(CLOSE, 20) - DELAY(CLOSE, 10)) / 10) - ((DELAY(CLOSE, 10) - CLOSE) / 10)) < 0) ? 1 : ((-1 * 1) * (CLOSE - DELAY(CLOSE, 1)))))
def GTJAalpha086(self):
condition1 = ((((delay(self.close, 20) - delay(self.close, 10)) / 10) - ((delay(self.close, 10) - self.close) / 10)) < 0)
inner1_true = pd.Series(np.ones(len(condition1)))
inner1_false = -1 * (self.close - delay(self.close, 1))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (0.25 < (((delay(self.close, 20) - delay(self.close, 10)) / 10) - ((delay(self.close, 10) - self.close) / 10)))
inner2_true = -1 * pd.Series(np.ones(len(condition2)))
inner2_false = inner1
alpha = pd.Series(np.where(condition2, inner2_true, inner2_false))
return alpha[self.start_date_index:self.end_date_index]
#Alpha87 ((RANK(DECAYLINEAR(DELTA(VWAP, 4), 7)) + TSRANK(DECAYLINEAR(((((LOW * 0.9) + (LOW * 0.1)) - VWAP) / (OPEN - ((HIGH + LOW) / 2))), 11), 7)) * -1)
def GTJAalpha087(self):
alpha = (rolling_rank(decay_linear(delta(self.vwap, 4), 7)) + ts_rank(decay_linear(((((self.low * 0.9) + (self.low * 0.1)) - self.vwap) / (self.open - ((self.high + self.low) / 2))), 11), 7)) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha88 (CLOSE-DELAY(CLOSE,20))/DELAY(CLOSE,20)*100
def GTJAalpha088(self):
alpha = (self.close-delay(self.close,20))/delay(self.close,20) * 100
return alpha[self.start_date_index:self.end_date_index]
#Alpha89 2*(SMA(CLOSE,13,2)-SMA(CLOSE,27,2)-SMA(SMA(CLOSE,13,2)-SMA(CLOSE,27,2),10,2))
def GTJAalpha089(self):
alpha = 2*(ema(self.close,13,2)-ema(self.close,27,2)-ema(ema(self.close,13,2)-ema(self.close,27,2),10,2))
return alpha[self.start_date_index:self.end_date_index]
#Alpha90 (RANK(CORR(RANK(VWAP), RANK(VOLUME), 5)) * -1)
def GTJAalpha090(self):
alpha = rolling_rank(correlation(rolling_rank(self.vwap), rolling_rank(self.volume), 5)) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha91 ((RANK((CLOSE - MAX(CLOSE, 5)))*RANK(CORR((MEAN(VOLUME,40)), LOW, 5))) * -1)
def GTJAalpha091(self):
alpha = (rolling_rank((self.close - np.maximum(self.close, 5))) * rolling_rank(correlation(sma(self.volume,40), self.low, 5))) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha92 (MAX(RANK(DECAYLINEAR(DELTA(((CLOSE * 0.35) + (VWAP *0.65)), 2), 3)), TSRANK(DECAYLINEAR(ABS(CORR((MEAN(VOLUME,180)), CLOSE, 13)), 5), 15)) * -1)
def GTJAalpha092(self):
alpha = np.maximum(rolling_rank(decay_linear(delta(((self.close * 0.35) + (self.vwap *0.65)), 2), 3)), ts_rank(decay_linear(abs(correlation((sma(self.volume,180)), self.close, 13)), 5), 15)) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha93 SUM((OPEN>=DELAY(OPEN,1)?0:MAX((OPEN-LOW),(OPEN-DELAY(OPEN,1)))),20)
def GTJAalpha093(self):
condition1 = (self.open >= delay(self.open,1))
inner1_true = pd.Series(np.zeros(len(condition1)))
inner1_false = np.maximum((self.open-self.low),(self.open-delay(self.open,1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
alpha = ts_sum(inner1, 20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha94 SUM((CLOSE>DELAY(CLOSE,1)?VOLUME:(CLOSE<DELAY(CLOSE,1)?-VOLUME:0)),30)
def GTJAalpha094(self):
condition1 = (self.close < delay(self.close,1))
inner1_true = -1 * self.volume
inner1_false = pd.Series(np.zeros(len(condition1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close > delay(self.close,1))
inner2_true = self.volume
inner2_false = inner1
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ts_sum(inner2, 30)
return alpha[self.start_date_index:self.end_date_index]
#Alpha95 STD(AMOUNT,20)
def GTJAalpha095(self):
alpha = stddev(self.amount, 20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha96 SMA(SMA((CLOSE-TSMIN(LOW,9))/(TSMAX(HIGH,9)-TSMIN(LOW,9))*100,3,1),3,1)
def GTJAalpha096(self):
alpha = ema(ema((self.close-ts_min(self.low,9))/(ts_max(self.high,9)-ts_min(self.low,9))*100,3,1),3,1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha97 STD(VOLUME,10)
def GTJAalpha097(self):
alpha = stddev(self.volume, 10)
return alpha[self.start_date_index:self.end_date_index]
#Alpha98 ((((DELTA((SUM(CLOSE, 100) / 100), 100) / DELAY(CLOSE, 100)) < 0.05) || ((DELTA((SUM(CLOSE, 100) / 100), 100) / DELAY(CLOSE, 100)) == 0.05)) ? (-1 * (CLOSE - TSMIN(CLOSE, 100))) : (-1 * DELTA(CLOSE, 3)))
def GTJAalpha098(self):
condition1 = ((delta((ts_sum(self.close, 100) / 100), 100) / delay(self.close, 100)) < 0.05) | ((delta((ts_sum(self.close, 100) / 100), 100) / delay(self.close, 100)) == 0.05)
inner1_true = -1 * (self.close - ts_min(self.close, 100))
inner1_false = -1 * delta(self.close, 3)
alpha = pd.Series(np.where(condition1, inner1_true, inner1_false))
return alpha[self.start_date_index:self.end_date_index]
#Alpha99 (-1 * RANK(COVIANCE(RANK(CLOSE), RANK(VOLUME), 5)))
def GTJAalpha099(self):
alpha = -1 * rolling_rank(covariance(rolling_rank(self.close), rolling_rank(self.volume), 5))
return alpha[self.start_date_index:self.end_date_index]
#Alpha100 STD(VOLUME,20)
def GTJAalpha100(self):
alpha = stddev(self.volume, 20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha101 ((RANK(CORR(CLOSE, SUM(MEAN(VOLUME,30), 37), 15)) < RANK(CORR(RANK(((HIGH * 0.1) + (VWAP * 0.9))), RANK(VOLUME), 11))) * -1)
def GTJAalpha101(self):
alpha = (rolling_rank(correlation(self.close, ts_sum(sma(self.volume,30), 37), 15)) < rolling_rank(correlation(rolling_rank(((self.high * 0.1) + (self.vwap * 0.9))), rolling_rank(self.volume), 11))) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha102 SMA(MAX(VOLUME-DELAY(VOLUME,1),0),6,1)/SMA(ABS(VOLUME-DELAY(VOLUME,1)),6,1)*100
def GTJAalpha102(self):
alpha = ema(np.maximum(self.volume-delay(self.volume,1),0),6,1)/ema(abs(self.volume-delay(self.volume,1)),6,1)*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha103 ((20-LOWDAY(LOW,20))/20)*100
def GTJAalpha103(self):
alpha = ((20-lowday(self.low,20))/20)*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha104 (-1 * (DELTA(CORR(HIGH, VOLUME, 5), 5) * RANK(STD(CLOSE, 20))))
def GTJAalpha104(self):
alpha = -1 * (delta(correlation(self.high, self.volume, 5), 5) * rolling_rank(stddev(self.close, 20)))
return alpha[self.start_date_index:self.end_date_index]
#Alpha105 (-1 * CORR(RANK(OPEN), RANK(VOLUME), 10))
def GTJAalpha105(self):
alpha = -1 * correlation(rolling_rank(self.open), rolling_rank(self.volume), 10)
return alpha[self.start_date_index:self.end_date_index]
#Alpha106 CLOSE-DELAY(CLOSE,20)
def GTJAalpha106(self):
alpha = self.close - delay(self.close, 20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha107 (((-1 * RANK((OPEN - DELAY(HIGH, 1)))) * RANK((OPEN - DELAY(CLOSE, 1)))) * RANK((OPEN - DELAY(LOW, 1))))
def GTJAalpha107(self):
alpha = ((-1 * rolling_rank((self.open - delay(self.high, 1)))) * rolling_rank((self.open - delay(self.close, 1)))) * rolling_rank((self.open - delay(self.low, 1)))
return alpha[self.start_date_index:self.end_date_index]
#Alpha108 ((RANK((HIGH - MIN(HIGH, 2)))^RANK(CORR((VWAP), (MEAN(VOLUME,120)), 6))) * -1)
def GTJAalpha108(self):
alpha = (rolling_rank((self.high - np.minimum(self.high, 2)))**rolling_rank(correlation((self.vwap), (sma(self.volume,120)), 6))) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha109 SMA(HIGH-LOW,10,2)/SMA(SMA(HIGH-LOW,10,2),10,2)
def GTJAalpha109(self):
alpha = ema(self.high-self.low,10,2) / ema(ema(self.high-self.low,10,2), 10, 2)
return alpha[self.start_date_index:self.end_date_index]
#Alpha110 SUM(MAX(0,HIGH-DELAY(CLOSE,1)),20)/SUM(MAX(0,DELAY(CLOSE,1)-LOW),20)*100
def GTJAalpha110(self):
alpha = ts_sum(np.maximum(0,self.high-delay(self.close,1)),20) / ts_sum(np.maximum(0,delay(self.close,1)-self.low),20) * 100
return alpha[self.start_date_index:self.end_date_index]
#Alpha111 SMA(VOL*((CLOSE-LOW)-(HIGH-CLOSE))/(HIGH-LOW),11,2)-SMA(VOL*((CLOSE-LOW)-(HIGH-CLOSE))/(HIGH-LOW),4,2)
def GTJAalpha111(self):
alpha= ema(self.volume*((self.close-self.low)-(self.high-self.close))/(self.high-self.low),11,2) - ema(self.volume*((self.close-self.low)-(self.high-self.close))/(self.high-self.low),4,2)
return alpha[self.start_date_index:self.end_date_index]
#Alpha112 (SUM((CLOSE-DELAY(CLOSE,1)>0?CLOSE-DELAY(CLOSE,1):0),12) - SUM((CLOSE-DELAY(CLOSE,1)<0?ABS(CLOSE-DELAY(CLOSE,1)):0),12)) / (SUM((CLOSE-DELAY(CLOSE,1)>0?CLOSE-DELAY(CLOSE,1):0),12) + SUM((CLOSE-DELAY(CLOSE,1)<0?ABS(CLOSE-DELAY(CLOSE,1)):0),12))*100
def GTJAalpha112(self):
condition1 = (self.close-delay(self.close,1) > 0)
condition2 = (self.close-delay(self.close,1) < 0)
inner1_true = self.close-delay(self.close,1)
inner2_true = abs(self.close-delay(self.close,1))
inner_false = pd.Series(np.zeros(len(condition1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner_false))
inner2 = pd.Series(np.where(condition2, inner2_true, inner_false))
alpha = (ts_sum(inner1,12)-ts_sum(inner2,12)) / (ts_sum(inner1,12)+ts_sum(inner2,12)) * 100
return alpha[self.start_date_index:self.end_date_index]
#Alpha113 (-1 * ((RANK((SUM(DELAY(CLOSE, 5), 20) / 20)) * CORR(CLOSE, VOLUME, 2)) * RANK(CORR(SUM(CLOSE, 5), SUM(CLOSE, 20), 2))))
def GTJAalpha113(self):
alpha = -1 * ((rolling_rank((ts_sum(delay(self.close, 5), 20) / 20)) * correlation(self.close, self.volume, 2)) * rolling_rank(correlation(ts_sum(self.close, 5), ts_sum(self.close, 20), 2)))
return alpha[self.start_date_index:self.end_date_index]
#Alpha114 ((RANK(DELAY(((HIGH - LOW) / (SUM(CLOSE, 5) / 5)), 2)) * RANK(RANK(VOLUME))) / (((HIGH - LOW) / (SUM(CLOSE, 5) / 5)) / (VWAP - CLOSE)))
def GTJAalpha114(self):
alpha = (rolling_rank(delay(((self.high - self.low) / (ts_sum(self.close, 5) / 5)), 2)) * rolling_rank(rolling_rank(self.volume))) / (((self.high - self.low) / (ts_sum(self.close, 5) / 5)) / (self.vwap - self.close))
return alpha[self.start_date_index:self.end_date_index]
#Alpha115 (RANK(CORR(((HIGH * 0.9) + (CLOSE * 0.1)), MEAN(VOLUME,30), 10))^RANK(CORR(TSRANK(((HIGH + LOW) / 2), 4), TSRANK(VOLUME, 10), 7)))
def GTJAalpha115(self):
alpha = rolling_rank(correlation(((self.high * 0.9) + (self.close * 0.1)), sma(self.volume,30), 10)) ** rolling_rank(correlation(ts_rank(((self.high + self.low) / 2), 4), ts_rank(self.volume, 10), 7))
return alpha[self.start_date_index:self.end_date_index]
#Alpha116 REGBETA(CLOSE,SEQUENCE,20)
def GTJAalpha116(self):
y = self.close
alpha = pd.Series(np.nan, index=self.close.index)
for i in range(20-1,len(y)):
alpha.iloc[i]=sp.stats.linregress(pd.Series(np.arange(1,21)), y[i-20+1:i+1])[0]
return alpha[self.start_date_index:self.end_date_index]
#Alpha117 ((TSRANK(VOLUME, 32) * (1 - TSRANK(((CLOSE + HIGH) - LOW), 16))) * (1 - TSRANK(RET, 32)))
def GTJAalpha117(self):
alpha = (ts_rank(self.volume, 32) * (1 - ts_rank(((self.close + self.high) - self.low), 16))) * (1 - ts_rank(self.returns, 32))
return alpha[self.start_date_index:self.end_date_index]
#Alpha118 SUM(HIGH-OPEN,20)/SUM(OPEN-LOW,20)*100
def GTJAalpha118(self):
alpha = ts_sum(self.high-self.open,20) / ts_sum(self.open-self.low,20) * 100
return alpha[self.start_date_index:self.end_date_index]
#Alpha119 (RANK(DECAYLINEAR(CORR(VWAP, SUM(MEAN(VOLUME,5), 26), 5), 7)) - RANK(DECAYLINEAR(TSRANK(MIN(CORR(RANK(OPEN), RANK(MEAN(VOLUME,15)), 21), 9), 7), 8)))
def GTJAalpha119(self):
alpha = rolling_rank(decay_linear(correlation(self.vwap, ts_sum(sma(self.volume,5), 26), 5), 7)) - rolling_rank(decay_linear(ts_rank(np.minimum(correlation(rolling_rank(self.open), rolling_rank(sma(self.volume,15)), 21), 9), 7), 8))
return alpha[self.start_date_index:self.end_date_index]
#Alpha120 (RANK((VWAP - CLOSE)) / RANK((VWAP + CLOSE)))
def GTJAalpha120(self):
alpha = rolling_rank((self.vwap - self.close)) / rolling_rank((self.vwap + self.close))
return alpha[self.start_date_index:self.end_date_index]
#Alpha121 ((RANK((VWAP - MIN(VWAP, 12)))^TSRANK(CORR(TSRANK(VWAP, 20), TSRANK(MEAN(VOLUME,60), 2), 18), 3)) * -1)
def GTJAalpha121(self):
alpha = (rolling_rank((self.vwap - np.minimum(self.vwap, 12))) ** ts_rank(correlation(ts_rank(self.vwap, 20), ts_rank(sma(self.volume,60), 2), 18), 3)) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha122 (SMA(SMA(SMA(LOG(CLOSE),13,2),13,2),13,2)-DELAY(SMA(SMA(SMA(LOG(CLOSE),13,2),13,2),13,2),1))/DELAY(SMA(SMA(SMA(LOG(CLOSE),13,2),13,2),13,2),1)
def GTJAalpha122(self):
alpha = (ema(ema(ema(np.log(self.close),13,2),13,2),13,2)-delay(ema(ema(ema(np.log(self.close),13,2),13,2),13,2),1)) / delay(ema(ema(ema(np.log(self.close),13,2),13,2),13,2),1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha123 (RANK(CORR(SUM(((HIGH + LOW) / 2), 20), SUM(MEAN(VOLUME,60), 20), 9)) < RANK(CORR(LOW, VOLUME, 6))) * -1
def GTJAalpha123(self):
alpha = ( rolling_rank(correlation(ts_sum(((self.high + self.low) / 2), 20), ts_sum(sma(self.volume, 60), 20), 9)) < rolling_rank(correlation(self.low, self.volume, 6)) ) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha124 (CLOSE - VWAP) / DECAYLINEAR(RANK(TSMAX(CLOSE, 30)),2)
def GTJAalpha124(self):
alpha = (self.close - self.vwap) / decay_linear(rolling_rank(ts_max(self.close, 30)),2)
return alpha[self.start_date_index:self.end_date_index]
#Alpha125 (RANK(DECAYLINEAR(CORR((VWAP), MEAN(VOLUME,80),17), 20)) / RANK(DECAYLINEAR(DELTA(((CLOSE * 0.5) + (VWAP * 0.5)), 3), 16)))
def GTJAalpha125(self):
alpha = rolling_rank(decay_linear(correlation((self.vwap), sma(self.volume,80),17), 20)) / rolling_rank(decay_linear(delta(((self.close * 0.5) + (self.vwap * 0.5)), 3), 16))
return alpha[self.start_date_index:self.end_date_index]
#Alpha126 (CLOSE+HIGH+LOW)/3
def GTJAalpha126(self):
alpha = (self.close+self.high+self.low)/3
return alpha[self.start_date_index:self.end_date_index]
#Alpha127 (MEAN((100*(CLOSE-MAX(CLOSE,12))/(MAX(CLOSE,12)))^2))^(1/2)
def GTJAalpha127(self):
alpha = (sma((100*(self.close-np.maximum(self.close,12))/(np.maximum(self.close,12)))**2, 12)) ** 0.5 #the MEAN window is not given in the formula; 12 is assumed here
return alpha[self.start_date_index:self.end_date_index]
#Alpha128 100-(100/(1+SUM(((HIGH+LOW+CLOSE)/3>DELAY((HIGH+LOW+CLOSE)/3,1)?(HIGH+LOW+CLOSE)/3*VOLUME:0),14)/SUM(((HIGH+LOW+CLOSE)/3<DELAY((HIGH+LOW+CLOSE)/3,1)?(HIGH+LOW+CLOSE)/3*VOLUME:0),14)))
def GTJAalpha128(self):
condition1 = ((self.high+self.low+self.close)/3 > delay((self.high+self.low+self.close)/3,1))
condition2 = ((self.high+self.low+self.close)/3 < delay((self.high+self.low+self.close)/3,1))
inner_true = (self.high+self.low+self.close)/3 * self.volume
inner_false = pd.Series(np.zeros(len(condition1)))
inner1 = pd.Series(np.where(condition1, inner_true, inner_false))
inner2 = pd.Series(np.where(condition2, inner_true, inner_false))
alpha = 100-(100/(1+ts_sum(inner1,14)/ts_sum(inner2,14)))
return alpha[self.start_date_index:self.end_date_index]
#Alpha129 SUM((CLOSE-DELAY(CLOSE,1)<0?ABS(CLOSE-DELAY(CLOSE,1)):0),12)
def GTJAalpha129(self):
condition1 = (self.close-delay(self.close,1) < 0)
inner1_true = abs(self.close-delay(self.close,1))
inner1_false = pd.Series(np.zeros(len(condition1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
alpha = ts_sum(inner1,12)
return alpha[self.start_date_index:self.end_date_index]
#Alpha130 (RANK(DECAYLINEAR(CORR(((HIGH + LOW) / 2), MEAN(VOLUME,40), 9), 10)) / RANK(DECAYLINEAR(CORR(RANK(VWAP), RANK(VOLUME), 7),3)))
def GTJAalpha130(self):
alpha = rolling_rank(decay_linear(correlation(((self.high + self.low) / 2), sma(self.volume,40), 9), 10)) / rolling_rank(decay_linear(correlation(rolling_rank(self.vwap), rolling_rank(self.volume), 7),3))
return alpha[self.start_date_index:self.end_date_index]
#Alpha131 (RANK(DELAT(VWAP, 1))^TSRANK(CORR(CLOSE,MEAN(VOLUME,50), 18), 18))
def GTJAalpha131(self): #"DELAT" for delay or delta?
alpha = rolling_rank(delay(self.vwap, 1)) ** ts_rank(correlation(self.close,sma(self.volume,50), 18), 18)
return alpha[self.start_date_index:self.end_date_index]
#Alpha132 MEAN(AMOUNT,20)
def GTJAalpha132(self):
alpha = sma(self.amount, 20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha133 ((20-HIGHDAY(HIGH,20))/20)*100-((20-LOWDAY(LOW,20))/20)*100
def GTJAalpha133(self):
alpha = ((20-highday(self.high,20))/20)*100 - ((20-lowday(self.low,20))/20)*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha134 (CLOSE-DELAY(CLOSE,12))/DELAY(CLOSE,12)*VOLUME
def GTJAalpha134(self):
alpha = (self.close-delay(self.close,12)) / delay(self.close,12) * self.volume
return alpha[self.start_date_index:self.end_date_index]
#Alpha135 SMA(DELAY(CLOSE/DELAY(CLOSE,20),1),20,1)
def GTJAalpha135(self):
alpha = ema(delay(self.close/delay(self.close,20),1),20,1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha136 ((-1 * RANK(DELTA(RET, 3))) * CORR(OPEN, VOLUME, 10))
def GTJAalpha136(self):
alpha = (-1 * rolling_rank(delta(self.returns, 3))) * correlation(self.open, self.volume, 10)
return alpha[self.start_date_index:self.end_date_index]
#Alpha137 16*(CLOSE-DELAY(CLOSE,1)+(CLOSE-OPEN)/2+DELAY(CLOSE,1)-DELAY(OPEN,1)) / ((ABS(HIGH-DELAY(CLOSE,1))>ABS(LOW-DELAY(CLOSE,1)) & ABS(HIGH-DELAY(CLOSE,1))>ABS(HIGH-DELAY(LOW,1))?ABS(HIGH-DELAY(CLOSE,1))+ABS(LOW-DELAY(CLOSE,1))/2+ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4:(ABS(LOW-DELAY(CLOSE,1))>ABS(HIGH-DELAY(LOW,1)) & ABS(LOW-DELAY(CLOSE,1))>ABS(HIGH-DELAY(CLOSE,1))?ABS(LOW-DELAY(CLOSE,1))+ABS(HIGH-DELAY(CLOSE,1))/2+ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4:ABS(HIGH-DELAY(LOW,1))+ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4))) * MAX(ABS(HIGH-DELAY(CLOSE,1)),ABS(LOW-DELAY(CLOSE,1)))
def GTJAalpha137(self):
        condition1 = (abs(self.low-delay(self.close,1)) > abs(self.high-delay(self.low,1))) & (abs(self.low-delay(self.close,1)) > abs(self.high-delay(self.close,1)))
inner1_true = abs(self.low-delay(self.close,1)) + abs(self.high-delay(self.close,1))/2 + abs(delay(self.close,1)-delay(self.open,1))/4
inner1_false = abs(self.high-delay(self.low,1)) + abs(delay(self.close,1)-delay(self.open,1))/4
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
        condition2 = (abs(self.high-delay(self.close,1)) > abs(self.low-delay(self.close,1))) & (abs(self.high-delay(self.close,1)) > abs(self.high-delay(self.low,1)))
inner2_true = abs(self.high-delay(self.close,1)) + abs(self.low-delay(self.close,1))/2 + abs(delay(self.close,1)-delay(self.open,1))/4
inner2_false = inner1
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = 16*(self.close-delay(self.close,1)+(self.close-self.open)/2+delay(self.close,1)-delay(self.open,1)) / inner2 * np.maximum(abs(self.high-delay(self.close,1)),abs(self.low-delay(self.close,1)))
return alpha[self.start_date_index:self.end_date_index]
#Alpha138 ((RANK(DECAYLINEAR(DELTA((((LOW * 0.7) + (VWAP *0.3))), 3), 20)) - TSRANK(DECAYLINEAR(TSRANK(CORR(TSRANK(LOW, 8), TSRANK(MEAN(VOLUME,60), 17), 5), 19), 16), 7)) * -1)
def GTJAalpha138(self):
alpha = (rolling_rank(decay_linear(delta((((self.low * 0.7) + (self.vwap *0.3))), 3), 20)) - ts_rank(decay_linear(ts_rank(correlation(ts_rank(self.low, 8), ts_rank(sma(self.volume,60), 17), 5), 19), 16), 7)) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha139 (-1 * CORR(OPEN, VOLUME, 10))
def GTJAalpha139(self):
alpha = -1 * correlation(self.open,self.volume,10)
return alpha[self.start_date_index:self.end_date_index]
#Alpha140 MIN(RANK(DECAYLINEAR(((RANK(OPEN) + RANK(LOW)) - (RANK(HIGH) + RANK(CLOSE))), 8)), TSRANK(DECAYLINEAR(CORR(TSRANK(CLOSE, 8), TSRANK(MEAN(VOLUME,60), 20), 8), 7), 3))
def GTJAalpha140(self):
alpha = np.minimum(rolling_rank(decay_linear(((rolling_rank(self.open) + rolling_rank(self.low)) - (rolling_rank(self.high) + rolling_rank(self.close))), 8)), ts_rank(decay_linear(correlation(ts_rank(self.close,8), ts_rank(sma(self.volume,60), 20), 8), 7), 3))
return alpha[self.start_date_index:self.end_date_index]
#Alpha141 (RANK(CORR(RANK(HIGH), RANK(MEAN(VOLUME,15)), 9))* -1)
def GTJAalpha141(self):
alpha = rolling_rank(correlation(rolling_rank(self.high), rolling_rank(sma(self.volume,15)), 9))* -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha142 (((-1 * RANK(TSRANK(CLOSE, 10))) * RANK(DELTA(DELTA(CLOSE, 1), 1))) * RANK(TSRANK((VOLUME /MEAN(VOLUME,20)), 5)))
def GTJAalpha142(self):
        alpha = ((-1 * rolling_rank(ts_rank(self.close, 10))) * rolling_rank(delta(delta(self.close, 1), 1))) * rolling_rank(ts_rank((self.volume/sma(self.volume,20)), 5))
return alpha[self.start_date_index:self.end_date_index]
#Alpha143 CLOSE>DELAY(CLOSE,1)?(CLOSE-DELAY(CLOSE,1))/DELAY(CLOSE,1)*SELF:SELF
def GTJAalpha143(self):
alpha = pd.Series(1, index=self.close.index)
for i in range(1,len(alpha)):
if self.close.iloc[i]>self.close.iloc[i-1]:
alpha.iloc[i] = ((self.close.iloc[i]-self.close.iloc[i-1])/self.close.iloc[i-1]) * alpha.iloc[i-1]
else:
alpha.iloc[i] = alpha.iloc[i-1]
return alpha[self.start_date_index:self.end_date_index]
#Alpha144 SUMIF(ABS(CLOSE/DELAY(CLOSE,1)-1)/AMOUNT,20,CLOSE<DELAY(CLOSE,1)) / COUNT(CLOSE<DELAY(CLOSE,1),20)
def GTJAalpha144(self):
condition = self.close < delay(self.close,1)
inner_sumif = abs(self.close/delay(self.close,1)-1) / self.amount
        # rolling SUMIF: sum the ratio over the trailing 20 observations where the condition holds
        value_sumif = pd.Series(np.nan, index=condition.index)
        for i in range(20-1, len(condition)):
            value_sumif.iloc[i] = (inner_sumif[i-20+1:i+1] * condition[i-20+1:i+1]).sum()
        count = pd.Series(np.nan, index=condition.index)
        for i in range(20-1, len(condition)):
            count.iloc[i] = condition[i-20+1:i+1].sum()
alpha = value_sumif / count
return alpha[self.start_date_index:self.end_date_index]
#Alpha145 (MEAN(VOLUME,9)-MEAN(VOLUME,26))/MEAN(VOLUME,12)*100
def GTJAalpha145(self):
alpha = (sma(self.volume,9)-sma(self.volume,26))/sma(self.volume,12)*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha146 MEAN((CLOSE-DELAY(CLOSE,1))/DELAY(CLOSE,1)-SMA((CLOSE-DELAY(CLOSE,1))/DELAY(CLOSE,1),61,2),20)*((CLOSE-DELAY(CLOSE,1))/DELAY(CLOSE,1)-SMA((CLOSE-DELAY(CLOSE,1))/DELAY(CLOSE,1),61,2))/SMA(((CLOSE-DELAY(CLOSE,1))/DELAY(CLOSE,1)-((CLOSE-DELAY(CLOSE,1))/DELAY(CLOSE,1)-SMA((CLOSE-DELAY(CLOSE,1))/DELAY(CLOSE,1),61,2)))^2,60)
def GTJAalpha146(self):
alpha = sma((self.close-delay(self.close,1))/delay(self.close,1)-ema((self.close-delay(self.close,1))/delay(self.close,1),61,2),20) * ((self.close-delay(self.close,1))/delay(self.close,1)-ema((self.close-delay(self.close,1))/delay(self.close,1),61,2)) / ema(((self.close-delay(self.close,1))/delay(self.close,1)-((self.close-delay(self.close,1))/delay(self.close,1)-ema((self.close-delay(self.close,1))/delay(self.close,1),61,2)))**2,60)
return alpha[self.start_date_index:self.end_date_index]
#Alpha147 REGBETA(MEAN(CLOSE,12),SEQUENCE(12))
def GTJAalpha147(self):
y = sma(self.close, 12)
alpha = pd.Series(np.nan, index=self.close.index)
for i in range(12-1,len(y)):
alpha.iloc[i]=sp.stats.linregress(pd.Series(np.arange(1,13)), y[i-12+1:i+1])[0]
return alpha[self.start_date_index:self.end_date_index]
#Alpha148 ((RANK(CORR((OPEN), SUM(MEAN(VOLUME,60), 9), 6)) < RANK((OPEN - TSMIN(OPEN, 14)))) * -1)
def GTJAalpha148(self):
alpha = (rolling_rank(correlation(self.open, ts_sum(sma(self.volume,60), 9), 6)) < rolling_rank((self.open - ts_min(self.open, 14)))) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha149 REGBETA(FILTER(CLOSE/DELAY(CLOSE,1)-1,BANCHMARKINDEXCLOSE<DELAY(BANCHMARKINDEXCLOSE,1)),FILTER(BANCHMARKINDEXCLOSE/DELAY(BANCHMARKINDEXCLOSE,1)-1,BANCHMARKINDEXCLOSE<DELAY(BANCHMARKINDEXCLOSE,1)),252)
def GTJAalpha149(self):
y = self.close/delay(self.close,1)-1
y = y[self.benchmarkindexclose < delay(self.benchmarkindexclose,1)]
x = self.benchmarkindexclose/delay(self.benchmarkindexclose,1) -1
x = x[self.benchmarkindexclose < delay(self.benchmarkindexclose,1)]
        alpha = pd.Series(np.nan, index=y.index)
        # REGBETA over a rolling 252-observation window, mirroring the pattern used in
        # GTJAalpha147; the original source is truncated here, so the loop below is a
        # best-effort reconstruction.
        for i in range(252-1, len(y)):
            alpha.iloc[i] = sp.stats.linregress(x[i-252+1:i+1], y[i-252+1:i+1])[0]
        return alpha[self.start_date_index:self.end_date_index]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'mayanqiong'
from collections import namedtuple
from datetime import datetime
from typing import Callable, Tuple
import aiohttp
from pandas import DataFrame, Series
from sgqlc.operation import Operation
from tqsdk.backtest import TqBacktest
from tqsdk.datetime import _get_expire_rest_days, _str_to_timestamp_nano
from tqsdk.ins_schema import ins_schema, _add_all_frags
from tqsdk.objs import Quote
from tqsdk.diff import _get_obj
from tqsdk.utils import _query_for_quote, _generate_uuid
from tqsdk.tafunc import _get_t_series, get_impv, _get_d1, get_delta, get_theta, get_gamma, get_vega, get_rho
"""
These classes are used only by the api; they mainly exist so that users can await them in async code.
They do not inherit from the Entity class.
"""
async def ensure_quote(api, quote):
if quote.price_tick > 0 and quote.datetime != "":
return quote
async with api.register_update_notify(quote) as update_chan:
async for _ in update_chan:
if quote.price_tick > 0 and quote.datetime != "":
return quote
async def ensure_quote_with_underlying(api, quote):
await ensure_quote(api, quote)
if quote.underlying_symbol:
await ensure_quote(api, quote.underlying_quote)
return quote
class QuoteList(list):
"""
    Requests instrument information and market quotes; when self._task completes, every contract has received both its instrument information and its quote data.
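
    Example (illustrative usage inside a user coroutine; the contract symbols
    are placeholders)::

        quotes = await api.get_quote_list(["SHFE.cu2112", "DCE.m2201"])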
"""
def __init__(self, api, quotes):
self._api = api
list.__init__(self, quotes)
self._task = api.create_task(self._ensure_quotes(), _caller_api=True)
for quote in quotes:
            # create a _task for each quote object
if not hasattr(quote, '_task'):
quote._task = api.create_task(ensure_quote_with_underlying(api, quote), _caller_api=True)
async def _ensure_symbols(self):
if all([q.price_tick > 0 for q in self]):
return
query_symbols = [q._path[-1] for q in self if not q.price_tick > 0]
query_pack = _query_for_quote(query_symbols)
self._api._send_pack(query_pack)
async with self._api.register_update_notify(self) as update_chan:
async for _ in update_chan:
if all([q.price_tick > 0 for q in self]):
return
async def _ensure_quotes(self):
await self._ensure_symbols()
        self._api._auth._has_md_grants([q._path[-1] for q in self]) # permission check
        # the request asks for every field; for options it also requests the underlying's instrument information
underlying_symbols = set([q.underlying_symbol for q in self if q.underlying_symbol])
need_quotes = set([q._path[-1] for q in self]).union(underlying_symbols)
if need_quotes - self._api._requests["quotes"] != set():
self._api._requests["quotes"] = self._api._requests["quotes"].union(need_quotes)
self._api._send_pack({
"aid": "subscribe_quote",
"ins_list": ",".join(self._api._requests["quotes"]),
})
if all([q.datetime != "" for q in self]):
return self
all_quotes = self + [_get_obj(self._api._data, ["quotes", s], self._api._prototype["quotes"]["#"]) for s in underlying_symbols]
async with self._api.register_update_notify(self) as update_chan:
async for _ in update_chan:
if all([q.datetime != "" for q in all_quotes]):
return self
def __await__(self):
return self._task.__await__()
async def _query_graphql_async(api, query_id, query):
api._send_pack({
"aid": "ins_query",
"query_id": query_id,
"query": query
})
symbols = _get_obj(api._data, ["symbols"])
async with api.register_update_notify(symbols) as update_chan:
async for _ in update_chan:
s = symbols.get(query_id, {})
if s.get("query") == query:
break
class SymbolList(list):
"""
    Return type of the query family of functions.
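
    Instances can be awaited in async code, e.g. (illustrative only, assuming a
    TqApi instance `api`)::

        symbols = await api.query_quotes(ins_class="FUTURE", exchange_id="SHFE")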
"""
def __init__(self, api, query_id: str, query: str, filter: Callable[[dict], list]):
self._api = api
self._query_id = query_id
self._query = query
self._filter = filter
list.__init__(self, [])
self._task = api.create_task(self._query_graphql(), _caller_api=True)
async def _query_graphql(self):
pack = {"query": self._query}
symbols = _get_obj(self._api._data, ["symbols"])
query_result = None
for symbol in symbols.values():
            if symbol.items() >= pack.items(): # check whether the same request has already been sent
query_result = symbol
if query_result is None:
await _query_graphql_async(self._api, self._query_id, self._query)
query_result = symbols.get(self._query_id)
self += self._filter(query_result)
        if isinstance(self._api._backtest, TqBacktest): # during backtest, clear the cached request
self._api._send_pack({
"aid": "ins_query",
"query_id": self._query_id,
"query": ""
})
return self
def __await__(self):
return self._task.__await__()
class SymbolLevelList(namedtuple('SymbolLevel', ['in_money_options', 'at_money_options', 'out_of_money_options'])):
"""
    Return type of the query family of functions.
"""
def __new__(cls, *args, **kwargs):
return super(SymbolLevelList, cls).__new__(cls, in_money_options=[], at_money_options=[], out_of_money_options=[])
def __init__(self, api, query_id: str, query: str, filter: Callable[[dict], Tuple[list, list, list]]):
self._api = api
self._query_id = query_id
self._query = query
self._filter = filter
self._task = api.create_task(self._query_graphql(), _caller_api=True)
async def _query_graphql(self):
pack = {"query": self._query}
symbols = _get_obj(self._api._data, ["symbols"])
query_result = None
for symbol in symbols.values():
            if symbol.items() >= pack.items(): # check whether the same request has already been sent
query_result = symbol
if query_result is None:
await _query_graphql_async(self._api, self._query_id, self._query)
query_result = symbols.get(self._query_id)
l0, l1, l2 = self._filter(query_result)
self[0].extend(l0)
self[1].extend(l1)
self[2].extend(l2)
        if isinstance(self._api._backtest, TqBacktest): # during backtest, clear the cached request
self._api._send_pack({
"aid": "ins_query",
"query_id": self._query_id,
"query": ""
})
return self
def __await__(self):
return self._task.__await__()
class TqDataFrame(DataFrame):
def __init__(self, api, *args, **kwargs):
super(TqDataFrame, self).__init__(*args, **kwargs)
self.__dict__["_api"] = api
self.__dict__["_task"] = api.create_task(self.async_update(), _caller_api=True)
async def async_update(self):
async with self._api.register_update_notify(self) as update_chan:
async for _ in update_chan:
if self._api._serials.get(id(self))["init"]:
return self
def __await__(self):
return self.__dict__["_task"].__await__()
class TqSymbolDataFrame(DataFrame):
def __init__(self, api, symbol_list, backtest_timestamp, *args, **kwargs):
self.__dict__["_api"] = api
self.__dict__["_symbol_list"] = symbol_list
self.__dict__["_backtest_timestamp"] = backtest_timestamp
self.__dict__["_columns"] = [
"ins_class",
"instrument_id",
"instrument_name",
"price_tick",
"volume_multiple",
"max_limit_order_volume",
"max_market_order_volume",
"underlying_symbol",
"strike_price",
"exchange_id",
"product_id",
"expired",
"expire_datetime",
"expire_rest_days",
"delivery_year",
"delivery_month",
"last_exercise_datetime",
"exercise_year",
"exercise_month",
"option_class",
"upper_limit",
"lower_limit",
"pre_settlement",
"pre_open_interest",
"pre_close",
"trading_time_day",
"trading_time_night"
]
default_quote = Quote(None)
data = [{k: (s if k == "instrument_id" else default_quote.get(k, None)) for k in self.__dict__["_columns"]} for s in symbol_list]
super(TqSymbolDataFrame, self).__init__(data=data, columns=self.__dict__["_columns"], *args, **kwargs)
self.__dict__["_task"] = api.create_task(self.async_update(), _caller_api=True)
async def async_update(self):
query_id = _generate_uuid("PYSDK_api")
op = Operation(ins_schema.rootQuery)
variables = {"instrument_id": self.__dict__["_symbol_list"]}
if self.__dict__["_backtest_timestamp"]:
variables["timestamp"] = self.__dict__["_backtest_timestamp"]
query = op.multi_symbol_info(**variables)
_add_all_frags(query)
self.__dict__["_api"]._send_pack({
"aid": "ins_query",
"query_id": query_id,
"query": op.__to_graphql__()
})
symbols = _get_obj(self.__dict__["_api"]._data, ["symbols"])
async with self.__dict__["_api"].register_update_notify(symbols) as update_chan:
async for _ in update_chan:
query_result = symbols.get(query_id, {})
if query_result:
all_keys = set(self.__dict__["_columns"])
all_keys.add('trading_time')
quotes = self.__dict__["_api"]._symbols_to_quotes(query_result, keys=all_keys)
self._quotes_to_dataframe(quotes)
if self.__dict__["_backtest_timestamp"]:
                        # these fields should be nan during backtest
self.loc[:, ["upper_limit", "lower_limit", "pre_settlement", "pre_open_interest", "pre_close"]] = float('nan')
                        # during backtest, clear the request so its content is not cached
self.__dict__["_api"]._send_pack({
"aid": "ins_query",
"query_id": query_id,
"query": ""
})
return self
def _get_trading_time(self, quotes, symbol, key):
v = quotes[symbol].get('trading_time', {'day': [], 'night': []}).get(key, [])
return v if v else None
def _quotes_to_dataframe(self, quotes):
default_quote = Quote(None)
for col in self.__dict__["_columns"]:
if col == "expire_rest_days":
current_dt = self._api._get_current_datetime().timestamp()
self.loc[:, col] = [_get_expire_rest_days(quotes[s]['expire_datetime'], current_dt)
if quotes[s].get('expire_datetime') else float('nan')
for s in self.__dict__["_symbol_list"]]
elif col == "trading_time_day" or col == "trading_time_night":
k = 'day' if col == "trading_time_day" else 'night'
self.loc[:, col] = Series([self._get_trading_time(quotes, s, k) for s in self.__dict__["_symbol_list"]])
else:
self.loc[:, col] = Series([quotes[s].get(col, default_quote[col]) for s in self.__dict__["_symbol_list"]])
def __await__(self):
return self.__dict__["_task"].__await__()
class TqSymbolRankingDataFrame(DataFrame):
def __init__(self, api, symbol, ranking_type, days, start_dt, broker):
self.__dict__["_api"] = api
params = {'symbol': symbol}
if days is not None:
params['days'] = days
if start_dt is not None:
params['start_date'] = start_dt.strftime("%Y%m%d")
if broker is not None:
params['broker'] = broker
self.__dict__["_params"] = params
self.__dict__["_symbol"] = symbol
self.__dict__["_ranking_type"] = f"{ranking_type.lower()}_ranking"
self.__dict__["_columns"] = [
"datetime",
"symbol",
"exchange_id",
"instrument_id",
"broker",
"volume",
"volume_change",
"volume_ranking",
"long_oi",
"long_change",
"long_ranking",
"short_oi",
"short_change",
"short_ranking"
]
super(TqSymbolRankingDataFrame, self).__init__(data=[], columns=self.__dict__["_columns"])
self.__dict__["_task"] = api.create_task(self.async_update(), _caller_api=True)
async def _get_ranking_data(self, ranking_id):
        # download the position-ranking data and send it back to api.recv_chan
async with aiohttp.ClientSession(headers=self.__dict__["_api"]._base_headers) as session:
url = "https://symbol-ranking-system-fc-api.shinnytech.com/srs"
async with session.get(url, params=self.__dict__["_params"]) as response:
response.raise_for_status()
content = await response.json()
await self.__dict__["_api"]._ws_md_recv_chan.send({
"aid": "rtn_data",
"data": [{
"_symbol_rankings": {
ranking_id: content
}
}]
})
async def async_update(self):
await self.__dict__["_api"]._ensure_symbol_async(self.__dict__["_symbol"])
ranking_id = _generate_uuid("PYSDK_rank")
        self.__dict__["_api"].create_task(self._get_ranking_data(ranking_id), _caller_api=True) # errors are raised to the api for handling
symbol_rankings = _get_obj(self.__dict__["_api"]._data, ["_symbol_rankings"])
async with self.__dict__["_api"].register_update_notify(symbol_rankings) as update_chan:
async for _ in update_chan:
content = symbol_rankings.get(ranking_id, None)
if content is None:
continue
data = self._content_to_list(content)
for i, d in enumerate(data):
self.loc[i] = d
self.dropna(subset=[self.__dict__["_ranking_type"]], inplace=True)
self.sort_values(by=['datetime', self.__dict__["_ranking_type"]], inplace=True, ignore_index=True)
                # after the data has been read, clear it
await self.__dict__["_api"]._ws_md_recv_chan.send({
"aid": "rtn_data",
"data": [{
"_symbol_rankings": {
ranking_id: None
}
}]
})
return self
def _content_to_list(self, content):
data = {}
for dt in content.keys():
for symbol in content[dt].keys():
if content[dt][symbol] is None:
continue
for data_type, rankings in content[dt][symbol].items():
for broker, rank_item in rankings.items():
item = data.setdefault((dt, symbol, broker), self._get_default_item(dt, symbol, broker))
if data_type == 'volume_ranking':
item['volume'] = rank_item['volume']
item['volume_change'] = rank_item['varvolume']
item['volume_ranking'] = rank_item['ranking']
elif data_type == 'long_ranking':
item['long_oi'] = rank_item['volume']
item['long_change'] = rank_item['varvolume']
item['long_ranking'] = rank_item['ranking']
elif data_type == 'short_ranking':
item['short_oi'] = rank_item['volume']
item['short_change'] = rank_item['varvolume']
item['short_ranking'] = rank_item['ranking']
return data.values()
def _get_default_item(self, dt, symbol, broker):
return {
"datetime": dt,
"symbol": symbol,
"exchange_id": symbol.split(".", maxsplit=1)[0],
"instrument_id": symbol.split(".", maxsplit=1)[1],
"broker": broker,
"volume": float('nan'),
"volume_change": float('nan'),
"volume_ranking": float('nan'),
"long_oi": float('nan'),
"long_change": float('nan'),
"long_ranking": float('nan'),
"short_oi": float('nan'),
"short_change": float('nan'),
"short_ranking": float('nan')
}
def __await__(self):
return self.__dict__["_task"].__await__()
class TqOptionGreeksDataFrame(DataFrame):
def __init__(self, api, symbol_list, v_list, r):
self.__dict__["_api"] = api
self.__dict__["_symbol_list"] = symbol_list
self.__dict__["_v_list"] = v_list
self.__dict__["_r"] = r
self.__dict__["_columns"] = [
"instrument_id",
"instrument_name",
"option_class",
"expire_rest_days",
"expire_datetime",
"underlying_symbol",
"strike_price",
"delta",
"gamma",
"theta",
"vega",
"rho"
]
super(TqOptionGreeksDataFrame, self).__init__(data=[], columns=self.__dict__["_columns"])
self.__dict__["_task"] = api.create_task(self.async_update(), _caller_api=True)
async def async_update(self):
symbol_list = self.__dict__["_symbol_list"]
quotes = await self.__dict__["_api"].get_quote_list(symbol_list)
if not all([q.ins_class.endswith("OPTION") for q in quotes]):
            raise Exception("Every element of the quote parameter list must be an option")
for i, q in enumerate(quotes):
self.loc[i] = {k: q.get(k, float('nan')) for k in self.__dict__["_columns"]}
self._get_greeks(quotes)
return self
def _get_greeks(self, quotes):
        series_close = Series(data=[q.last_price for q in quotes]) # latest option price
        series_close1 = Series(data=[q.underlying_quote.last_price for q in quotes]) # latest price of the underlying
series_o = Series(data=[q.option_class for q in quotes])
series_datetime = Series(data=[_str_to_timestamp_nano(q.datetime) for q in quotes])
        series_expire_datetime = Series(data=[q.expire_datetime for q in quotes])
from posixpath import join
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
def get_csv(dir_name,case_name):
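    """Return the full paths of all .csv files found under ../<dir_name>/<case_name>, relative to this script's directory."""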
file_name = []
# find .csv file
base_name = os.path.join(os.path.dirname( __file__ ),"..")
join_name = os.path.join(base_name, dir_name, case_name)
for file in os.listdir(join_name):
if file.endswith(".csv"):
file_name.append( os.path.join(join_name,file) )
return file_name
def get_min_data(file_name,set_min=None):
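    """Read every CSV, keep only the last column of each, truncate all series to the shortest common length (optionally capped by set_min), and stack them column-wise into a (min_len, n_files) array."""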
# numpy
min_len = 1e6
data = []
for i in range(len(file_name)):
temp = pd.read_csv(file_name[i]).to_numpy()
if min_len>temp.shape[0]: min_len = temp.shape[0]
data.append(temp)
if set_min is not None and set_min<min_len: min_len = set_min
new_data = np.zeros((min_len,len(file_name)))
for i in range(len(data)):
new_data[:,i]=data[i][:min_len,-1]
return new_data
def save_sns_plot(sns_plot, fig_title):
sns_plot.set(title=fig_title)
sns_plot.figure.savefig( os.path.join(os.path.dirname( __file__ ),f"{fig_title}.png"))
def liver_p_to_p():
dir_name = "liver_p_to_p"
case_name = "2021-12-18_ppo"
fig_title = "Point-to-Point without Collision"
file_name = get_csv(dir_name,case_name)
new_data = get_min_data(file_name,set_min=500)
sns.set_theme(style='darkgrid')
plt.figure(figsize=(10,5))
    new_data = pd.DataFrame(data=new_data,index=np.arange(new_data.shape[0]),columns=[f"data{i}" for i in range(len(file_name))])
sns_plot = sns.lineplot(data=new_data,legend=False)
save_sns_plot(sns_plot,fig_title)
print("done")
def pure_p_to_p():
dir_name = 'pure_p_to_p'
case_names = ['2021-12-18_ddpg','2021-12-18_ppo']
fig_title = "Point-to-Point without Soft Tissue"
file_name = []
for case_name in case_names:
file_name += get_csv(dir_name,case_name)
print(file_name)
del file_name[-1]
data = get_min_data(file_name,set_min=200)
sns.set_theme(style='darkgrid')
plt.figure(figsize=(10,5))
# ax = plt.gca()
# new_data = []
# sigma = 20
# for i in range(len(file_name)):
# d = data[:,i]
# # Smoothing
# smooth_data = gaussian_filter1d(d, sigma=sigma)
# # Error formating
# upper_err = smooth_data + sigma
# lower_err = smooth_data - sigma
# ax.plot(np.arange(d.shape[0]),d,'--')
# ax.plot(np.arange(d.shape[0]),smooth_data)
# ax.fill_between(np.arange(d.shape[0]),upper_err,lower_err)
# #df = pd.DataFrame(data=np.c_[smooth_data,upper_err,lower_err],columns=["smooth","upper_err","lower_err"])
# #new_data.append(df)
# plt.show(block=False)
new_data = pd.DataFrame(data=data,index=np.arange(data.shape[0]),columns=["DDPG","PPO"])
sns_plot = sns.lineplot(data=new_data)
save_sns_plot(sns_plot,fig_title)
print("done")
def train_ddpg():
dir_name = 'ddpg_test_amd'
case_names = ['2021-12-19_10-11-01-ddpg_s0_t5', '2021-12-19_01-15-44-ddpg_s0_should_t5']
fig_title = "Training Curve of DDPG"
file_name = []
for case_name in case_names:
file_name += get_csv(dir_name,case_name)
print(file_name)
new_data = get_min_data(file_name,set_min=250)
sns.set_theme(style='darkgrid')
plt.figure(figsize=(10,5))
new_data = pd.DataFrame(data=new_data,index=np.arange(new_data.shape[0]))
sns_plot = sns.lineplot(data=new_data,legend=False)
save_sns_plot(sns_plot,fig_title)
print("done")
def train_ppo():
dir_name = 'ppo_test_amd'
case_names = ['2021-12-19_15-00-57-ppo_s0_outliner','2021-12-21_11-09-31-ppo_s0_outliner']
fig_title = "Training Curve of PPO"
file_name = []
for case_name in case_names:
file_name += get_csv(dir_name,case_name)
print(file_name)
new_data = get_min_data(file_name,set_min=250)
sns.set_theme(style='darkgrid')
plt.figure(figsize=(10,5))
    new_data = pd.DataFrame(data=new_data,index=np.arange(new_data.shape[0]),columns=[f"data{i}" for i in range(len(file_name))])
sns_plot = sns.lineplot(data=new_data,legend=False)
save_sns_plot(sns_plot,fig_title)
print("done")
def train_ppo_ddpg_sep():
fig_title = "PPO_DDPG_SEP"
dir_name = 'ppo_test_amd'
case_names = ['2021-12-19_15-00-57-ppo_s0_outliner','2021-12-21_11-09-31-ppo_s0_outliner']
ppo_file_name = []
for case_name in case_names:
ppo_file_name += get_csv(dir_name,case_name)
dir_name = 'ddpg_test_amd'
case_names = ['2021-12-19_10-11-01-ddpg_s0_t5', '2021-12-19_01-15-44-ddpg_s0_should_t5']
ddpg_file_name = []
for case_name in case_names:
ddpg_file_name += get_csv(dir_name,case_name)
ppo_data = get_min_data(ppo_file_name,set_min=250)
ddpg_data = get_min_data(ddpg_file_name,set_min=250)
data = np.c_[ppo_data,ddpg_data]
new_data = pd.DataFrame(data=data,index=np.arange(data.shape[0]), columns=['PPO1','PPO2','DDPG1','DDPG2'])
#new_data['index'] = new_data.index
sns.set_theme(style='darkgrid')
plt.figure(figsize=(7,5))
sns_plot=sns.lineplot(data=new_data)
sns_plot.set_xlabel("Epochs")
sns_plot.set_ylabel("Reward")
sns_plot.set(title="Training Curve for PPO and DDPG")
sns_plot.figure.savefig( os.path.join(os.path.dirname( __file__ ),f"{fig_title}.png"))
print("done")
def train_ppo_ddpg_group():
fig_title = "PPO_DDPG_GROUP"
dir_name = 'ppo_test_amd'
case_names = ['2021-12-19_15-00-57-ppo_s0_outliner','2021-12-21_11-09-31-ppo_s0_outliner']
ppo_file_name = []
for case_name in case_names:
ppo_file_name += get_csv(dir_name,case_name)
dir_name = 'ddpg_test_amd'
case_names = ['2021-12-19_10-11-01-ddpg_s0_t5', '2021-12-19_01-15-44-ddpg_s0_should_t5']
ddpg_file_name = []
for case_name in case_names:
ddpg_file_name += get_csv(dir_name,case_name)
ppo_data = get_min_data(ppo_file_name,set_min=250)
ddpg_data = get_min_data(ddpg_file_name,set_min=250)
    ppo_df = pd.DataFrame(data=ppo_data)
    ddpg_df = pd.DataFrame(data=ddpg_data)
    # The original source is truncated here; the rest is a best-effort sketch that
    # reshapes both runs of each algorithm into long form so seaborn groups them
    # into one line (with a confidence band) per algorithm.
    ppo_long = ppo_df.melt(ignore_index=False, value_name='Reward')
    ppo_long['Algorithm'] = 'PPO'
    ddpg_long = ddpg_df.melt(ignore_index=False, value_name='Reward')
    ddpg_long['Algorithm'] = 'DDPG'
    long_df = pd.concat([ppo_long, ddpg_long]).reset_index().rename(columns={'index': 'Epochs'})
    sns.set_theme(style='darkgrid')
    plt.figure(figsize=(7,5))
    sns_plot = sns.lineplot(data=long_df, x='Epochs', y='Reward', hue='Algorithm')
    sns_plot.set(title="Training Curve for PPO and DDPG")
    sns_plot.figure.savefig( os.path.join(os.path.dirname( __file__ ),f"{fig_title}.png"))
    print("done")
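
if __name__ == "__main__":
    # Hypothetical entry point: the original script's invocation is not shown,
    # so this simply runs each plotting routine defined above.
    liver_p_to_p()
    pure_p_to_p()
    train_ddpg()
    train_ppo()
    train_ppo_ddpg_sep()
    train_ppo_ddpg_group()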
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
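
    Example
    -------
    A minimal, illustrative call (the dates and values are made up)::

        create_expected_df_for_factor_compute(
            start_date=pd.Timestamp('2015-01-05'),
            sids=[0, 1],
            tuples=[(0, 10., pd.Timestamp('2015-01-06'))],
            end_date=pd.Timestamp('2015-01-07'),
        )

    The result is indexed by (at_date, knowledge_date), forward-fills sid 0's
    estimate of 10. from 2015-01-06 through 2015-01-07, and includes an
    all-NaN column for sid 1, which has no data.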
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
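
    Example
    -------
    A minimal concrete subclass (illustrative only) supplies the two abstract
    pieces; `make_columns` already has a usable default::

        class MyEstimatesCase(WithEstimates, ZiplineTestCase):
            @classmethod
            def make_loader(cls, events, columns):
                return PreviousEarningsEstimatesLoader(events, columns)

            @classmethod
            def make_events(cls):
                return dummy_df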
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that running a Pipeline for a single day returns the expected
        values for every column of the estimates dataset.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that calculates estimates for multiple quarters out
        and checks that the returned columns contain data for the correct
        number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to snapshots of how
        the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
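    # The product above expands to pairs such as (Timestamp('2015-01-09'), 1),
    # (Timestamp('2015-01-09'), 2), (Timestamp('2015-01-15'), 1), ... for a
    # total of 12 parameterized cases.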
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
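        # For example, when `start_date` is 2015-01-09, the window covers every
        # trading session from 2015-01-05 (the first date with data) through
        # 2015-01-09, inclusive.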
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
            # We never get estimates for sid 10 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., | pd.Timestamp('2015-01-09') | pandas.Timestamp |
# -*- coding: utf-8 -*-
from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler
from collections import Counter
from pathlib import Path
import subprocess
import importlib
import math
import sys
import glob
import json
import pickle
import re
import warnings
from sklearn.datasets.base import Bunch
from skimage.draw import polygon
import skimage.transform
import shapely.wkt
from shapely.geometry import MultiPolygon, Polygon
import pandas as pd
import numpy as np
import tables as tb
import scipy
import rasterio
import rasterio.features
import tqdm
import cv2
import gdal
import click
import skimage.draw
import shapely.ops
import shapely.geometry
import fiona
import affine
from keras.models import Model
from keras.engine.topology import merge as merge_l
from keras.layers import (
Input, Convolution2D, MaxPooling2D, UpSampling2D,
Reshape, core, Dropout,
Activation, BatchNormalization)
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, EarlyStopping, History
from keras import backend as K
MODEL_NAME = 'v16'
ORIGINAL_SIZE = 650
INPUT_SIZE = 256
STRIDE_SZ = 197
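# The nine 256x256 crops taken from each 650x650 scene start at offsets 0, 197
# and 394 along each axis (197 * 2 + 256 == 650), so they tile the image
# exactly, with overlap between neighbouring crops.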
LOGFORMAT = '%(asctime)s %(levelname)s %(message)s'
BASE_DIR = "/data/train" # train data
BASE_TEST_DIR = "/data/test" # test data
WORKING_DIR = "/data/working"
IMAGE_DIR = "/data/working/images/{}".format('v16')
V12_IMAGE_DIR = "/data/working/images/{}".format('v12') # for mask and mul
V5_IMAGE_DIR = "/data/working/images/{}".format('v5')
MODEL_DIR = "/data/working/models/{}".format(MODEL_NAME)
FN_SOLUTION_CSV = "/data/output/{}.csv".format(MODEL_NAME)
# ---------------------------------------------------------
# Parameters
MIN_POLYGON_AREA = 30
# ---------------------------------------------------------
# Input files
FMT_TRAIN_SUMMARY_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("summaryData/{prefix:s}_Train_Building_Solutions.csv"))
FMT_TRAIN_RGB_IMAGE_PATH = str(
Path("{datapath:s}/") /
Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TEST_RGB_IMAGE_PATH = str(
Path("{datapath:s}/") /
Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TRAIN_MSPEC_IMAGE_PATH = str(
Path("{datapath:s}/") /
Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
FMT_TEST_MSPEC_IMAGE_PATH = str(
Path("{datapath:s}/") /
Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
# ---------------------------------------------------------
# Preprocessing result
FMT_RGB_BANDCUT_TH_PATH = V12_IMAGE_DIR + "/rgb_bandcut{}.csv"
FMT_MUL_BANDCUT_TH_PATH = V12_IMAGE_DIR + "/mul_bandcut{}.csv"
# ---------------------------------------------------------
# Image list, Image container and mask container
FMT_VALTRAIN_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_valtrain_ImageId.csv"
FMT_VALTEST_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_valtest_ImageId.csv"
FMT_TRAIN_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_train_ImageId.csv"
FMT_TEST_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_test_ImageId.csv"
# Mask
FMT_VALTRAIN_MASK_STORE = V12_IMAGE_DIR + "/valtrain_{}_mask.h5"
FMT_VALTEST_MASK_STORE = V12_IMAGE_DIR + "/valtest_{}_mask.h5"
FMT_TRAIN_MASK_STORE = V12_IMAGE_DIR + "/train_{}_mask.h5"
# MUL
FMT_VALTRAIN_MUL_STORE = V12_IMAGE_DIR + "/valtrain_{}_mul.h5"
FMT_VALTEST_MUL_STORE = V12_IMAGE_DIR + "/valtest_{}_mul.h5"
FMT_TRAIN_MUL_STORE = V12_IMAGE_DIR + "/train_{}_mul.h5"
FMT_TEST_MUL_STORE = V12_IMAGE_DIR + "/test_{}_mul.h5"
FMT_MULMEAN = V12_IMAGE_DIR + "/{}_mulmean.h5"
# OSM
FMT_VALTRAIN_OSM_STORE = IMAGE_DIR + "/valtrain_{}_osm.h5"
FMT_VALTEST_OSM_STORE = IMAGE_DIR + "/valtest_{}_osm.h5"
FMT_TRAIN_OSM_STORE = IMAGE_DIR + "/train_{}_osm.h5"
FMT_TEST_OSM_STORE = IMAGE_DIR + "/test_{}_osm.h5"
FMT_OSM_MEAN = IMAGE_DIR + "/{}_osmmean.h5"
# ---------------------------------------------------------
# Model files
FMT_VALMODEL_PATH = MODEL_DIR + "/{}_val_weights.h5"
FMT_FULLMODEL_PATH = MODEL_DIR + "/{}_full_weights.h5"
FMT_VALMODEL_HIST = MODEL_DIR + "/{}_val_hist.csv"
FMT_VALMODEL_EVALHIST = MODEL_DIR + "/{}_val_evalhist.csv"
FMT_VALMODEL_EVALTHHIST = MODEL_DIR + "/{}_val_evalhist_th.csv"
# ---------------------------------------------------------
# Prediction & polygon result
FMT_TESTPRED_PATH = MODEL_DIR + "/{}_pred.h5"
FMT_VALTESTPRED_PATH = MODEL_DIR + "/{}_eval_pred.h5"
FMT_VALTESTPOLY_PATH = MODEL_DIR + "/{}_eval_poly.csv"
FMT_VALTESTTRUTH_PATH = MODEL_DIR + "/{}_eval_poly_truth.csv"
FMT_VALTESTPOLY_OVALL_PATH = MODEL_DIR + "/eval_poly.csv"
FMT_VALTESTTRUTH_OVALL_PATH = MODEL_DIR + "/eval_poly_truth.csv"
FMT_TESTPOLY_PATH = MODEL_DIR + "/{}_poly.csv"
# ---------------------------------------------------------
# Model related files (others)
FMT_VALMODEL_LAST_PATH = MODEL_DIR + "/{}_val_weights_last.h5"
FMT_FULLMODEL_LAST_PATH = MODEL_DIR + "/{}_full_weights_last.h5"
# OSM dataset (Extracted from https://mapzen.com/data/metro-extracts/)
FMT_OSMSHAPEFILE = "/root/osmdata/{name:}/{name:}_{layer:}.shp"
FMT_SERIALIZED_OSMDATA = WORKING_DIR + "/osm_{}_subset.pkl"
LAYER_NAMES = [
'buildings',
'landusages',
'roads',
'waterareas',
]
# ---------------------------------------------------------
# warnings and logging
warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", FutureWarning)
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter(LOGFORMAT))
fh_handler = FileHandler(".{}.log".format(MODEL_NAME))
fh_handler.setFormatter(Formatter(LOGFORMAT))
logger = getLogger(__name__)
logger.setLevel(INFO)
if __name__ == '__main__':
logger.addHandler(handler)
logger.addHandler(fh_handler)
# Fix seed for reproducibility
np.random.seed(1145141919)
def directory_name_to_area_id(datapath):
"""
Directory name to AOI number
Usage:
>>> directory_name_to_area_id("/data/test/AOI_2_Vegas")
2
"""
dir_name = Path(datapath).name
if dir_name.startswith('AOI_2_Vegas'):
return 2
elif dir_name.startswith('AOI_3_Paris'):
return 3
elif dir_name.startswith('AOI_4_Shanghai'):
return 4
elif dir_name.startswith('AOI_5_Khartoum'):
return 5
else:
raise RuntimeError("Unsupported city id is given.")
def _remove_interiors(line):
if "), (" in line:
line_prefix = line.split('), (')[0]
line_terminate = line.split('))",')[-1]
line = (
line_prefix +
'))",' +
line_terminate
)
return line
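# Illustrative example: a solution row whose WKT contains an interior ring,
#   img1,1,"POLYGON ((0 0, 10 0, 10 10, 0 0), (2 2, 3 2, 2 3, 2 2))",0.9
# is reduced to its exterior ring only:
#   img1,1,"POLYGON ((0 0, 10 0, 10 10, 0 0))",0.9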
def _calc_fscore_per_aoi(area_id):
prefix = area_id_to_prefix(area_id)
truth_file = FMT_VALTESTTRUTH_PATH.format(prefix)
poly_file = FMT_VALTESTPOLY_PATH.format(prefix)
cmd = [
'java',
'-jar',
'/root/visualizer-2.0/visualizer.jar',
'-truth',
truth_file,
'-solution',
poly_file,
'-no-gui',
'-band-triplets',
'/root/visualizer-2.0/data/band-triplets.txt',
'-image-dir',
'pass',
]
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = proc.communicate()
lines = [line for line in stdout_data.decode('utf8').split('\n')[-10:]]
"""
Overall F-score : 0.85029
AOI_2_Vegas:
TP : 27827
FP : 4999
FN : 4800
Precision: 0.847712
Recall : 0.852883
F-score : 0.85029
"""
if stdout_data.decode('utf8').strip().endswith("Overall F-score : 0"):
overall_fscore = 0
tp = 0
fp = 0
fn = 0
precision = 0
recall = 0
fscore = 0
elif len(lines) > 0 and lines[0].startswith("Overall F-score : "):
assert lines[0].startswith("Overall F-score : ")
assert lines[2].startswith("AOI_")
assert lines[3].strip().startswith("TP")
assert lines[4].strip().startswith("FP")
assert lines[5].strip().startswith("FN")
assert lines[6].strip().startswith("Precision")
assert lines[7].strip().startswith("Recall")
assert lines[8].strip().startswith("F-score")
overall_fscore = float(re.findall("([\d\.]+)", lines[0])[0])
tp = int(re.findall("(\d+)", lines[3])[0])
fp = int(re.findall("(\d+)", lines[4])[0])
fn = int(re.findall("(\d+)", lines[5])[0])
precision = float(re.findall("([\d\.]+)", lines[6])[0])
recall = float(re.findall("([\d\.]+)", lines[7])[0])
fscore = float(re.findall("([\d\.]+)", lines[8])[0])
else:
        logger.warning("Unexpected data >>> " + stdout_data.decode('utf8'))
raise RuntimeError("Unsupported format")
return {
'overall_fscore': overall_fscore,
'tp': tp,
'fp': fp,
'fn': fn,
'precision': precision,
'recall': recall,
'fscore': fscore,
}
def prefix_to_area_id(prefix):
area_dict = {
'AOI_2_Vegas': 2,
'AOI_3_Paris': 3,
'AOI_4_Shanghai': 4,
'AOI_5_Khartoum': 5,
}
    return area_dict[prefix]
def area_id_to_prefix(area_id):
area_dict = {
2: 'AOI_2_Vegas',
3: 'AOI_3_Paris',
4: 'AOI_4_Shanghai',
5: 'AOI_5_Khartoum',
}
return area_dict[area_id]
def area_id_to_osmprefix(area_id):
area_id_to_osmprefix_dict = {
2: 'las-vegas_nevada_osm',
3: 'paris_france_osm',
4: 'shanghai_china_osm',
5: 'ex_s2cCo6gpCXAvihWVygCAfSjNVksnQ_osm',
}
return area_id_to_osmprefix_dict[area_id]
# ---------------------------------------------------------
# main
def _get_model_parameter(area_id):
prefix = area_id_to_prefix(area_id)
fn_hist = FMT_VALMODEL_EVALTHHIST.format(prefix)
best_row = pd.read_csv(fn_hist).sort_values(
by='fscore',
ascending=False,
).iloc[0]
param = dict(
fn_epoch=int(best_row['zero_base_epoch']),
min_poly_area=int(best_row['min_area_th']),
)
return param
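# The evaluation-history CSV (FMT_VALMODEL_EVALTHHIST) is expected to contain
# at least the columns 'fscore', 'zero_base_epoch' and 'min_area_th'; the row
# with the best fscore selects the epoch weights and polygon-area threshold
# used for the final prediction.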
def _internal_test_predict_best_param(area_id,
save_pred=True):
prefix = area_id_to_prefix(area_id)
param = _get_model_parameter(area_id)
epoch = param['fn_epoch']
min_th = param['min_poly_area']
# Prediction phase
logger.info("Prediction phase: {}".format(prefix))
dict_n_osm_layers = {
2: 4,
3: 5,
4: 4,
5: 4,
}
osm_layers = dict_n_osm_layers[area_id]
n_input_layers = 8 + osm_layers
X_mean = get_mul_mean_image(area_id)
X_osm_mean = np.zeros((
osm_layers,
INPUT_SIZE,
INPUT_SIZE,
))
X_mean = np.vstack([X_mean, X_osm_mean])
# Load model weights
# Predict and Save prediction result
fn = FMT_TESTPRED_PATH.format(prefix)
fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}')
fn_model = fn_model.format(epoch=epoch)
model = get_unet(input_layers=n_input_layers)
model.load_weights(fn_model)
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
y_pred = model.predict_generator(
generate_test_batch(
area_id,
batch_size=64,
immean=X_mean,
enable_tqdm=True,
),
val_samples=len(df_test) * 9,
)
del model
# Save prediction result
if save_pred:
with tb.open_file(fn, 'w') as f:
atom = tb.Atom.from_dtype(y_pred.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, 'pred', atom, y_pred.shape,
filters=filters)
ds[:] = y_pred
return y_pred
def _internal_test(area_id, enable_tqdm=False):
    prefix = area_id_to_prefix(area_id)
    param = _get_model_parameter(area_id)
    min_th = param['min_poly_area']
    y_pred = _internal_test_predict_best_param(area_id, save_pred=False)
# Postprocessing phase
logger.info("Postprocessing phase")
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
fn_out = FMT_TESTPOLY_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
test_image_list = df_test.index.tolist()
for idx, image_id in tqdm.tqdm(enumerate(test_image_list)):
pred_values = np.zeros((650, 650))
pred_count = np.zeros((650, 650))
for slice_pos in range(9):
slice_idx = idx * 9 + slice_pos
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
pred_values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += (
y_pred[slice_idx][0]
)
pred_count[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += 1
pred_values = pred_values / pred_count
df_poly = mask_to_poly(pred_values, min_polygon_area_th=min_th)
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
line = "{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio)
line = _remove_interiors(line)
f.write(line)
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
def _internal_validate_predict_best_param(area_id,
enable_tqdm=False):
param = _get_model_parameter(area_id)
epoch = param['fn_epoch']
y_pred = _internal_validate_predict(
area_id,
epoch=epoch,
save_pred=False,
enable_tqdm=enable_tqdm)
return y_pred
def _internal_validate_predict(area_id,
epoch=3,
save_pred=True,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
dict_n_osm_layers = {
2: 4,
3: 5,
4: 4,
5: 4,
}
osm_layers = dict_n_osm_layers[area_id]
n_input_layers = 8 + osm_layers
# Image Mean
X_mean = get_mul_mean_image(area_id)
X_osm_mean = np.zeros((
osm_layers,
INPUT_SIZE,
INPUT_SIZE,
))
X_mean = np.vstack([X_mean, X_osm_mean])
# Load model weights
# Predict and Save prediction result
fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}')
fn_model = fn_model.format(epoch=epoch)
model = get_unet(input_layers=n_input_layers)
model.load_weights(fn_model)
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
y_pred = model.predict_generator(
generate_valtest_batch(
area_id,
batch_size=32,
immean=X_mean,
enable_tqdm=enable_tqdm,
),
val_samples=len(df_test) * 9,
)
del model
# Save prediction result
if save_pred:
fn = FMT_VALTESTPRED_PATH.format(prefix)
with tb.open_file(fn, 'w') as f:
atom = tb.Atom.from_dtype(y_pred.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(
f.root,
'pred',
atom,
y_pred.shape,
filters=filters,
)
ds[:] = y_pred
return y_pred
def _internal_validate_fscore_wo_pred_file(area_id,
epoch=3,
min_th=MIN_POLYGON_AREA,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
# ------------------------
# Prediction phase
logger.info("Prediction phase")
y_pred = _internal_validate_predict(
area_id,
save_pred=False,
epoch=epoch,
enable_tqdm=enable_tqdm)
# ------------------------
# Postprocessing phase
logger.info("Postprocessing phase")
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
test_list = df_test.index.tolist()
iterator = enumerate(test_list)
for idx, image_id in tqdm.tqdm(iterator, total=len(test_list)):
pred_values = np.zeros((650, 650))
pred_count = np.zeros((650, 650))
for slice_pos in range(9):
slice_idx = idx * 9 + slice_pos
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
pred_values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += (
y_pred[slice_idx][0]
)
pred_count[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += 1
pred_values = pred_values / pred_count
df_poly = mask_to_poly(pred_values, min_polygon_area_th=min_th)
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
line = "{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio)
line = _remove_interiors(line)
f.write(line)
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
# ------------------------
# Validation solution file
logger.info("Validation solution file")
fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df_true = pd.read_csv(fn_true)
# # Remove prefix "PAN_" from ImageId column
# df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
df_test_image_ids = df_test.ImageId.unique()
fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
for idx, r in df_true.iterrows():
f.write("{},{},\"{}\",{:.6f}\n".format(
r.ImageId,
r.BuildingId,
r.PolygonWKT_Pix,
1.0))
def _internal_validate_fscore(area_id,
epoch=3,
predict=True,
min_th=MIN_POLYGON_AREA,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
# ------------------------
# Prediction phase
logger.info("Prediction phase")
if predict:
_internal_validate_predict(
area_id,
epoch=epoch,
enable_tqdm=enable_tqdm)
# ------------------------
# Postprocessing phase
logger.info("Postprocessing phase")
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
fn = FMT_VALTESTPRED_PATH.format(prefix)
with tb.open_file(fn, 'r') as f:
y_pred = np.array(f.get_node('/pred'))
fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
test_list = df_test.index.tolist()
iterator = enumerate(test_list)
for idx, image_id in tqdm.tqdm(iterator, total=len(test_list)):
pred_values = np.zeros((650, 650))
pred_count = np.zeros((650, 650))
for slice_pos in range(9):
slice_idx = idx * 9 + slice_pos
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
pred_values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += (
y_pred[slice_idx][0]
)
pred_count[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += 1
pred_values = pred_values / pred_count
df_poly = mask_to_poly(pred_values, min_polygon_area_th=min_th)
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
line = "{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio)
line = _remove_interiors(line)
f.write(line)
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
# ------------------------
# Validation solution file
logger.info("Validation solution file")
fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df_true = pd.read_csv(fn_true)
# # Remove prefix "PAN_" from ImageId column
# df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
df_test_image_ids = df_test.ImageId.unique()
fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
for idx, r in df_true.iterrows():
f.write("{},{},\"{}\",{:.6f}\n".format(
r.ImageId,
r.BuildingId,
r.PolygonWKT_Pix,
1.0))
def mask_to_poly(mask, min_polygon_area_th=MIN_POLYGON_AREA):
mask = (mask > 0.5).astype(np.uint8)
shapes = rasterio.features.shapes(mask.astype(np.int16), mask > 0)
poly_list = []
mp = shapely.ops.cascaded_union(
shapely.geometry.MultiPolygon([
shapely.geometry.shape(shape)
for shape, value in shapes
]))
if isinstance(mp, shapely.geometry.Polygon):
df = pd.DataFrame({
'area_size': [mp.area],
'poly': [mp],
})
else:
df = pd.DataFrame({
'area_size': [p.area for p in mp],
'poly': [p for p in mp],
})
df = df[df.area_size > min_polygon_area_th].sort_values(
by='area_size', ascending=False)
df.loc[:, 'wkt'] = df.poly.apply(lambda x: shapely.wkt.dumps(
x, rounding_precision=0))
df.loc[:, 'bid'] = list(range(1, len(df) + 1))
df.loc[:, 'area_ratio'] = df.area_size / df.area_size.max()
return df
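# Usage sketch (illustrative): given a 650x650 array of per-pixel building
# probabilities,
#   df_poly = mask_to_poly(pred_values, min_polygon_area_th=MIN_POLYGON_AREA)
# returns one row per detected footprint, ordered from largest to smallest
# area, with the 'wkt', 'bid' and 'area_ratio' columns used when writing the
# solution CSVs above.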
def jaccard_coef(y_true, y_pred):
smooth = 1e-12
intersection = K.sum(y_true * y_pred, axis=[0, -1, -2])
sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
def jaccard_coef_int(y_true, y_pred):
smooth = 1e-12
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
intersection = K.sum(y_true * y_pred_pos, axis=[0, -1, -2])
sum_ = K.sum(y_true + y_pred_pos, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
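# Both metrics implement the smoothed Jaccard index
#   J = (|A & B| + eps) / (|A| + |B| - |A & B| + eps),  eps = 1e-12,
# with `jaccard_coef_int` first rounding predictions to {0, 1}.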
def generate_test_batch(area_id,
batch_size=64,
immean=None,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
df_test = pd.read_csv(FMT_TEST_IMAGELIST_PATH.format(prefix=prefix))
fn_im = FMT_TEST_MUL_STORE.format(prefix)
fn_osm = FMT_TEST_OSM_STORE.format(prefix)
slice_id_list = []
for idx, row in df_test.iterrows():
for slice_pos in range(9):
slice_id = row.ImageId + '_' + str(slice_pos)
slice_id_list.append(slice_id)
if enable_tqdm:
pbar = tqdm.tqdm(total=len(slice_id_list))
while 1:
total_sz = len(slice_id_list)
n_batch = int(math.floor(total_sz / batch_size) + 1)
with tb.open_file(fn_im, 'r') as f_im,\
tb.open_file(fn_osm, 'r') as f_osm:
for i_batch in range(n_batch):
target_slice_ids = slice_id_list[
i_batch*batch_size:(i_batch+1)*batch_size
]
if len(target_slice_ids) == 0:
continue
X_test = []
y_test = []
for slice_id in target_slice_ids:
im = np.array(f_im.get_node('/' + slice_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
im2 = np.array(f_osm.get_node('/' + slice_id))
im2 = np.swapaxes(im2, 0, 2)
im2 = np.swapaxes(im2, 1, 2)
im = np.vstack([im, im2])
X_test.append(im)
mask = np.zeros((INPUT_SIZE, INPUT_SIZE)).astype(np.uint8)
y_test.append(mask)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_test = y_test.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
if immean is not None:
X_test = X_test - immean
if enable_tqdm:
pbar.update(y_test.shape[0])
yield (X_test, y_test)
if enable_tqdm:
pbar.close()
def generate_valtest_batch(area_id,
batch_size=8,
immean=None,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
df_train = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix))
fn_im = FMT_VALTEST_MUL_STORE.format(prefix)
fn_mask = FMT_VALTEST_MASK_STORE.format(prefix)
fn_osm = FMT_VALTEST_OSM_STORE.format(prefix)
slice_id_list = []
for idx, row in df_train.iterrows():
for slice_pos in range(9):
slice_id = row.ImageId + '_' + str(slice_pos)
slice_id_list.append(slice_id)
if enable_tqdm:
pbar = tqdm.tqdm(total=len(slice_id_list))
while 1:
total_sz = len(slice_id_list)
n_batch = int(math.floor(total_sz / batch_size) + 1)
with tb.open_file(fn_im, 'r') as f_im,\
tb.open_file(fn_osm, 'r') as f_osm,\
tb.open_file(fn_mask, 'r') as f_mask:
for i_batch in range(n_batch):
target_slice_ids = slice_id_list[
i_batch*batch_size:(i_batch+1)*batch_size
]
if len(target_slice_ids) == 0:
continue
X_train = []
y_train = []
for slice_id in target_slice_ids:
im = np.array(f_im.get_node('/' + slice_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
im2 = np.array(f_osm.get_node('/' + slice_id))
im2 = np.swapaxes(im2, 0, 2)
im2 = np.swapaxes(im2, 1, 2)
im = np.vstack([im, im2])
X_train.append(im)
mask = np.array(f_mask.get_node('/' + slice_id))
mask = (mask > 0).astype(np.uint8)
y_train.append(mask)
X_train = np.array(X_train)
y_train = np.array(y_train)
y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
if immean is not None:
X_train = X_train - immean
if enable_tqdm:
pbar.update(y_train.shape[0])
yield (X_train, y_train)
if enable_tqdm:
pbar.close()
def generate_valtrain_batch(area_id, batch_size=8, immean=None):
prefix = area_id_to_prefix(area_id)
df_train = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix))
fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix)
fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix)
fn_osm = FMT_VALTRAIN_OSM_STORE.format(prefix)
slice_id_list = []
for idx, row in df_train.iterrows():
for slice_pos in range(9):
slice_id = row.ImageId + '_' + str(slice_pos)
slice_id_list.append(slice_id)
np.random.shuffle(slice_id_list)
while 1:
total_sz = len(slice_id_list)
n_batch = int(math.floor(total_sz / batch_size) + 1)
with tb.open_file(fn_im, 'r') as f_im,\
tb.open_file(fn_osm, 'r') as f_osm,\
tb.open_file(fn_mask, 'r') as f_mask:
for i_batch in range(n_batch):
target_slice_ids = slice_id_list[
i_batch*batch_size:(i_batch+1)*batch_size
]
if len(target_slice_ids) == 0:
continue
X_train = []
y_train = []
for slice_id in target_slice_ids:
im = np.array(f_im.get_node('/' + slice_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
im2 = np.array(f_osm.get_node('/' + slice_id))
im2 = np.swapaxes(im2, 0, 2)
im2 = np.swapaxes(im2, 1, 2)
im = np.vstack([im, im2])
X_train.append(im)
mask = np.array(f_mask.get_node('/' + slice_id))
mask = (mask > 0).astype(np.uint8)
y_train.append(mask)
X_train = np.array(X_train)
y_train = np.array(y_train)
y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
if immean is not None:
X_train = X_train - immean
yield (X_train, y_train)
def get_unet(input_layers=15):
conv_params = dict(activation='relu', border_mode='same')
merge_params = dict(mode='concat', concat_axis=1)
inputs = Input((input_layers, 256, 256))
conv1 = Convolution2D(32, 3, 3, **conv_params)(inputs)
conv1 = Convolution2D(32, 3, 3, **conv_params)(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, **conv_params)(pool1)
conv2 = Convolution2D(64, 3, 3, **conv_params)(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, **conv_params)(pool2)
conv3 = Convolution2D(128, 3, 3, **conv_params)(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, 3, 3, **conv_params)(pool3)
conv4 = Convolution2D(256, 3, 3, **conv_params)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Convolution2D(512, 3, 3, **conv_params)(pool4)
conv5 = Convolution2D(512, 3, 3, **conv_params)(conv5)
up6 = merge_l([UpSampling2D(size=(2, 2))(conv5), conv4], **merge_params)
conv6 = Convolution2D(256, 3, 3, **conv_params)(up6)
conv6 = Convolution2D(256, 3, 3, **conv_params)(conv6)
up7 = merge_l([UpSampling2D(size=(2, 2))(conv6), conv3], **merge_params)
conv7 = Convolution2D(128, 3, 3, **conv_params)(up7)
conv7 = Convolution2D(128, 3, 3, **conv_params)(conv7)
up8 = merge_l([UpSampling2D(size=(2, 2))(conv7), conv2], **merge_params)
conv8 = Convolution2D(64, 3, 3, **conv_params)(up8)
conv8 = Convolution2D(64, 3, 3, **conv_params)(conv8)
up9 = merge_l([UpSampling2D(size=(2, 2))(conv8), conv1], **merge_params)
conv9 = Convolution2D(32, 3, 3, **conv_params)(up9)
conv9 = Convolution2D(32, 3, 3, **conv_params)(conv9)
conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
optimizer = SGD(lr=0.01, momentum=0.9, nesterov=True)
model = Model(input=inputs, output=conv10)
model.compile(optimizer=optimizer,
loss='binary_crossentropy',
metrics=['accuracy', jaccard_coef, jaccard_coef_int])
return model
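# Usage sketch (assuming the Keras 1.x API used throughout this file):
#   model = get_unet(input_layers=12)  # 8 multispectral bands + 4 OSM layers
#   model.fit_generator(generate_valtrain_batch(area_id, immean=X_mean),
#                       samples_per_epoch=..., nb_epoch=...)
# The network is a standard U-Net: four 2x2 max-pool downsamplings, a
# 512-filter bottleneck, four upsampling stages with skip connections, and a
# final 1x1 sigmoid convolution producing a 1x256x256 building mask.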
def get_mul_mean_image(area_id):
prefix = area_id_to_prefix(area_id)
with tb.open_file(FMT_MULMEAN.format(prefix), 'r') as f:
im_mean = np.array(f.get_node('/mulmean'))
return im_mean
def __load_band_cut_th(band_fn, bandsz=3):
df = pd.read_csv(band_fn, index_col='area_id')
all_band_cut_th = {area_id: {} for area_id in range(2, 6)}
for area_id, row in df.iterrows():
for chan_i in range(bandsz):
all_band_cut_th[area_id][chan_i] = dict(
min=row['chan{}_min'.format(chan_i)],
max=row['chan{}_max'.format(chan_i)],
)
return all_band_cut_th
def get_mask_im(df, image_id):
im_mask = np.zeros((650, 650))
for idx, row in df[df.ImageId == image_id].iterrows():
shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix)
if shape_obj.exterior is not None:
coords = list(shape_obj.exterior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 1
interiors = shape_obj.interiors
for interior in interiors:
coords = list(interior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 0
im_mask = (im_mask > 0.5).astype(np.uint8)
return im_mask
def get_slice_mask_im(df, image_id):
im_mask = np.zeros((650, 650))
for idx, row in df[df.ImageId == image_id].iterrows():
shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix)
if shape_obj.exterior is not None:
coords = list(shape_obj.exterior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 1
interiors = shape_obj.interiors
for interior in interiors:
coords = list(interior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 0
im_mask = (im_mask > 0.5).astype(np.uint8)
for slice_pos in range(9):
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
im_mask_part = im_mask[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE]
assert im_mask_part.shape == (256, 256)
yield slice_pos, im_mask_part
def get_test_image_path_from_imageid(image_id, datapath, mul=False):
if mul:
return FMT_TEST_MSPEC_IMAGE_PATH.format(
datapath=datapath, image_id=image_id)
else:
return FMT_TEST_RGB_IMAGE_PATH.format(
datapath=datapath, image_id=image_id)
def get_train_image_path_from_imageid(image_id, datapath, mul=False):
if mul:
return FMT_TRAIN_MSPEC_IMAGE_PATH.format(
datapath=datapath, image_id=image_id)
else:
return FMT_TRAIN_RGB_IMAGE_PATH.format(
datapath=datapath, image_id=image_id)
def image_id_to_prefix(image_id):
prefix = image_id.split('img')[0][:-1]
return prefix
def load_train_summary_data(area_id):
prefix = area_id_to_prefix(area_id)
fn = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df = pd.read_csv(fn)
# df.loc[:, 'ImageId'] = df.ImageId.str[4:]
return df
def split_val_train_test(area_id):
prefix = area_id_to_prefix(area_id)
df = load_train_summary_data(area_id)
df_agg = df.groupby('ImageId').agg('first')
image_id_list = df_agg.index.tolist()
np.random.shuffle(image_id_list)
sz_valtrain = int(len(image_id_list) * 0.7)
sz_valtest = len(image_id_list) - sz_valtrain
# Parent directory
parent_dir = Path(FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)).parent
if not parent_dir.exists():
parent_dir.mkdir(parents=True)
pd.DataFrame({'ImageId': image_id_list[:sz_valtrain]}).to_csv(
FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
index=False)
pd.DataFrame({'ImageId': image_id_list[sz_valtrain:]}).to_csv(
FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
index=False)
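# For example, for AOI_2_Vegas this writes AOI_2_Vegas_valtrain_ImageId.csv and
# AOI_2_Vegas_valtest_ImageId.csv under the v5 image directory, holding a
# shuffled 70/30 split of the training ImageIds.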
# ---------------------------------------------------------
def calc_multiband_cut_threshold(path_list):
band_values = {k: [] for k in range(3)}
band_cut_th = {k: dict(max=0, min=0) for k in range(3)}
for path in path_list:
with rasterio.open(path, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(3):
band_values[i_chan].append(values[i_chan].ravel())
for i_chan in range(3):
band_values[i_chan] = np.concatenate(band_values[i_chan]).ravel()
band_cut_th[i_chan]['max'] = scipy.percentile(band_values[i_chan], 98)
band_cut_th[i_chan]['min'] = scipy.percentile(band_values[i_chan], 2)
return band_cut_th
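# The returned thresholds are the pooled 2nd/98th percentiles of each band
# across all listed images; they are presumably consumed by the v12
# preprocessing step that clips and rescales band intensities (that code is
# not part of this file).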
def tif_to_latlon(path):
ds = gdal.Open(path)
width = ds.RasterXSize
height = ds.RasterYSize
gt = ds.GetGeoTransform()
minx = gt[0]
miny = gt[3] + width*gt[4] + height*gt[5]
maxx = gt[0] + width*gt[1] + height*gt[2]
maxy = gt[3]
return Bunch(
minx=minx,
maxx=maxx,
miny=miny,
maxy=maxy,
xcenter=(minx+maxx)/2.0,
ycenter=(miny+maxy)/2.0)
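# GDAL's GetGeoTransform() returns (origin_x, pixel_width, row_rotation,
# origin_y, column_rotation, pixel_height); applying that affine transform to
# the (width, height) corner, as above, yields the raster's bounding box in
# its coordinate reference system.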
def location_summary(area_id, datapath):
area_prefix = area_id_to_prefix(area_id)
rows = []
glob_path = str(
Path(datapath) /
Path("PAN/PAN_{prefix:s}_img*.tif")
).format(prefix=area_prefix)
for path in sorted(glob.glob(glob_path)):
image_id = path.split('/')[-1][:-4]
pos = tif_to_latlon(path)
rows.append(dict(ImageId=image_id, path=path, pos=pos))
df_location = pd.DataFrame(rows)
df_location.loc[:, 'xcenter'] = df_location.pos.apply(lambda x: x.xcenter)
df_location.loc[:, 'ycenter'] = df_location.pos.apply(lambda x: x.ycenter)
return df_location
def location_summary_test(area_id, datapath):
area_prefix = area_id_to_prefix(area_id)
rows = []
glob_path = str(
Path(datapath) /
Path("PAN/PAN_{prefix:s}_img*.tif")
).format(prefix=area_prefix)
for path in sorted(glob.glob(glob_path)):
image_id = path.split('/')[-1][:-4]
pos = tif_to_latlon(path)
rows.append(dict(ImageId=image_id, path=path, pos=pos))
df_location = pd.DataFrame(rows)
df_location.loc[:, 'xcenter'] = df_location.pos.apply(lambda x: x.xcenter)
df_location.loc[:, 'ycenter'] = df_location.pos.apply(lambda x: x.ycenter)
return df_location
def get_mapzen_osm_name(area_id):
area_id_to_mapzen_name = {
2: 'las-vegas_nevada_osm',
3: 'paris_france_osm',
4: 'shanghai_china_osm',
5: 'ex_s2cCo6gpCXAvihWVygCAfSjNVksnQ_osm',
}
mapzen_name = area_id_to_mapzen_name[area_id]
return mapzen_name
def extract_buildings_geoms(area_id):
mapzen_name = get_mapzen_osm_name(area_id)
fn_osm = FMT_SERIALIZED_OSMDATA.format(mapzen_name)
with open(fn_osm, 'rb') as f:
osm = pickle.load(f)
geoms = [
geom
for geom, type_name, properties in osm['buildings']
if type_name == 'area'
]
return geoms
def extract_waterarea_geoms(area_id):
mapzen_name = get_mapzen_osm_name(area_id)
fn_osm = FMT_SERIALIZED_OSMDATA.format(mapzen_name)
with open(fn_osm, 'rb') as f:
osm = pickle.load(f)
geoms = [
geom
for geom, type_name, properties in osm['waterareas']
if type_name == 'area'
]
return geoms
def extract_landusages_industrial_geoms(area_id):
mapzen_name = get_mapzen_osm_name(area_id)
fn_osm = FMT_SERIALIZED_OSMDATA.format(mapzen_name)
with open(fn_osm, 'rb') as f:
osm = pickle.load(f)
geoms = [
geom
for geom, type_name, properties in osm['landusages']
if type_name == 'area' and properties['type'] == 'industrial'
]
return geoms
def extract_landusages_farm_and_forest_geoms(area_id):
mapzen_name = get_mapzen_osm_name(area_id)
fn_osm = FMT_SERIALIZED_OSMDATA.format(mapzen_name)
with open(fn_osm, 'rb') as f:
osm = pickle.load(f)
geoms = [
geom
for geom, type_name, properties in osm['landusages']
if type_name == 'area' and properties['type'] in [
'forest',
'farmyard',
]
]
return geoms
def extract_landusages_residential_geoms(area_id):
mapzen_name = get_mapzen_osm_name(area_id)
fn_osm = FMT_SERIALIZED_OSMDATA.format(mapzen_name)
with open(fn_osm, 'rb') as f:
osm = pickle.load(f)
geoms = [
geom
for geom, type_name, properties in osm['landusages']
if type_name == 'area' and properties['type'] == 'residential'
]
return geoms
def extract_roads_geoms(area_id):
mapzen_name = get_mapzen_osm_name(area_id)
fn_osm = FMT_SERIALIZED_OSMDATA.format(mapzen_name)
with open(fn_osm, 'rb') as f:
osm = pickle.load(f)
geoms = [
geom
for geom, type_name, properties in osm['roads']
if type_name == 'line' and properties['type'] != 'subway'
]
return geoms
def extract_osmlayers(area_id):
if area_id == 2:
return [
extract_buildings_geoms(area_id),
extract_landusages_industrial_geoms(area_id),
extract_landusages_residential_geoms(area_id),
extract_roads_geoms(area_id),
]
elif area_id == 3:
return [
extract_buildings_geoms(area_id),
extract_landusages_farm_and_forest_geoms(area_id),
extract_landusages_industrial_geoms(area_id),
extract_landusages_residential_geoms(area_id),
extract_roads_geoms(area_id),
]
elif area_id == 4:
return [
extract_waterarea_geoms(area_id),
extract_landusages_industrial_geoms(area_id),
extract_landusages_residential_geoms(area_id),
extract_roads_geoms(area_id),
]
elif area_id == 5:
return [
extract_waterarea_geoms(area_id),
extract_landusages_industrial_geoms(area_id),
extract_landusages_residential_geoms(area_id),
extract_roads_geoms(area_id),
]
else:
        raise RuntimeError("area_id must be in range(2, 6): {}".format(
area_id))
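# Note that the per-area layer counts here (4, 5, 4, 4 for areas 2-5) must
# stay in sync with the `dict_n_osm_layers` mappings used when sizing the
# network's 8 + N input channels.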
def prep_osmlayer_test(area_id, datapath):
prefix = area_id_to_prefix(area_id)
logger.info("prep_osmlayer_test for {}".format(prefix))
fn_list = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
fn_store = FMT_TEST_OSM_STORE.format(prefix)
layers = extract_osmlayers(area_id)
df = pd.read_csv(fn_list, index_col='ImageId')
logger.info("Prep osm container: {}".format(fn_store))
with tb.open_file(fn_store, 'w') as f:
df_sz = len(df)
for image_id in tqdm.tqdm(df.index, total=df_sz):
# fn_tif = test_image_id_to_path(image_id)
fn_tif = get_test_image_path_from_imageid(
image_id, datapath, mul=False)
with rasterio.open(fn_tif, 'r') as fr:
values = fr.read(1)
masks = [] # rasterize masks
for layer_geoms in layers:
mask = rasterio.features.rasterize(
layer_geoms,
out_shape=values.shape,
transform=rasterio.guard_transform(
fr.transform))
masks.append(mask)
masks = np.array(masks)
masks = np.swapaxes(masks, 0, 2)
masks = np.swapaxes(masks, 0, 1)
assert masks.shape == (650, 650, len(layers))
# slice of masks
for slice_pos in range(9):
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
im = masks[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE]
assert im.shape == (256, 256, len(layers))
slice_id = image_id + "_{}".format(slice_pos)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root,
slice_id,
atom,
im.shape,
filters=filters)
ds[:] = im
def prep_osmlayer_train(area_id, datapath, is_valtrain=False):
prefix = area_id_to_prefix(area_id)
logger.info("prep_osmlayer_train for {}".format(prefix))
if is_valtrain:
fn_list = FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix)
fn_store = FMT_VALTRAIN_OSM_STORE.format(prefix)
else:
fn_list = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
fn_store = FMT_VALTEST_OSM_STORE.format(prefix)
layers = extract_osmlayers(area_id)
df = pd.read_csv(fn_list, index_col='ImageId')
logger.info("Prep osm container: {}".format(fn_store))
with tb.open_file(fn_store, 'w') as f:
df_sz = len(df)
for image_id in tqdm.tqdm(df.index, total=df_sz):
# fn_tif = train_image_id_to_path(image_id)
fn_tif = get_train_image_path_from_imageid(
image_id, datapath, mul=False)
with rasterio.open(fn_tif, 'r') as fr:
values = fr.read(1)
masks = [] # rasterize masks
for layer_geoms in layers:
mask = rasterio.features.rasterize(
layer_geoms,
out_shape=values.shape,
transform=rasterio.guard_transform(
fr.transform))
masks.append(mask)
masks = np.array(masks)
masks = np.swapaxes(masks, 0, 2)
masks = np.swapaxes(masks, 0, 1)
assert masks.shape == (650, 650, len(layers))
# slice of masks
for slice_pos in range(9):
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
im = masks[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE]
assert im.shape == (256, 256, len(layers))
slice_id = image_id + "_{}".format(slice_pos)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root,
slice_id,
atom,
im.shape,
filters=filters)
ds[:] = im
def preproc_osm(area_id, datapath, is_train=True):
logger.info("Loading raster...")
osmprefix = area_id_to_osmprefix(area_id)
# df = pd.concat([
# location_summary(area_id),
# location_summary_test(area_id),
# ])
if is_train:
df = location_summary(area_id, datapath)
else:
df = location_summary_test(area_id, datapath)
map_bound = Bunch(
left=df.sort_values(by='xcenter').iloc[-1]['pos']['maxx'],
right=df.sort_values(by='xcenter').iloc[0]['pos']['minx'],
top=df.sort_values(by='ycenter').iloc[-1]['pos']['maxy'],
bottom=df.sort_values(by='ycenter').iloc[0]['pos']['miny'],
)
geom_layers = {}
fn_osm = FMT_SERIALIZED_OSMDATA.format(osmprefix)
if not Path(fn_osm).exists():
for layer_name in LAYER_NAMES:
fn_shp = FMT_OSMSHAPEFILE.format(
name=osmprefix,
layer=layer_name)
if not Path(fn_shp).exists():
raise RuntimeError("shp not found: {}".format(fn_shp))
geom_bounds = shapely.geometry.Polygon([
(map_bound.left, map_bound.top),
(map_bound.right, map_bound.top),
(map_bound.right, map_bound.bottom),
(map_bound.left, map_bound.bottom),
])
with fiona.open(fn_shp, 'r') as vector:
print("{}: {}".format(layer_name, len(vector)))
geoms = []
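                # Features with a positive overlap area are kept as 'area'; features that
                # merely touch the AOI bounds (e.g. roads) are kept as 'line'; geometries
                # that fail to parse are skipped by the bare except below.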
for feat in tqdm.tqdm(vector, total=len(vector)):
try:
geom = shapely.geometry.shape(feat['geometry'])
isec_area = geom.intersection(geom_bounds).area
if isec_area > 0:
geoms.append([
geom, 'area', feat['properties'],
])
elif geom.intersects(geom_bounds):
geoms.append([
geom, 'line', feat['properties'],
])
except:
pass
print("{}: {} -> {}".format(
layer_name,
len(vector),
len(geoms)))
geom_layers[layer_name] = geoms
with open(fn_osm, 'wb') as f:
pickle.dump(geom_layers, f)
@click.group()
def cli():
pass
@cli.command()
@click.option('--testonly/--no-testonly', default=True)
def testmerge(testonly):
# file check: test
for area_id in range(2, 6):
prefix = area_id_to_prefix(area_id)
fn_out = FMT_TESTPOLY_PATH.format(prefix)
if not Path(fn_out).exists():
logger.info("Required file not found: {}".format(fn_out))
sys.exit(1)
if not testonly:
# file check: valtest
for area_id in range(2, 6):
prefix = area_id_to_prefix(area_id)
fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
if not Path(fn_out).exists():
logger.info("Required file not found: {}".format(fn_out))
sys.exit(1)
# merge files: test poly
rows = []
for area_id in range(2, 6):
prefix = area_id_to_prefix(area_id)
fn_out = FMT_TESTPOLY_PATH.format(prefix)
with open(fn_out, 'r') as f:
line = f.readline()
if area_id == 2:
rows.append(line)
for line in f:
line = _remove_interiors(line)
rows.append(line)
with open(FN_SOLUTION_CSV, 'w') as f:
for line in rows:
f.write(line)
if not testonly:
# merge files: valtest poly
rows = []
for area_id in range(2, 6):
prefix = area_id_to_prefix(area_id)
fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
with open(fn_out, 'r') as f:
line = f.readline()
if area_id == 2:
rows.append(line)
for line in f:
line = _remove_interiors(line)
rows.append(line)
fn_out = FMT_VALTESTPOLY_OVALL_PATH
with open(fn_out, 'w') as f:
for line in rows:
f.write(line)
# merge files: valtest truth
rows = []
for area_id in range(2, 6):
prefix = area_id_to_prefix(area_id)
fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
with open(fn_out, 'r') as f:
line = f.readline()
if area_id == 2:
rows.append(line)
for line in f:
rows.append(line)
fn_out = FMT_VALTESTTRUTH_OVALL_PATH
with open(fn_out, 'w') as f:
for line in rows:
f.write(line)
@cli.command()
@click.argument('area_id', type=int)
def testproc(area_id):
prefix = area_id_to_prefix(area_id)
logger.info(">>>> Test proc for {}".format(prefix))
_internal_test(area_id)
logger.info(">>>> Test proc for {} ... done".format(prefix))
@cli.command()
@click.argument('area_id', type=int)
@click.option('--epoch', type=int, default=0)
@click.option('--th', type=int, default=MIN_POLYGON_AREA)
@click.option('--predict/--no-predict', default=False)
def validate_city_fscore(area_id, epoch, th, predict):
_internal_validate_fscore(
area_id,
epoch=epoch,
enable_tqdm=True,
min_th=th,
predict=predict)
evaluate_record = _calc_fscore_per_aoi(area_id)
evaluate_record['epoch'] = epoch
evaluate_record['min_area_th'] = th
evaluate_record['area_id'] = area_id
logger.info("\n" + json.dumps(evaluate_record, indent=4))
@cli.command()
@click.argument('datapath', type=str)
def evalfscore(datapath):
area_id = directory_name_to_area_id(datapath)
prefix = area_id_to_prefix(area_id)
logger.info("Evaluate fscore on validation set: {}".format(prefix))
# for each epoch
# if not Path(FMT_VALMODEL_EVALHIST.format(prefix)).exists():
if True:
df_hist = pd.read_csv(FMT_VALMODEL_HIST.format(prefix))
df_hist.loc[:, 'epoch'] = list(range(1, len(df_hist) + 1))
rows = []
for zero_base_epoch in range(0, len(df_hist)):
logger.info(">>> Epoch: {}".format(zero_base_epoch))
_internal_validate_fscore_wo_pred_file(
area_id,
epoch=zero_base_epoch,
enable_tqdm=True,
min_th=MIN_POLYGON_AREA)
evaluate_record = _calc_fscore_per_aoi(area_id)
evaluate_record['zero_base_epoch'] = zero_base_epoch
evaluate_record['min_area_th'] = MIN_POLYGON_AREA
evaluate_record['area_id'] = area_id
logger.info("\n" + json.dumps(evaluate_record, indent=4))
rows.append(evaluate_record)
pd.DataFrame(rows).to_csv(
FMT_VALMODEL_EVALHIST.format(prefix),
index=False)
# find best min-poly-threshold
df_evalhist = pd.read_csv(FMT_VALMODEL_EVALHIST.format(prefix))
best_row = df_evalhist.sort_values(by='fscore', ascending=False).iloc[0]
best_epoch = int(best_row.zero_base_epoch)
best_fscore = best_row.fscore
# optimize min area th
rows = []
for th in [30, 60, 90, 120, 150, 180, 210, 240]:
logger.info(">>> TH: {}".format(th))
predict_flag = False
if th == 30:
predict_flag = True
_internal_validate_fscore(
area_id,
epoch=best_epoch,
enable_tqdm=True,
min_th=th,
predict=predict_flag)
evaluate_record = _calc_fscore_per_aoi(area_id)
evaluate_record['zero_base_epoch'] = best_epoch
evaluate_record['min_area_th'] = th
evaluate_record['area_id'] = area_id
logger.info("\n" + json.dumps(evaluate_record, indent=4))
rows.append(evaluate_record)
| pd.DataFrame(rows) | pandas.DataFrame |
#!/usr/bin/env python3
"""Code that makes use of other people's data.
"""
import arviz as az
import pandas as pd
import numpy as np
import jax.numpy as jnp
import numpyro
import numpyro.distributions as dist
from jax.random import PRNGKey
from .mcmc import _compute_hdi
def assemble_dacey_dataset(df):
"""Assemble xarray dataset from [1]_ data.
Parameters
----------
df : pd.DataFrame
The Dacey1992_RGC dataframe containing dendritic field diameter, in
arcmin, of parasol and midget RGCs.
Returns
-------
dataset : xarray.Dataset
That data, rearranged to a dataset.
References
----------
.. [1] <NAME>., & <NAME>. (1992). Dendritic field size and
morphology of midget and parasol ganglion cells of the human retina.
Proceedings of the National Academy of Sciences, 89(20), 9666–9670.
http://dx.doi.org/10.1073/pnas.89.20.9666
"""
df['dendritic_field_diameter_deg'] = df['dendritic_field_diameter_min'] / 60
df = df.set_index('cell_type')
for n in df.index.unique():
df.loc[n, 'cells'] = np.arange(len(df.loc[n]))
return df.reset_index().set_index(['cells', 'cell_type']).to_xarray()
def hinged_line(ecc, slope, hinge_ecc, intercept=0):
"""Hinged line with an optional intercept.
Parameters
----------
ecc : jnp.ndarray
Eccentricity of RGC cells.
slope : jnp.ndarray
The slope giving the relationship between eccentricity and diameter.
hinge_ecc : jnp.ndarray
Eccentricity at which the line hinges.
intercept : jnp.ndarray, optional
The intercept for the line.
Returns
-------
diameter_mean : jnp.ndarray
The mean diameter at each location.
"""
diam = intercept + ecc * slope
return diam.clip(intercept + hinge_ecc * slope)
def model_physiological_scaling(eccentricity, observed_diameter=None):
"""Probabilistic model of dendritic field size, as function of eccentricity.
Fits ``hinged_line`` to eccentricity in order to get the mean diameter at
each location, then uses a normal distribution with the given mean (and
standard deviation equal to a scaling parameter times the mean) to predict
the observed diameters. We use this scaled normal because of the
observation that the spread of observed diameters increases with
eccentricity.
The slope of this line is our physiological scaling value.
We fit everything independently across cell types.
Parameters
----------
eccentricity : jnp.ndarray
Eccentricity of RGC cells, of shape (cells, cell_types)
observed_diameter : jnp.ndarray or None, optional
Observed diameters to condition our results on. If None, don't
condition.
Returns
-------
diameters : jnp.ndarray
Samples of RGC diameters.
"""
cell_type_plate = numpyro.plate('cell_type', eccentricity.shape[-1],
dim=-1)
obs_nans = jnp.isnan(eccentricity)
with cell_type_plate:
# exponentiated to .018
diam_slope = numpyro.sample('log_diameter_slope', dist.Normal(-4, 1))
diam_int = numpyro.sample('diameter_intercept', dist.HalfCauchy(.1))
diam_hinge = numpyro.sample('log_diameter_hinge_ecc', dist.Normal(0, 1))
diam_scale = numpyro.sample('diameter_scale', dist.HalfCauchy(.1))
# fill in missing eccentricity observations with random values in the
# right range.
if obs_nans is not None:
eccentricity_fillin = numpyro.sample('dummy_ecc',
dist.Uniform(jnp.nanmin(eccentricity),
jnp.nanmax(eccentricity)).mask(False),
sample_shape=eccentricity.shape)
eccentricity = jnp.where(obs_nans, eccentricity_fillin, eccentricity)
diameter_mean = numpyro.deterministic('diameter_mean',
hinged_line(eccentricity,
jnp.exp(diam_slope),
jnp.exp(diam_hinge),
diam_int))
if observed_diameter is not None:
# use Bayesian imputation to fill out any missing diameter observations
imputed_diam = numpyro.sample('rgc_diameter_imputed',
dist.Normal(diameter_mean, diam_scale*diameter_mean,
validate_args=True).mask(False))
observed_diameter = jnp.where(obs_nans, imputed_diam, observed_diameter)
return numpyro.sample('rgc_diameter', dist.Normal(diameter_mean,
diam_scale*diameter_mean, validate_args=True),
obs=observed_diameter)
def run_phys_scaling_inference(dataset, step_size=.1, num_draws=1000,
num_chains=1, num_warmup=500, seed=0,
target_accept_prob=.8, max_tree_depth=10,
**nuts_kwargs):
"""Run MCMC inference for physiological scaling, conditioned on data.
Uses NUTS sampler.
For some reason, numpyro has trouble running in parallel, even with the
suggested `numpyro.set_host_device_count(n)` -- I think the solution is to
set the proper environmental variable yourself: `export
XLA_FLAGS=--xla_force_host_platform_device_count=n`. Check
`jax.lib.xla_bridge.device_count()` to see if it worked.
Parameters
----------
dataset: xarray.Dataset
Dataset containing eccentricity and dendritic_field_diameter_deg data
variables.
step_size : float, optional
Size of a single step.
num_draws : int, optional
Number of draws (samples in numpyro's terminology) in each chain. The
higher the better.
num_chains : int, optional
The number of independent MCMC chains to run. The higher the better. If
greater than 1, will use multiprocessing.
num_warmup : int, optional
The number of "warmup" steps to include in each chain. These are
discarded.
seed : int, optional
RNG seed.
target_accept_prob : float, optional
Target acceptance probability for NUTS.
max_tree_depth : int, optional
Max depth of the tree for NUTS.
nuts_kwargs :
Passed to NUTS at initialization
Returns
-------
mcmc : numpyro.infer.MCMC
The MCMC object that has run inference. Pass to assemble_inf_data.
"""
ecc = jnp.array(dataset.eccentricity_deg.values, dtype=jnp.float32)
diams = jnp.array(dataset.dendritic_field_diameter_deg.values, dtype=jnp.float32)
mcmc_kernel = numpyro.infer.NUTS(model_physiological_scaling,
step_size=step_size,
init_strategy=numpyro.infer.init_to_sample,
target_accept_prob=target_accept_prob,
max_tree_depth=max_tree_depth,
**nuts_kwargs)
# for now, progress bar doesn't show for multiple chains:
# https://github.com/pyro-ppl/numpyro/issues/309
mcmc = numpyro.infer.MCMC(mcmc_kernel, num_samples=num_draws,
num_chains=num_chains,
num_warmup=num_warmup, progress_bar=True)
mcmc.run(PRNGKey(seed), ecc, diams)
return mcmc
def assemble_inf_data(mcmc, dataset, seed=1):
"""Convert mcmc into properly-formatted inference data object.
Parameters
----------
mcmc : numpyro.infer.MCMC
The MCMC object returned by `run_inference`
dataset: xarray.Dataset
Dataset containing observed_responses data variable and at least the
coordinates trials and scaling (must be first two).
seed : int, optional
RNG seed.
Returns
-------
inf_data : arviz.InferenceData
arviz InferenceData object (xarray-like) containing the posterior,
posterior_predictive, prior, prior_predictive, and observed_data.
"""
ecc = jnp.array(dataset.eccentricity_deg.values, dtype=jnp.float32)
post_pred = numpyro.infer.Predictive(model_physiological_scaling,
posterior_samples=mcmc.get_samples())
post_pred = post_pred(PRNGKey(seed), ecc)
post_pred = az.from_numpyro(posterior_predictive=post_pred,
coords=dataset.coords,
dims={'rgc_diameter': ['cells', 'cell_type'],
'diameter_mean': ['cells', 'cell_type']})
n_total_samples = list(mcmc.get_samples().values())[0].shape[0]
prior = numpyro.infer.Predictive(model_physiological_scaling,
num_samples=n_total_samples)
prior = prior(PRNGKey(seed+1), ecc)
prior = az.from_numpyro(prior=prior,
coords=dataset.coords,
dims={'rgc_diameter': ['cells', 'cell_type'],
'log_diameter_slope': ['cell_type'],
'diameter_scale': ['cell_type'],
'log_diameter_hinge_ecc': ['cell_type'],
'diameter_intercept': ['cell_type'],
'dummy_ecc': ['cells', 'cell_type'],
'diameter_mean': ['cells', 'cell_type']})
inf_data = az.from_numpyro(mcmc,
coords=dataset.coords,
dims={'log_diameter_slope': ['cell_type'],
'diameter_scale': ['cell_type'],
'log_diameter_hinge_ecc': ['cell_type'],
'diameter_intercept': ['cell_type'],
'dummy_ecc': ['cells', 'cell_type'],
'diameter_mean': ['cells', 'cell_type'],
'rgc_diameter_imputed': ['cells', 'cell_type'],
'rgc_diameter': ['cells', 'cell_type']})
inf_data = inf_data + post_pred + prior
inf_data.add_groups({'prior_predictive':
inf_data.prior[['rgc_diameter',]]})
inf_data.prior = inf_data.prior.drop_vars(['cells', 'rgc_diameter',])
inf_data.posterior_predictive['eccentricity'] = (['cells', 'cell_type'], ecc)
inf_data.posterior_predictive = inf_data.posterior_predictive.set_coords('eccentricity')
inf_data.prior_predictive['eccentricity'] = (['cells', 'cell_type'], ecc)
inf_data.prior_predictive = inf_data.prior_predictive.set_coords('eccentricity')
inf_data.observed_data['eccentricity'] = (['cells', 'cell_type'], ecc)
# then there was missing data, so we imputed the responses
if np.isnan(dataset.dendritic_field_diameter_deg).any():
inf_data.observed_data = inf_data.observed_data.rename({'rgc_diameter': 'imputed_rgc_diameter'})
return inf_data
def inf_data_to_df(inf_data, kind='predictive', query_str=None, hdi=False):
"""Convert inf_data to a dataframe, for plotting.
We exponentiate the log_diameter_slope and log_diameter_hinge_ecc
variables.
Parameters
----------
inf_data : arviz.InferenceData
arviz InferenceData object (xarray-like) created by `assemble_inf_data`.
kind : {'predictive', 'parameters'}, optional
Whether to create df containing predictive info (responses and
probability_correct) or model parameter info.
query_str : str or None, optional
If not None, the string to query dataframe with to limit the plotted
data (e.g., "distribution == 'posterior'").
hdi : bool or float, optional
Whether to compute the HDI (highest density interval) on the parameters
or return the full distributions. If True, we compute the 95% HDI, if a
float, must lie in (0, 1] and give the percentage HDI (we also include
the median). The HDI is one way of constructing a summary credible
interval, the other common way is to use the equal-tailed interval
(ETI), where a 95% ETI has 2.5% of the distribution on either side of
its limits (so it goes from the 2.5th to 97.5th percentile). The 95%
HDI, on the other hand, contains the central 95% with the highest
        probability density; with symmetric distributions, this will be the
        same as the ETI. See
        https://www.sciencedirect.com/topics/mathematics/highest-density-interval
        for some more discussion, excerpted from [2]_.
Returns
-------
df : pd.DataFrame
        The requested predictive or parameter values, with a 'distribution'
        column indicating where each row comes from.
References
----------
.. [2] Kruschke, <NAME>. (2015). Doing Bayesian Data Analysis. : Elsevier.
"""
if hdi is True:
hdi = .95
if kind == 'predictive':
dists = ['observed_data', 'posterior_predictive', 'prior_predictive']
df = []
for d in dists:
tmp = inf_data[d]
# doesn't make sense to compute the HDI for observed data.
if hdi and d != 'observed_data':
tmp = _compute_hdi(tmp, hdi)
tmp = tmp.to_dataframe().reset_index()
tmp['distribution'] = d
df.append(tmp)
df = pd.concat(df).reset_index(drop=True)
elif kind == 'parameters':
dists = ['prior', 'posterior']
df = []
for d in dists:
tmp = inf_data[d].drop_dims('cells')
if hdi:
tmp = _compute_hdi(tmp, hdi)
tmp = tmp.to_dataframe()
for c in tmp.columns:
if c.startswith('log'):
tmp[c.replace('log_', '')] = tmp[c].map(np.exp)
tmp = tmp.reset_index().melt(tmp.index.names)
tmp['distribution'] = d
df.append(tmp)
df = | pd.concat(df) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Description : This code does basic statistical tests (i.e., Student's t-test, fold change,
              Benjamini-Hochberg false discovery rate adjustment) for the peak table generated
              by MZmine-2.53
Copyright : (c) LemasLab, 02/23/2020
Author : <NAME>
License : MIT License
Maintainer : <EMAIL>, <EMAIL>, <EMAIL>
Usage : python add_stats.py -i $input_peak_table
-d $design_file_location
-o $output_peak_table
-l $library_location
"""
import warnings
import logging
import logging.handlers
import pandas as pd
import numpy as np
from statsmodels.stats.multitest import multipletests
from scipy import stats
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s]: %(levelname)s: %(message)s')
warnings.filterwarnings('ignore')
def add_threshold(row, names):
"""Add threshold for blank subtraction algorithm.
# Arguments:
row: certain row of peak table (pandas dataframe).
names: column names in the peak table of a certain group of samples
# Returns:
threshold value
"""
value = np.mean(row[names]) + 3*np.std(row[names])
return value if value > 0 else 5000
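# Illustrative sketch (added for clarity; not part of the original pipeline). Shows the
# threshold rule above on a toy blank-sample row; the column names 'blank1'..'blank3'
# are hypothetical.
def _example_add_threshold():
    toy_row = pd.Series({'blank1': 100.0, 'blank2': 120.0, 'blank3': 80.0})
    # mean = 100, np.std ~= 16.33, so the threshold is ~148.99; a non-positive
    # result would fall back to the default of 5000.
    return add_threshold(toy_row, ['blank1', 'blank2', 'blank3'])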
def blank_subtraction_flag(row, name_group, name_threshold, bar):
"""Blank subtraction function.
Blank subtraction algorithm:
- Calculate mean (mean_blank) and standard deviation (sd_blank)
      of peak intensities in blank samples.
- Threshold ← mean_blank+3*sd_blank
- If threshold <=0, then replace it with 5,000 (why 5,000?)
    - Calculate mean peak intensity in fat (mean_fat), whole (mean_whole)
and skim (mean_skim) samples.
- ratio_fat ← (mean_fat-threshold)/threshold;
ratio_whole ← (mean_whole-threshold)/threshold;
ratio_skim ← (mean_skim-threshold)/threshold
- If ratio_fat<self_defined_number (e.g. 100) and
ratio_whole<self_defined_number and ratio_skim<self_defined_number,
then drop the peak.
# Arguments:
row: certain row of peak table (pandas dataframe).
name_group: name of the group.
name_threshold: name of the threshold column.
bar: bar value of blank subtraction algorithm.
# Returns:
        Whether a certain peak of this group still exists after blank subtraction
"""
return (np.mean(row[name_group]) - row[name_threshold])/row[name_threshold] > bar
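# Illustrative sketch (added for clarity; not part of the original pipeline). With a group
# mean of 8000 and a threshold of 150 the ratio is (8000 - 150) / 150 ~= 52.3, so the peak
# is kept for bar=50 but dropped for bar=100. Column names here are hypothetical.
def _example_blank_subtraction_flag():
    toy_row = pd.Series({'fat1': 7000.0, 'fat2': 9000.0, 'threshold': 150.0})
    return blank_subtraction_flag(toy_row, ['fat1', 'fat2'], 'threshold', bar=50)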
# Judge whether certain peak intensity of a sample is 0 or not
def zero_intensity_flag(row, name_group):
"""Check if the mean intensity of certain group of samples is zero. If zero, then
the metabolite is not existed in that material.
# Arguments:
row: certain row of peak table (pandas dataframe).
name_group: name of the group.
# Returns:
True (the mean intensity is zero) or False (the mean intensity is not zero).
"""
return np.mean(row[name_group]) <= 0
# Add p-value for student t-test between two groups of samples
def add_pvalue(row, left_names, right_names):
"""Add p value for two group comparison based on student t-test.
# Arguments:
row: certain row of peak table (pandas dataframe).
left_names: column names in the peak table of the first group of samples.
right_names: column names in the peak table of the second group of samples.
# Returns:
p value of student t-test
"""
_, p = stats.ttest_ind(row[left_names], row[right_names])
return p
# Add t-value for student t-test between two groups of samples
def add_tvalue(row, left_names, right_names):
"""Add t value for two group comparison based on student t-test.
# Arguments:
row: certain row of peak table (pandas dataframe).
left_names: column names in the peak table of the first group of samples.
right_names: column names in the peak table of the second group of samples.
# Returns:
t value of student t-test
"""
t, _ = stats.ttest_ind(row[left_names], row[right_names])
return t
# Add fold-change for the mean values of two groups of samples
def fold_change(row, left, right):
"""Add fold change value for two group comparison.
# Arguments:
row: certain row of peak table (pandas dataframe).
left: column name in the peak table of the mean intensity of first group of samples.
right: column name in the peak table of the mean intensity of second group of samples.
# Returns:
fold change value.
"""
if row[right] == 0:
return np.inf
if row[left] == 0:
return -np.inf
result = row[left]/row[right]
return result if result >= 1 else -1/result
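# Illustrative sketch (added for clarity; not part of the original pipeline). fold_change
# is signed: a mean ratio of 0.25 is reported as -4.0 rather than 0.25, so up- and
# down-regulation are symmetric around +/-1. Column names here are hypothetical.
def _example_fold_change():
    toy_row = pd.Series({'mean_fat': 500.0, 'mean_skim': 2000.0})
    return fold_change(toy_row, 'mean_fat', 'mean_skim')  # 500/2000 = 0.25 -> -4.0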
# Absolute value of fold-change
def abs_fold_change(row, fold_change_column):
"""Add absolute fold change value for two group comparison.
# Arguments:
row: certain row of peak table (pandas dataframe).
fold_change_column: column name in the peak table of the fold change value.
# Returns:
absolute fold change value.
"""
return abs(row[fold_change_column])
# Add ppm value for identified metabolites.
## The library search result produced by MZmine may exceed 5 ppm,
## so those beyond 5 ppm should be filtered out
def add_ppm(row, library_df):
"""Add part per million (ppm) value for library matching. The library matching done by
MZmine may not follow the threshold strictly (i.e., when setting the ppm to 5, some
metabolites with ppm of more than 5 may also appear in the peak table).
# Arguments:
row: certain row of peak table (pandas dataframe).
library_df: library dataframe.
# Returns:
ppm value of the matched metabolite in the row.
"""
if pd.isnull(row['row identity (main ID)']):
return None
mzs = list(library_df[library_df.Name.str.strip() == row['row identity (main ID)']]['M/Z'])
mz_observe = row["row m/z"]
diff = []
for mz in mzs:
diff.append(abs(mz_observe - mz))
if len(diff) == 0:
return None
mz_theoretical = mzs[diff.index(min(diff))]
return abs((mz_observe-mz_theoretical)*10e5/mz_theoretical)
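# Illustrative sketch (added for clarity; not part of the original pipeline). The ppm value
# above is |observed - theoretical| / theoretical * 1e6 (10e5 == 1e6); the masses below are
# made-up numbers.
def _example_ppm():
    mz_observe, mz_theoretical = 180.0634, 180.0627
    return abs((mz_observe - mz_theoretical) * 10e5 / mz_theoretical)  # ~3.9 ppm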
def add_label(row, group1_name, group2_name):
"""Add label for metabolite represented by the row.
Format: "m_z/retention_time/fold_change".
# Arguments:
row: certain row of peak table (pandas dataframe).
group1_name: name of the group of first group of samples.
group2_name: name of the group of second group of samples.
# Returns:
label (string type).
"""
if pd.isnull(row["row identity (main ID)"]) or \
row["row identity (main ID)"] == "nan" or \
row["row identity (main ID)"] == None:
return str(round(row["row m/z"], 2)) + "/" + \
str(round(row["row retention time"], 2)) + \
"/" + str(round(row["fold_change" + \
"(" + str(group1_name) + " versus " + \
str(group2_name) + ")"], 2))
return str(row["row identity (main ID)"]) + "/" + \
str(round(row["fold_change" + "(" + str(group1_name) + \
" versus " + str(group2_name) + ")"], 2))
def add_stats(data_file="data_pos_ph.csv", design_file="design", \
output_file="pos_withstats.csv", \
library="positive_library.csv"):
"""Add basic statistics to peak table produced by MZmine.
# Arguments:
data_file: peak table.
design_file: design file corresponding to the peak table.
output_file: the name of processed file.
library: location and file name of the library used to identify metabolites.
# Returns:
list of identified metabolites.
# Outputs:
        processed peak table
"""
data = | pd.read_csv(data_file) | pandas.read_csv |
from tpq_base import *
from scipy.io import savemat
from ssmtoybox.ssmod import ConstantVelocity, Radar2DMeasurement
from ssmtoybox.ssinf import StudentProcessKalman, StudentProcessStudent, GaussianProcessKalman, UnscentedKalman, \
CubatureKalman, FullySymmetricStudent
"""
Tracking of an object behaving according to Constant Velocity model based on radar measurements with glint noise.
"""
def constant_velocity_radar_demo(steps=100, mc_sims=100):
print('Constant Velocity Radar Tracking with Glint Noise')
print('K = {:d}, MC = {:d}'.format(steps, mc_sims))
# SYSTEM
m0 = np.array([10000, 300, 1000, -40], dtype=np.float)
P0 = np.diag([100**2, 10**2, 100**2, 10**2])
x0 = GaussRV(4, m0, P0)
dt = 0.5 # discretization period
# process noise and noise gain
Q = np.diag([50, 5])
G = np.array([[dt ** 2 / 2, 0],
[dt, 0],
[0, dt ** 2 / 2],
[0, dt]])
q = GaussRV(4, cov=G.T.dot(Q).dot(G))
dyn = ConstantVelocity(x0, q, dt)
R0 = np.diag([50, 0.4e-6])
R1 = np.diag([5000, 1.6e-5]) # glint (outlier) RV covariance
glint_prob = 0.15
r = GaussianMixtureRV(2, covs=(R0, R1), alphas=(1-glint_prob, glint_prob))
obs = Radar2DMeasurement(r, dyn.dim_state, state_index=[0, 2, 1, 3])
# SIMULATE DATA
x = dyn.simulate_discrete(steps, mc_sims)
z = obs.simulate_measurements(x)
# STATE SPACE MODEL
m0 = np.array([10175, 295, 980, -35], dtype=np.float)
P0 = np.diag([100 ** 2, 10 ** 2, 100 ** 2, 10 ** 2])
x0_dof = 1000.0
x0 = StudentRV(4, m0, ((x0_dof-2)/x0_dof)*P0, x0_dof)
dt = 0.5 # discretization period
# process noise and noise gain
Q = np.diag([50, 5])
q = StudentRV(4, scale=((x0_dof-2)/x0_dof)*G.T.dot(Q).dot(G), dof=x0_dof)
dyn = ConstantVelocity(x0, q, dt)
r_dof = 4.0
r = StudentRV(2, scale=((r_dof-2)/r_dof)*R0, dof=r_dof)
obs = Radar2DMeasurement(r, dyn.dim_state)
# import matplotlib.pyplot as plt
# for i in range(mc_sims):
# plt.plot(x[0, :, i], x[2, :, i], 'b', alpha=0.15)
# plt.show()
# kernel parameters for TPQ and GPQ filters
# TPQ Student
par_dyn_tp = np.array([[0.05, 100, 100, 100, 100]], dtype=float)
par_obs_tp = np.array([[0.005, 10, 100, 10, 100]], dtype=float)
# parameters of the point-set
kappa = 0.0
par_pt = {'kappa': kappa}
# print kernel parameters
import pandas as pd
parlab = ['alpha'] + ['ell_{}'.format(d + 1) for d in range(x.shape[0])]
partable = pd.DataFrame(np.vstack((par_dyn_tp, par_obs_tp)), columns=parlab, index=['dyn', 'obs'])
print()
print(partable)
# TODO: less TPQSFs, max boxplot y-range = 2000, try to get convergent RMSE semilogy
# init filters
filters = (
# ExtendedStudent(dyn, obs),
# UnscentedKalman(dyn, obs, kappa=kappa),
FullySymmetricStudent(dyn, obs, kappa=kappa, dof=4.0),
StudentProcessStudent(dyn, obs, par_dyn_tp, par_obs_tp, dof=4.0, dof_tp=4.0, point_par=par_pt),
# StudentProcessStudent(dyn, obs, par_dyn_tp, par_obs_tp, dof=4.0, dof_tp=10.0, point_par=par_pt),
# StudentProcessStudent(dyn, obs, par_dyn_tp, par_obs_tp, dof=4.0, dof_tp=20.0, point_par=par_pt),
# GaussianProcessKalman(dyn, obs, par_dyn_tp, par_obs_tp, dof=4.0, point_hyp=par_pt),
)
itpq = np.argwhere([isinstance(filters[i], StudentProcessStudent) for i in range(len(filters))]).squeeze(axis=1)[0]
# assign weights approximated by MC with lots of samples
# very dirty code
pts = filters[itpq].tf_dyn.model.points
kern = filters[itpq].tf_dyn.model.kernel
wm, wc, wcc, Q = rbf_student_mc_weights(pts, kern, int(2e6), 1000)
for f in filters:
if isinstance(f.tf_dyn, BQTransform):
f.tf_dyn.wm, f.tf_dyn.Wc, f.tf_dyn.Wcc = wm, wc, wcc
f.tf_dyn.Q = Q
pts = filters[itpq].tf_meas.model.points
kern = filters[itpq].tf_meas.model.kernel
wm, wc, wcc, Q = rbf_student_mc_weights(pts, kern, int(2e6), 1000)
for f in filters:
if isinstance(f.tf_meas, BQTransform):
f.tf_meas.wm, f.tf_meas.Wc, f.tf_meas.Wcc = wm, wc, wcc
f.tf_meas.Q = Q
# run all filters
mf, Pf = run_filters(filters, z)
# evaluate scores
pos_x, pos_mf, pos_Pf = x[[0, 2], ...], mf[[0, 2], ...], Pf[np.ix_([0, 2], [0, 2])]
vel_x, vel_mf, vel_Pf = x[[1, 3], ...], mf[[1, 3], ...], Pf[np.ix_([1, 3], [1, 3])]
pos_rmse, pos_lcr = eval_perf_scores(pos_x, pos_mf, pos_Pf)
vel_rmse, vel_lcr = eval_perf_scores(vel_x, vel_mf, vel_Pf)
rmse_avg, lcr_avg = eval_perf_scores(x, mf, Pf)
# variance of average metrics
from ssmtoybox.utils import bootstrap_var
var_rmse_avg = np.zeros((len(filters),))
var_lcr_avg = np.zeros((len(filters),))
for fi in range(len(filters)):
var_rmse_avg[fi] = bootstrap_var(rmse_avg[:, fi], int(1e4))
var_lcr_avg[fi] = bootstrap_var(lcr_avg[:, fi], int(1e4))
# save trajectories, measurements and metrics to file for later processing (tables, plots)
data_dict = {
'x': x,
'z': z,
'mf': mf,
'Pf': Pf,
'rmse_avg': rmse_avg,
'lcr_avg': lcr_avg,
'var_rmse_avg': var_rmse_avg,
'var_lcr_avg': var_lcr_avg,
'pos_rmse': pos_rmse,
'pos_lcr': pos_lcr,
'vel_rmse': vel_rmse,
'vel_lcr': vel_lcr,
'steps': steps,
'mc_sims': mc_sims,
'par_dyn_tp': par_dyn_tp,
'par_obs_tp': par_obs_tp,
'f_label': ['UKF', 'SF', r'TPQSF($\nu$=20)', 'GPQSF']
}
savemat('cv_radar_simdata_{:d}k_{:d}mc'.format(steps, mc_sims), data_dict)
# print out table
# mean overall RMSE and INC with bootstrapped variances
f_label = [f.__class__.__name__ for f in filters]
m_label = ['MEAN_RMSE', 'STD(MEAN_RMSE)', 'MEAN_INC', 'STD(MEAN_INC)']
data = np.array([rmse_avg.mean(axis=0), np.sqrt(var_rmse_avg), lcr_avg.mean(axis=0), np.sqrt(var_lcr_avg)]).T
table = pd.DataFrame(data, f_label, m_label)
print(table)
# mean/max RMSE and INC
m_label = ['MEAN_RMSE', 'MAX_RMSE', 'MEAN_INC', 'MAX_INC']
pos_data = np.array([pos_rmse.mean(axis=0), pos_rmse.max(axis=0), pos_lcr.mean(axis=0), pos_lcr.max(axis=0)]).T
vel_data = np.array([vel_rmse.mean(axis=0), vel_rmse.max(axis=0), vel_lcr.mean(axis=0), vel_lcr.max(axis=0)]).T
pos_table = pd.DataFrame(pos_data, f_label, m_label)
pos_table.index.name = 'Position'
vel_table = | pd.DataFrame(vel_data, f_label, m_label) | pandas.DataFrame |
# coding=utf-8
""" Charts from Google Earth Engine data. Inpired by this question
https://gis.stackexchange.com/questions/291823/ui-charts-for-indices-time-series-in-python-api-of-google-earth-engine
and https://youtu.be/FytuB8nFHPQ, but at the moment relaying on `pygal`
library because it's the easiest to integrate with ipywidgets
"""
import pygal
import base64
import ee
from geetools import tools, utils
import pandas as pd
# TODO: make not plotted bands values appear on tooltip
# TODO: give capability to plot a secondary axis with other data
def ydata2pandas(ydata):
""" Convert data from charts y_data property to pandas """
dataframes = []
for serie, data in ydata.items():
index = []
values = []
for d in data:
x = d[0]
y = d[1]
index.append(x)
values.append(y)
df = pd.DataFrame({serie:values}, index=index)
dataframes.append(df)
return | pd.concat(dataframes, axis=1, sort=False) | pandas.concat |
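# Illustrative sketch (added for clarity; not part of the original module). ydata2pandas
# expects a mapping of series name -> list of (x, y) pairs and returns one column per
# series aligned on x; the values below are made up.
def _example_ydata2pandas():
    ydata = {'NDVI': [(2000, 0.31), (2001, 0.35)],
             'EVI': [(2000, 0.22), (2001, 0.27)]}
    return ydata2pandas(ydata)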
import os
import logging
import pandas as pd
import numpy as np
import urllib.request
import requests
import re
import io
import zipfile
import json
from pyseir import OUTPUT_DIR
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data')
def load_zip_get_file(url, file, decoder='utf-8'):
"""
Load a zipfile from a URL and extract a single file. Note that this is
not ideal and may fail for large files since the files must fit in memory.
Parameters
----------
url: str
URL to read from.
file: str
Filename to pull out of the zipfile.
decoder: str
Usually None for raw bytes or 'utf-8', or 'latin1'
Returns
-------
file_buffer: io.BytesIO or io.StringIO
The file buffer for the requested file if decoder is None else return
a decoded StringIO.
"""
remotezip = urllib.request.urlopen(url)
zipinmemory = io.BytesIO(remotezip.read())
zf = zipfile.ZipFile(zipinmemory)
byte_string = zf.read(file)
if decoder:
string = byte_string.decode(decoder)
return io.StringIO(string)
else:
return io.BytesIO(byte_string)
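# Illustrative sketch (added for clarity; not part of the original module). Typical use of
# the helper above: pull a single CSV member out of a remote zip and read it with pandas.
# The URL and member name are placeholders.
def _example_load_zip_get_file():
    buf = load_zip_get_file('https://example.com/archive.zip', 'data/table.csv',
                            decoder='utf-8')
    return pd.read_csv(buf)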
def cache_county_case_data():
"""
Cache county covid case data from NYT in #PYSEIR_HOME/data.
"""
logging.info('Downloading covid case data')
# NYT dataset
county_case_data = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype='str')
county_case_data['date'] = | pd.to_datetime(county_case_data['date']) | pandas.to_datetime |
import pandas as pd
def read_csv(pd,link2015,link2016,link2017,link2018,link2019):
"""
    Our project works on data from 2015 to 2019.
    The data is taken from OFLC (link in the notebook).
    The column names vary from year to year; this is fixed in the code below.
    The dataframes for all years are concatenated into one dataframe,
    and the intermediate dataframes are deleted to keep the RAM from filling up.
"""
file2015=pd.read_csv(link2015,encoding='latin-1', low_memory=False)
file2015.rename(columns = {'H-1B_DEPENDENT':'H1B_DEPENDENT','SOC_NAME':'SOC_TITLE','WAGE_RATE_OF_PAY':'WAGE_RATE_OF_PAY_FROM'}, inplace = True)
df2015 = file2015[['CASE_NUMBER', 'CASE_STATUS', 'CASE_SUBMITTED', 'DECISION_DATE',
'VISA_CLASS','FULL_TIME_POSITION','JOB_TITLE', 'SOC_CODE', 'SOC_TITLE',
'EMPLOYER_NAME','WAGE_RATE_OF_PAY_FROM','WAGE_UNIT_OF_PAY','WORKSITE_CITY',
'WORKSITE_STATE','H1B_DEPENDENT']]
df2015['WAGE_RATE_OF_PAY_FROM'] = df2015['WAGE_RATE_OF_PAY_FROM'].str.split('-').str[0]
del file2015
file2016=pd.read_csv(link2016,encoding='latin-1', low_memory=False)
file2016.rename(columns = {'H-1B_DEPENDENT':'H1B_DEPENDENT','SOC_NAME':'SOC_TITLE'}, inplace = True)
df2016=file2016[['CASE_NUMBER', 'CASE_STATUS', 'CASE_SUBMITTED', 'DECISION_DATE',
'VISA_CLASS','FULL_TIME_POSITION','JOB_TITLE', 'SOC_CODE', 'SOC_TITLE',
'EMPLOYER_NAME','WAGE_RATE_OF_PAY_FROM','WAGE_UNIT_OF_PAY','WORKSITE_CITY',
'WORKSITE_STATE','H1B_DEPENDENT']]
del file2016
file2017=pd.read_csv(link2017,encoding='latin-1', low_memory=False)
file2017=pd.read_csv('/content/gdrive/My Drive/H1B_project/H-1B_Disclosure_Data_FY17.csv',encoding='latin-1', low_memory=False)
file2017.rename(columns = {'SOC_NAME':'SOC_TITLE'}, inplace = True)
df2017=file2017[['CASE_NUMBER', 'CASE_STATUS', 'CASE_SUBMITTED', 'DECISION_DATE',
'VISA_CLASS','FULL_TIME_POSITION','JOB_TITLE', 'SOC_CODE',
'SOC_TITLE','EMPLOYER_NAME','WAGE_RATE_OF_PAY_FROM',
'WAGE_UNIT_OF_PAY','WORKSITE_CITY','WORKSITE_STATE','H1B_DEPENDENT']]
del file2017
file2018=pd.read_csv(link2018,encoding='latin-1', low_memory=False)
file2018= | pd.read_csv('/content/gdrive/My Drive/H1B_project/H-1B_Disclosure_Data_FY2018_EOY.csv',encoding='latin-1', low_memory=False) | pandas.read_csv |
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
from com_cheese_api.cmm.utl.file import FileReader
from com_cheese_api.ext.db import url, db, openSession, engine
from konlpy.tag import Okt
from collections import Counter
from wordcloud import WordCloud
import seaborn as sns
from sqlalchemy import func
from sqlalchemy.ext.declarative import declarative_base
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold # k value is understood as count
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier # rforest
from sklearn.tree import DecisionTreeClassifier # dtree
from sklearn.naive_bayes import GaussianNB # nb
from sklearn.neighbors import KNeighborsClassifier # knn
from sklearn.svm import SVC # svm
import os
import json
# Files generated when this code runs:
# user_df.csv
# In user_df and the other user csv data the column name changed from user_index -> user_no; needs fixing
class OrderDfo:
def __init__(self):
self.fileReader = FileReader()
# self.data = os.path.join(os.path.abspath(os.path.dirname(__file__))+'/m_data')
self.data = os.path.join('com_cheese_api/cop/ord/order/data')
self.odf = None
def new(self):
user = 'users.csv'
cheese = pd.read_csv('com_cheese_api/cop/itm/cheese/data/cheese_data.csv')
this = self.fileReader
this.user = self.new_model(user) # payload
print(this)
category_count = OrderDfo.category_count(this.user)
item_count = OrderDfo.item_count(this.user, category_count)
this.user = OrderDfo.change_to_cheese(cheese, item_count)
        print(f'######## cheese item substitution check ##########')
print(this)
this = OrderDfo.user_gender_nominal(this)
this = OrderDfo.cheese_rank_ordinal(this)
        # print(min(this.user['user_age'])) # minimum customer age: 10
        # print(max(this.user['user_age'])) # maximum customer age: 80
        # this = OrderDfo.user_age_nominal(this)
        print(f'######## age preprocessing check ##########')
print(this.user.head(10))
# this = OrderDfo.cheese_code_numeric(this)
# this = OrderDfo.cheese_code_ordinal(this)
this = OrderDfo.buy_count_numeric(this)
# this = OrderDfo.cheese_category_nominal(this)
# this = OrderDfo.cheese_texture_nominal(this)
        print(f'######## cheese, count preprocessing check ##########')
print(this.user.head(10))
this = OrderDfo.user_price_numeric(this)
this = OrderDfo.total_buy_price(this)
# print(f'Preprocessing User Dataset : {this.user}')
        print(f'######## train na check ##########')
        print(f'{this.user.isnull().sum()}')
        print(f'######## test na check ##########')
        print(f'{this.user.isnull().sum()}')
        print(f'######## data type check ##########')
print(this.user.dtypes)
self.odf = pd.DataFrame(
{
                # 'order_no': this.user.order_no, # duplicate error
'user_id': this.user.user_id,
'cheese_id': this.user.cheese_id,
'buy_count': this.user.buy_count,
'total_price': this.user.total_price
}
)
self.odf.to_csv(os.path.join('com_cheese_api/cop/ord/order/data', 'user_order.csv'), index=False, encoding='utf-8-sig')
        print(f'######## final user DF result ##########')
print(self.odf)
return self.odf
    ################## load data & create & drop features ###################
    # self, load into memory
def new_model(self, payload):
this = self.fileReader
this.data = self.data
this.fname = payload
print(f'{self.data}')
print(f'{this.fname}')
return pd.read_csv(Path(self.data, this.fname))
    ####################### convert raw data into cheese purchase data #######################
    # can be visualized with make_barplot() below.
@staticmethod
def find_requency(data, column1, column2):
count_size = data[column1].groupby(data[column2]).count().reset_index(name='counts')
count_size['rank'] = count_size['counts'].rank(ascending=False)
show_barplot = OrderDfo.make_barplot(column2, 'counts', count_size)
return count_size
@staticmethod
def category_count(data) -> object:
sub_size = data['buy_count'].groupby(data['sub1_category']).sum().reset_index(name='sub1_counts')
sub_size['sub1_rank'] = sub_size['sub1_counts'].rank(ascending=False)
# barplot = OrderDfo.make_barplot('sub1_category', 'sub1_counts', sub_size)
return sub_size
@staticmethod
def item_count(data, category_count):
item_size = data['buy_count'].groupby([data['sub1_category'],data['sub2_category']]).sum().reset_index(name='sub2_counts')
item_size['sub2_rank'] = item_size['sub2_counts'].rank(ascending=False, method="dense")
category_item_rank = | pd.merge(category_count, item_size, on='sub1_category', how='right') | pandas.merge |
def hover(x):
index=x.find(".")
if index==-1: return x
else: return x[:index]
def morph(x):
index=x.find(".")
if index==-1: return ""
else: return "+"+x[index+1:]
def stransform(inputw):
if inputw.startswith("["):
return " ʔăḏōnāy"
elif len(inputw)>1 and inputw[0]==inputw[1]:
return "-"+inputw[0]+"-"+inputw[1:]
else:
return inputw
def septransform(inputw):
if inputw!="":
return " "
else:
return inputw
def beautify(phon):
if phon==" ":return "_"
return phon.replace("ḏ","d").replace("ḡ","g").replace("ṯ","t").replace("ḵ","x").replace("ʔ","ʾ").replace("ʕ","ʿ").replace("ₐ","a").replace("î","ī").replace("ê","ē").replace("ô","ō").replace("û","ū").replace("ᵒ","ŏ").replace("ᵉ","ĕ").replace("ᵃ","ă").replace("ᵊ","ᵉ").replace("ʸ","").replace("ˈ",'<sub id="s">́</sub>').replace(" "," ").replace(" -","-")
def repl(inputw):
text=beautify(inputw)
return text
##########################
#### Start here ########
##########################
import pandas as pd
print('Loading verses...\r',end="")
df=pd.read_csv("_data/bible.csv",sep="\t")
## df : hebtext number book chapter verse trchapter trverse trtext translit
## 0 1 2 3 4 5 6 7 8
print('Done.\r',end="")
print('Loading index...\r',end="")
ixv= | pd.read_csv("_data/indexv.csv",sep="\t",header=None) | pandas.read_csv |
import pandas as pd
medals = r'medals.csv'
medals_r = pd.read_csv(medals, header=0)
r = | pd.isnull(medals_r) | pandas.isnull |
"""Test if functions in ``executers.py`` are properly connecting to the database."""
import pandas as pd
from sqltools import executers
def test_run_query() -> None:
"""Test if ``run_query`` connects to database."""
expected_data = pd.DataFrame({"test": [1]})
query = "SELECT 1 test"
actual_data = executers.run_query(query)
| pd.testing.assert_frame_equal(expected_data, actual_data) | pandas.testing.assert_frame_equal |
import pandas as pd
import altair as alt
import ci_mapping
from ci_mapping import logger
from ci_mapping.utils.utils import flatten_lists
def annual_publication_increase(data, filename="annual_publication_increase"):
"""Annual increase of publications.
Args:
data (`pd.DataFrame`): MAG paper data.
filename (str): Name of the HTML file to store the plot.
"""
frames = []
for cat in data.type.unique():
frame = pd.DataFrame(
data[data.type == cat].groupby("year")["id"].count()
/ data[data.type == cat].groupby("year")["id"].count().iloc[0]
).reset_index()
frame = pd.DataFrame(frame).rename(index=str, columns={"id": "value"})
frame["type"] = cat
frames.append(frame)
df = pd.concat(frames)
# Plotting
alt.Chart(df).mark_line(point=True).encode(
alt.X("year", axis=alt.Axis(labelFontSize=12, titleFontSize=12)),
alt.Y("value", axis=alt.Axis(labelFontSize=12, titleFontSize=12)),
alt.Color("type", legend=alt.Legend(title="Category")),
tooltip=["year", "value", "type"],
).properties(
title="Annual publication increase (base year = 2000)"
).configure_legend(
titleFontSize=12, labelFontSize=12
).interactive().save(
f"{ci_mapping.project_dir}/reports/figures/{filename}.html"
)
logger.info(f"Stored {filename} plot.")
def annual_publication_count(data, filename="annual_publication_count"):
"""Annual number of publications.
Args:
data (`pd.DataFrame`): MAG paper data.
filename (str): Name of the HTML file to store the plot.
"""
frames = []
for cat in data.type.unique():
frame = pd.DataFrame(
data[data.type == cat].groupby("year")["id"].count()
).reset_index()
frame = pd.DataFrame(frame).rename(index=str, columns={"id": "value"})
frame["type"] = cat
frames.append(frame)
df = pd.concat(frames)
# Plotting
alt.Chart(df).mark_line(point=True).encode(
alt.X("year", axis=alt.Axis(labelFontSize=12, titleFontSize=12)),
alt.Y("value", axis=alt.Axis(labelFontSize=12, titleFontSize=12)),
alt.Color("type", legend=alt.Legend(title="Category")),
tooltip=["year", "value", "type"],
).properties(title="Annual number of publications").configure_legend(
titleFontSize=12, labelFontSize=12
).interactive().save(
f"{ci_mapping.project_dir}/reports/figures/{filename}.html"
)
logger.info(f"Stored {filename} plot.")
def annual_citation_sum(data, filename="annual_citation_sum"):
"""Sum of annual citations for CI and AI+CI.
Args:
data (`pd.DataFrame`): MAG paper data.
filename (str): Name of the HTML file to store the plot.
"""
df = pd.DataFrame(data.groupby(["year", "type"])["citations"].sum()).reset_index()
# Plotting
alt.Chart(df).mark_circle(opacity=1, stroke="black", strokeWidth=0.5).encode(
alt.X("year", axis=alt.Axis(labelAngle=0)),
alt.Y("type"),
alt.Size(
"citations",
scale=alt.Scale(range=[0, 1500]),
legend=alt.Legend(title="Citations"),
),
alt.Color("type", legend=None),
).properties(
width=780,
height=150,
title="Total citations for CI and AI+CI papers published in a year",
).save(
f"{ci_mapping.project_dir}/reports/figures/{filename}.html"
)
logger.info(f"Stored {filename} plot.")
def publications_by_affiliation_type(data, filename="publications_by_affiliation_type"):
"""
Share of publications in CI, AI+CI by industry and non-industry affiliations.
Args:
data (`pd.DataFrame`): Author-level affiliation data.
filename (str): Name of the HTML file to store the plot.
"""
frames = []
for (num, comp) in zip([0, 1], ["non-Industry", "Industry"]):
for cat in data.type.unique():
df = data[data.non_company == num].drop_duplicates("paper_id")
nominator = df[df.type == cat].groupby("year")["paper_id"].count()
try:
denominator = (
df[df.type == cat].groupby("year")["paper_id"].count().iloc[0]
)
except IndexError:
denominator = 0
frame = pd.DataFrame(nominator / denominator).reset_index()
frame = | pd.DataFrame(frame) | pandas.DataFrame |
import numpy as np
import pandas as pd
from scipy.optimize import minimize
__all__ = ["NelsonSiegel", "Vasicek"]
def nelson_siegel(theta0, theta1, theta2, kappa, maturities):
inverse_maturities = 1.0 / maturities
inverse_maturities[inverse_maturities == np.inf] = 0
yields = np.zeros(maturities.shape)
yields += theta0
yields += (
(theta1 + theta2)
* (1 - np.exp(-maturities / kappa))
* inverse_maturities
* kappa
)
yields -= theta2 * np.exp(-maturities / kappa)
return yields
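# Note added for clarity: the closed form implemented above is the standard Nelson-Siegel
# curve,
#   y(t) = theta0 + (theta1 + theta2) * (kappa / t) * (1 - exp(-t / kappa)) - theta2 * exp(-t / kappa),
# with the t = 0 entries handled by zeroing 1 / t.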
def price(cashflows, zeros):
if isinstance(cashflows, pd.Series) and isinstance(zeros, pd.Series):
return (cashflows * zeros).sum()
return (cashflows * zeros).sum(axis=1)
def price_error(real_prices, fitted_prices):
return ((real_prices - fitted_prices) ** 2).sum()
def ns_error(x, real_prices, cashflows, maturities):
yields = nelson_siegel(*x, maturities=maturities)
zeros = np.exp(-maturities * yields)
fitted_prices = price(cashflows, zeros)
return price_error(real_prices, fitted_prices)
def ns_fit(real_prices, cashflows, maturities, x0=None):
x0 = x0 if x0 is not None else [0.0, 0.0, 0.0, 1.0]
return minimize(
ns_error, x0, args=(real_prices, cashflows, maturities), method="powell"
)
def forwards(df):
df["Forward"] = -np.log(df.Zero).diff() / df.Maturity.diff()
df.Forward.values[0] = df.Yield.values[0]
return df
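# Illustrative sketch (added for clarity; not part of the original module). Minimal fit of
# the curve to three zero-coupon bonds; each row of `cashflows` is one bond's payments at
# the given maturities, and all numbers are made up.
def _example_ns_fit():
    maturities = np.array([1.0, 2.0, 5.0])
    cashflows = np.array([[100.0, 0.0, 0.0],
                          [0.0, 100.0, 0.0],
                          [0.0, 0.0, 100.0]])
    real_prices = np.array([98.0, 95.5, 88.0])
    return ns_fit(real_prices, cashflows, maturities).x  # fitted (theta0, theta1, theta2, kappa)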
class NelsonSiegel:
def __init__(self, theta0, theta1, theta2, kappa):
self.theta0 = theta0
self.theta1 = theta1
self.theta2 = theta2
self.kappa = kappa
def __repr__(self):
params = ",".join(f"{k}={v:.4f}" for k, v in vars(self).items())
return f"{self.__class__.__name__}({params})"
def dataframe(self, maturities):
yields = nelson_siegel(
self.theta0, self.theta1, self.theta2, self.kappa, maturities
)
return (
| pd.DataFrame() | pandas.DataFrame |
# CacheIntervals: Memoization with interval parameters
#
# Copyright (C) <NAME>
#
# This file is part of CacheIntervals.
#
# @author = '<NAME>'
# @email = '<EMAIL>'
import logging
from functools import reduce
import loguru
import numpy as np
import pandas as pd
import pendulum as pdl
import sqlite3
import time
import klepto
from datetime import date, datetime
from CacheIntervals import MemoizationWithIntervals
from CacheIntervals.utils.Timer import Timer
name_db_file_test1 = "../test/test1.sqlite"
delay = 2
def get_records(conn, name_table, period = pd.Interval(pd.Timestamp(2021, 1,1), pd.Timestamp(2021, 1, 31))):
time.sleep(delay)
query = f"Select * From {name_table} Where date(date) between date('{period.left.date()}') and date('{period.right.date()}')"
#query = f'Select * From {name_table} '
loguru.logger.debug(query)
df = pd.read_sql(query, conn)
return df
cache_itvls =MemoizationWithIntervals(
[], ['period'],
aggregation=pd.concat,
debug=True,
memoization=klepto.lru_cache(
maxsize=500,
cache=klepto.archives.dict_archive(),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
get_records_cached = cache_itvls(get_records)
cache_itvls_concat_with_tolerance = MemoizationWithIntervals(
[], ['period'],
aggregation=pd.concat,
debug=False,
memoization=klepto.lru_cache(
maxsize=500,
cache=klepto.archives.dict_archive(),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)),
rounding = pdl.today()-pdl.yesterday()
)
get_records_cached_with_tolerance_1day = cache_itvls_concat_with_tolerance(get_records)
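# Note added for clarity (based on how the tests below exercise it): the decorator memoizes
# on the 'period' interval argument, so a later request is answered by reusing cached
# sub-intervals, querying only the missing pieces, and combining the parts with the
# `aggregation` callable (pd.concat here, element-wise addition for the cumulative case).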
def caching_with_tolerance():
with Timer() as timer_no_cache:
df_jan = get_records(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 1, 1), pd.Timestamp(2021, 1, 31)))
# activate caching
get_records_cached_with_tolerance_1day(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 1, 1),
pd.Timestamp(2021, 1, 31)))
df_jan_cached = None
with Timer() as timer_cache:
df_jan_cached = get_records_cached_with_tolerance_1day(cnx_file, "test1",
pd.Interval(pd.Timestamp(2021, 1, 1), pd.Timestamp(2021, 2, 1)))
loguru.logger.debug(f'\n{df_jan_cached.sort_values(by="date")}')
assert timer_cache.interval < timer_no_cache.interval
def accesss_cached_function():
get_records_cached(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 2, 1), pd.Timestamp(2021, 3, 31)))
f_cached = get_records_cached(cnx_file, "test1", get_function_cachedQ=True)
return f_cached.info().hit, f_cached.info().miss, f_cached.info().load
########################################################################################################
#
# Testing caching with aggregation-type operations
#
########################################################################################################
def agg_cumul(listdf):
loguru.logger.debug(f'list dfs:{listdf}')
listdf = [df for df in listdf if not (df is None) and not (df.empty)]
if len(listdf):
df = reduce(lambda x, y: x.add(y, fill_value=0), listdf)
else:
raise Exception("Nothing to aggregate")
return df
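# Illustrative sketch (added for clarity; not part of the original test). agg_cumul adds the
# per-interval results element-wise, aligning on the index and treating missing currencies
# as 0; the frames below are made up.
def _example_agg_cumul():
    jan = pd.DataFrame({'total': [10.0, 5.0]}, index=['EUR', 'USD'])
    feb = pd.DataFrame({'total': [3.0, 7.0]}, index=['EUR', 'GBP'])
    return agg_cumul([jan, feb])  # EUR 13.0, GBP 7.0, USD 5.0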
def cumulate_records(conn, name_table, period=pd.Interval(pd.Timestamp(2021, 1, 1), pd.Timestamp(2021, 1, 31))):
time.sleep(delay) # simulating a long SQL request
query = f"Select currency, sum(amount_in_eur) " \
f"From {name_table} " \
f"Where date(date) >= date('{period.left.date()}') and date(date) < date('{period.right.date()}')" \
f"Group by currency"
loguru.logger.debug(query)
df = pd.read_sql(query, conn)
df = df.set_index('currency', drop=True)
df.columns = ['total']
df['total'] = df['total'].astype(float)
return df
cache_itvls_agg = MemoizationWithIntervals(
[],
['period'],
aggregation=agg_cumul,
debug=True,
memoization=klepto.lru_cache(
maxsize=500,
cache=klepto.archives.dict_archive(),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)),
subintervals_requiredQ=True # extra-kwarg are passed to RecordInterval constructor
)
cumulate_records_cached = cache_itvls_agg(cumulate_records)
def caching_aggregation():
with Timer() as timer_no_cache:
df_janmar = cumulate_records(cnx_file,
"test1",
pd.Interval(pd.Timestamp(2021, 1, 1), pd.Timestamp(2021, 4, 1)))
# activate caching
df_jan = cumulate_records_cached(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 1, 1),
pd.Timestamp(2021, 2, 1)))
df_febmar = cumulate_records_cached(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 2, 1),
pd.Timestamp(2021, 4, 1)))
with Timer() as timer_cache:
df_janmar_cached = cumulate_records_cached(cnx_file,
"test1",
pd.Interval(pd.Timestamp(2021, 1, 1),
pd.Timestamp(2021, 4, 1)))
loguru.logger.debug(f'no cache: \n{df_janmar}')
loguru.logger.debug(f'cached: \n{df_janmar_cached}')
loguru.logger.debug(f'jan: \n{df_jan}')
loguru.logger.debug(f'feb-mar:\n{df_febmar}')
df_compare = pd.concat({'nocache': df_janmar, 'cache' : df_janmar_cached}, axis=1)
df_compare = df_compare.assign(zediff = lambda x: x[('cache', 'total')] - x[('nocache', 'total')])
df_compare = df_compare.assign(zediff = lambda x: x.zediff.apply(abs))
loguru.logger.debug(f'diff :\n{df_compare[df_compare.zediff>1]}')
assert np.isclose(df_janmar.total, df_janmar_cached.total, 0.1).all()
assert timer_cache.interval < timer_no_cache.interval
if __name__ == '__main__':
import logging
import daiquiri
daiquiri.setup(level=logging.DEBUG)
name_csv_test1 = "test1.gz"
cnx_file = sqlite3.connect(name_db_file_test1)
if False:
df = pd.read_sql('Select * from test1', cnx_file)
loguru.logger.debug(f'DB content:\n{df[df.date<"2021-01-04"].groupby(["date","currency"]).sum()}')
if False:
df = get_records(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 1,1), pd.Timestamp(2021, 1, 31)))
loguru.logger.debug(f'\n{df}')
df = get_records_cached(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 1,1), pd.Timestamp(2021, 1, 31)))
df = get_records_cached(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 1,1), pd.Timestamp(2021, 1, 31)))
loguru.logger.debug(f'\n{df}')
if False:
caching_with_tolerance()
if False:
hits, miss, load = accesss_cached_function()
loguru.logger.info(f'hits: {hits}, miss: {miss}, load: {load}')
if True:
df1 = cumulate_records(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 1,1), pd.Timestamp(2021, 2, 1)))
loguru.logger.debug(f'Jan 1st result: {df1}')
df2 = cumulate_records(cnx_file, "test1", pd.Interval( | pd.Timestamp(2021, 2, 1) | pandas.Timestamp |
import pandas as pd
import abc
from typing import Iterable
from datetime import datetime
class TableWriter(abc.ABC):
@abc.abstractmethod
def __init__(self, rows: Iterable[Iterable]) -> None:
pass
@abc.abstractmethod
def set_rows(self, rows: Iterable[Iterable]) -> None:
pass
@abc.abstractmethod
def write_to_file(self, file_path: str, header_row: Iterable[str]):
pass
class ExcelWriter(TableWriter):
DEFAULT_FILE_PATH: str = f"./saved_documents/{datetime.now().strftime('%Y-%m-%dT%H-%M-%S')}.xlsx"
def __init__(self, rows: Iterable[Iterable]) -> None:
super().__init__(rows)
self.__rows: Iterable[Iterable] = rows
self.__dataframe: pd.DataFrame = pd.DataFrame(data=rows)
def set_rows(self, rows: Iterable[Iterable]) -> None:
self.__rows = rows
self.__dataframe = | pd.DataFrame(data=rows) | pandas.DataFrame |
# pylint: skip-file
# Pylint said Maximum recursion depth exceeded
import sys
import click
from pathlib import Path
@click.command()
@click.argument('input-path', type=click.Path(exists=True))
@click.argument('checkpoint-path', type=click.Path(exists=True, dir_okay=False))
@click.argument('output-dir', type=click.Path())
@click.argument('landmarks-path', type=click.Path())
@click.argument('num-iterations', type=int)
@click.argument('csv-path', type=click.Path())
@click.option('--batch-size', '-b', type=int, default=6, show_default=True)
@click.option('--num-workers', '-j', type=int, default=12, show_default=True)
@click.option('--gpu/--cpu', default=True, show_default=True)
@click.option('--threshold/--no-threshold', default=False, show_default=True)
@click.option('--augmentation/--no-augmentation', default=True, show_default=True) # whether to use same augmentation as the one during training
@click.option('--save-volumes/--no-save-volumes', '-v', default=True, show_default=True)
@click.option('--interpolation', default='bspline', type=click.Choice(['linear', 'bspline']), show_default=True)
@click.option('--std-noise', default=0, type=float)
def main(
input_path,
checkpoint_path,
output_dir,
landmarks_path,
num_iterations,
csv_path,
batch_size,
num_workers,
gpu,
threshold,
augmentation,
save_volumes,
interpolation,
std_noise,
):
import torch
import pandas as pd
import numpy as np
import torchio as tio
from tqdm import tqdm
import models
device = torch.device('cuda' if torch.cuda.is_available() and gpu else 'cpu')
checkpoint = torch.load(checkpoint_path, map_location=device)
model = models.get_unet().to(device)
model.load_state_dict(checkpoint['model'])
output_dir = Path(output_dir)
model.eval()
torch.set_grad_enabled(False)
fps = get_paths(input_path)
mean_dir = output_dir / 'mean'
std_dir = output_dir / 'std'
# entropy_dir = output_dir / 'entropy'
mean_dir.mkdir(parents=True, exist_ok=True)
std_dir.mkdir(parents=True, exist_ok=True)
# entropy_dir.mkdir(parents=True, exist_ok=True)
records = []
progress = tqdm(fps, unit='subject')
for fp in progress:
subject_id = fp.name[:4]
progress.set_description(subject_id)
image = tio.ScalarImage(fp)
subject = tio.Subject(image=image) # key must be 'image' as in get_test_transform
transform = get_transform(augmentation, landmarks_path)
dataset = tio.SubjectsDataset(num_iterations * [subject], transform=transform)
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
num_workers=num_workers,
collate_fn=lambda x: x,
)
all_results = []
for subjects_list_batch in tqdm(loader, leave=False, unit='batch'):
inputs = torch.stack([subject.image.data for subject in subjects_list_batch]).float().to(device)
with torch.cuda.amp.autocast():
segs = model(inputs).softmax(dim=1)[:, 1:].cpu()
iterable = list(zip(subjects_list_batch, segs))
for subject, seg in tqdm(iterable, leave=False, unit='subject'):
subject.image.set_data(seg)
inverse_transform = subject.get_inverse_transform(warn=False)
inverse_transforms = inverse_transform.transforms
first = inverse_transforms[0]
if hasattr(first, 'image_interpolation') and first.image_interpolation != 'linear':
first.image_interpolation = 'linear' # force interp to be lin so probs stay in [0,1]
subject_back = inverse_transform(subject)
result = subject_back.image.data
assert np.count_nonzero(result.numpy() < 0) == 0, 'neg values found in result'
if threshold:
result = (result >= 0.5).float()
all_results.append(result)
result = torch.stack(all_results)
volumes = result.sum(dim=(-3, -2, -1)).numpy()
mean_volumes = volumes.mean()
std_volumes = volumes.std()
volume_variation_coefficient = std_volumes / mean_volumes
q1, q3 = np.percentile(volumes, (25, 75))
quartile_coefficient_of_dispersion = (q3 - q1) / (q3 + q1)
record = dict(
Subject=subject_id,
VolumeMean=mean_volumes,
VolumeSTD=std_volumes,
VVC=volume_variation_coefficient,
Q1=q1,
Q3=q3,
QCD=quartile_coefficient_of_dispersion,
)
if save_volumes:
for i, volume in enumerate(volumes):
record[f'Volume_{i}'] = volume
records.append(record)
mean = result.mean(dim=0)
std = result.std(dim=0)
# entropy = utils.get_entropy(result)
mean_image = tio.ScalarImage(tensor=mean, affine=image.affine)
std_image = tio.ScalarImage(tensor=std, affine=image.affine)
# entropy_image = tio.ScalarImage(tensor=entropy, affine=image.affine)
mean_path = mean_dir / fp.name.replace('.nii', '_mean.nii')
std_path = std_dir / fp.name.replace('.nii', '_std.nii')
# entropy_path = entropy_dir / fp.name.replace('.nii', '_entropy.nii')
mean_image.save(mean_path)
std_image.save(std_path)
# entropy_image.save(entropy_path)
# So it's updated during execution
df = | pd.DataFrame.from_records(records) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
        # make sure that we are handling non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
        # a mix of nan and None results in 'mixed'
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates)
assert result == 'mixed'
result = lib.infer_dtype(dates, skipna=True)
assert result == 'date'
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(['foo', 'bar']))
assert not lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=False)
assert lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=True)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
assert not lib.is_period(1)
assert not lib.is_period(np.nan)
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
class TestNumberScalar(object):
def test_is_number(self):
assert is_number(True)
assert is_number(1)
assert is_number(1.1)
assert is_number(1 + 3j)
assert is_number(np.bool(False))
assert is_number(np.int64(1))
assert is_number(np.float64(1.1))
assert is_number(np.complex128(1 + 3j))
assert is_number(np.nan)
assert not is_number(None)
assert not is_number('x')
assert not is_number(datetime(2011, 1, 1))
assert not is_number(np.datetime64('2011-01-01'))
assert not is_number(Timestamp('2011-01-01'))
assert not is_number(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_number(timedelta(1000))
assert not is_number(Timedelta('1 days'))
# questionable
assert not is_number(np.bool_(False))
assert is_number(np.timedelta64(1, 'D'))
def test_is_bool(self):
assert is_bool(True)
assert is_bool(np.bool(False))
assert is_bool(np.bool_(False))
assert not is_bool(1)
assert not is_bool(1.1)
assert not is_bool(1 + 3j)
assert not is_bool(np.int64(1))
assert not is_bool(np.float64(1.1))
assert not is_bool(np.complex128(1 + 3j))
assert not is_bool(np.nan)
assert not is_bool(None)
assert not is_bool('x')
assert not is_bool(datetime(2011, 1, 1))
assert not is_bool(np.datetime64('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_bool(timedelta(1000))
assert not is_bool(np.timedelta64(1, 'D'))
assert not is_bool(Timedelta('1 days'))
def test_is_integer(self):
assert is_integer(1)
assert is_integer(np.int64(1))
assert not is_integer(True)
assert not is_integer(1.1)
assert not is_integer(1 + 3j)
assert not is_integer(np.bool(False))
assert not is_integer(np.bool_(False))
assert not is_integer(np.float64(1.1))
assert not is_integer(np.complex128(1 + 3j))
assert not is_integer(np.nan)
assert not is_integer(None)
assert not is_integer('x')
assert not is_integer(datetime(2011, 1, 1))
assert not is_integer(np.datetime64('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_integer(timedelta(1000))
assert not is_integer(Timedelta('1 days'))
# questionable
assert is_integer(np.timedelta64(1, 'D'))
def test_is_float(self):
assert is_float(1.1)
assert is_float(np.float64(1.1))
assert is_float(np.nan)
assert not is_float(True)
assert not is_float(1)
assert not is_float(1 + 3j)
assert not is_float(np.bool(False))
assert not is_float(np.bool_(False))
assert not is_float(np.int64(1))
assert not is_float(np.complex128(1 + 3j))
assert not is_float(None)
assert not is_float('x')
assert not is_float(datetime(2011, 1, 1))
assert not is_float(np.datetime64('2011-01-01'))
assert not is_float(Timestamp('2011-01-01'))
assert not is_float(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_float(timedelta(1000))
assert not is_float(np.timedelta64(1, 'D'))
assert not is_float(Timedelta('1 days'))
def test_is_datetime_dtypes(self):
ts = pd.date_range('20130101', periods=3)
tsa = pd.date_range('20130101', periods=3, tz='US/Eastern')
assert is_datetime64_dtype('datetime64')
assert is_datetime64_dtype('datetime64[ns]')
assert is_datetime64_dtype(ts)
assert not is_datetime64_dtype(tsa)
assert not is_datetime64_ns_dtype('datetime64')
assert is_datetime64_ns_dtype('datetime64[ns]')
assert is_datetime64_ns_dtype(ts)
assert is_datetime64_ns_dtype(tsa)
assert is_datetime64_any_dtype('datetime64')
assert is_datetime64_any_dtype('datetime64[ns]')
assert is_datetime64_any_dtype(ts)
assert is_datetime64_any_dtype(tsa)
assert not is_datetime64tz_dtype('datetime64')
assert not is_datetime64tz_dtype('datetime64[ns]')
assert not is_datetime64tz_dtype(ts)
assert is_datetime64tz_dtype(tsa)
for tz in ['US/Eastern', 'UTC']:
dtype = 'datetime64[ns, {}]'.format(tz)
assert not is_datetime64_dtype(dtype)
assert is_datetime64tz_dtype(dtype)
assert is_datetime64_ns_dtype(dtype)
assert is_datetime64_any_dtype(dtype)
def test_is_timedelta(self):
assert is_timedelta64_dtype('timedelta64')
assert is_timedelta64_dtype('timedelta64[ns]')
assert not is_timedelta64_ns_dtype('timedelta64')
assert is_timedelta64_ns_dtype('timedelta64[ns]')
tdi = TimedeltaIndex([1e14, 2e14], dtype='timedelta64[ns]')
assert is_timedelta64_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi.astype('timedelta64[ns]'))
# Conversion to Int64Index:
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64'))
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64[h]'))
class TestIsScalar(object):
def test_is_scalar_builtin_scalars(self):
assert is_scalar(None)
assert is_scalar(True)
assert is_scalar(False)
assert is_scalar(Number())
assert is_scalar(Fraction())
assert is_scalar(0.)
assert is_scalar(np.nan)
assert is_scalar('foobar')
assert is_scalar(b'foobar')
assert is_scalar(u('efoobar'))
assert is_scalar(datetime(2014, 1, 1))
assert is_scalar(date(2014, 1, 1))
assert is_scalar(time(12, 0))
assert is_scalar(timedelta(hours=1))
assert is_scalar(pd.NaT)
def test_is_scalar_builtin_nonscalars(self):
assert not is_scalar({})
assert not is_scalar([])
assert not is_scalar([1])
assert not is_scalar(())
assert not is_scalar((1, ))
assert not is_scalar(slice(None))
assert not is_scalar(Ellipsis)
def test_is_scalar_numpy_array_scalars(self):
assert is_scalar(np.int64(1))
assert is_scalar(np.float64(1.))
assert is_scalar(np.int32(1))
assert is_scalar(np.object_('foobar'))
assert is_scalar(np.str_('foobar'))
assert is_scalar(np.unicode_(u('foobar')))
assert is_scalar(np.bytes_(b'foobar'))
assert is_scalar(np.datetime64('2014-01-01'))
assert is_scalar(np.timedelta64(1, 'h'))
def test_is_scalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
assert not is_scalar(zerodim)
assert is_scalar(lib.item_from_zerodim(zerodim))
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_is_scalar_numpy_arrays(self):
assert not is_scalar(np.array([]))
assert not is_scalar(np.array([[]]))
assert not is_scalar(np.matrix('1; 2'))
def test_is_scalar_pandas_scalars(self):
assert is_scalar(Timestamp('2014-01-01'))
assert is_scalar(Timedelta(hours=1))
assert is_scalar(Period('2014-01-01'))
assert is_scalar(Interval(left=0, right=1))
assert is_scalar(DateOffset(days=1))
def test_is_scalar_pandas_containers(self):
assert not is_scalar(Series())
assert not is_scalar(Series([1]))
assert not is_scalar(DataFrame())
assert not is_scalar( | DataFrame([[1]]) | pandas.DataFrame |
import streamlit as st
import plotly_express as px
import pandas as pd
from plotnine import *
from plotly.tools import mpl_to_plotly as ggplotly
import numpy as np
import math
import scipy.stats as ss
from scipy.stats import *
def app():
# add a select widget to the side bar
st.sidebar.subheader("Discrete Probaility")
prob_choice = st.sidebar.radio("",["Discrete Probability","Binomial Probability","Geometric Probability","Poisson Probability"])
st.markdown('Discrete Probability')
if prob_choice == "Discrete Probability":
top = st.columns((1,1,2))
bottom = st.columns((1,1))
with top[0]:
#st.subheader("Discrete Probaility")
gs_URL = st.session_state.gs_URL
googleSheetId = gs_URL.split("spreadsheets/d/")[1].split("/edit")[0]
worksheetName = st.text_input("Sheet Name:","Discrete")
URL = f'https://docs.google.com/spreadsheets/d/{googleSheetId}/gviz/tq?tqx=out:csv&sheet={worksheetName}'
if st.button('Refresh'):
df = pd.read_csv(URL)
df = df.dropna(axis=1, how="all")
df = pd.read_csv(URL)
df = df.dropna(axis=1, how="all")
with bottom[0]:
st.dataframe(df)
global numeric_columns
global non_numeric_columns
try:
numeric_columns = list(df.select_dtypes(['float', 'int']).columns)
non_numeric_columns = list(df.select_dtypes(['object']).columns)
except Exception as e:
print(e)
st.write("Please upload file to the application.")
with top[1]:
x_axis = st.selectbox('X-Axis', options=numeric_columns, index=0)
prob = st.selectbox('Probabilities', options=numeric_columns, index = 1)
cat = 0
if len(non_numeric_columns) >= 1:
cat = 1
#cv = st.selectbox("Group", options=list(df[non_numeric_columns[0]].unique()))
if cat == 0:
x = df[x_axis]
p_x = df[prob]
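            # For a discrete distribution: E[X] = sum(x * P(x)) and
            # SD[X] = sqrt(sum((x - E[X])**2 * P(x))), computed below.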
m = sum(x*p_x)
sd = math.sqrt(sum((x-m)**2*p_x))
data = pd.DataFrame({"Mean":m,"Std Dev":sd},index = [0])
with top[2]:
dph = ggplot(df) + geom_bar(aes(x=df[df.columns[0]],weight=df[df.columns[1]]),color="darkblue", fill="lightblue")
st.pyplot(ggplot.draw(dph))
with bottom[1]:
st.write(data)
if cat != 0:
with bottom[1]:
data = pd.DataFrame(columns = ['Type','Mean','Standard Deviation'])
drow = 0
for type in list(df[non_numeric_columns[0]].unique()):
df1 = df[df[non_numeric_columns[0]]==type]
x = df1[x_axis]
p_x = df1[prob]
data.loc[drow,'Type'] = type
m = sum(x*p_x)
data.loc[drow,'Mean'] = m
data.loc[drow,'Standard Deviation'] = math.sqrt(sum((x-m)**2*p_x))
                    drow += 1
st.dataframe(data)
with top[2]:
dph = ggplot(df) + geom_bar(aes(x=df[x_axis],weight=df[prob],fill=non_numeric_columns[0],color=non_numeric_columns[0]),position= "identity", alpha = .4)
st.pyplot(ggplot.draw(dph))
if prob_choice == "Binomial Probability":
top = st.columns(2)
with top[0]:
st.subheader("Binomial Probability")
bip, bit, bih = st.text_input("Hit Probability:",.2),st.text_input("Tries:",8),st.text_input("Hits:",0)
bit = int(bit)
bip = float(bip)
biah = np.r_[0:bit+1]
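            # For X ~ Binomial(n=bit, p=bip): pmf[k] = P(X = k) and cdf[k] = P(X <= k)
            # for every k in 0..bit.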
cdf = binom.cdf(biah,bit,bip)
pmf = binom.pmf(biah,bit,bip)
biah = pd.DataFrame(biah)
cdf = pd.DataFrame(cdf)
pmf = | pd.DataFrame(pmf) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Series,
Timestamp,
isna,
notna,
)
import pandas._testing as tm
class TestSeriesClip:
def test_clip(self, datetime_series):
val = datetime_series.median()
assert datetime_series.clip(lower=val).min() == val
assert datetime_series.clip(upper=val).max() == val
result = datetime_series.clip(-0.5, 0.5)
expected = np.clip(datetime_series, -0.5, 0.5)
tm.assert_series_equal(result, expected)
assert isinstance(expected, Series)
def test_clip_types_and_nulls(self):
sers = [
Series([np.nan, 1.0, 2.0, 3.0]),
Series([None, "a", "b", "c"]),
Series(pd.to_datetime([np.nan, 1, 2, 3], unit="D")),
]
for s in sers:
thresh = s[2]
lower = s.clip(lower=thresh)
upper = s.clip(upper=thresh)
assert lower[notna(lower)].min() == thresh
assert upper[notna(upper)].max() == thresh
assert list(isna(s)) == list(isna(lower))
assert list(isna(s)) == list(isna(upper))
def test_series_clipping_with_na_values(self, any_numeric_ea_dtype, nulls_fixture):
        # Ensure that clipping method can handle NA values without failing
# GH#40581
if nulls_fixture is pd.NaT:
# constructor will raise, see
# test_constructor_mismatched_null_nullable_dtype
return
ser = Series([nulls_fixture, 1.0, 3.0], dtype=any_numeric_ea_dtype)
s_clipped_upper = ser.clip(upper=2.0)
s_clipped_lower = ser.clip(lower=2.0)
expected_upper = Series([nulls_fixture, 1.0, 2.0], dtype=any_numeric_ea_dtype)
expected_lower = Series([nulls_fixture, 2.0, 3.0], dtype=any_numeric_ea_dtype)
tm.assert_series_equal(s_clipped_upper, expected_upper)
tm.assert_series_equal(s_clipped_lower, expected_lower)
def test_clip_with_na_args(self):
"""Should process np.nan argument as None"""
# GH#17276
s = Series([1, 2, 3])
tm.assert_series_equal(s.clip(np.nan), Series([1, 2, 3]))
tm.assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3]))
# GH#19992
tm.assert_series_equal(s.clip(lower=[0, 4, np.nan]), Series([1, 4, 3]))
tm.assert_series_equal(s.clip(upper=[1, np.nan, 1]), Series([1, 2, 1]))
# GH#40420
s = Series([1, 2, 3])
result = s.clip(0, [np.nan, np.nan, np.nan])
tm.assert_series_equal(s, result)
def test_clip_against_series(self):
# GH#6966
s = Series([1.0, 1.0, 4.0])
lower = Series([1.0, 2.0, 3.0])
upper = Series([1.5, 2.5, 3.5])
tm.assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
tm.assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("upper", [[1, 2, 3], np.asarray([1, 2, 3])])
def test_clip_against_list_like(self, inplace, upper):
# GH#15390
original = Series([5, 6, 7])
result = original.clip(upper=upper, inplace=inplace)
expected = Series([1, 2, 3])
if inplace:
result = original
tm.assert_series_equal(result, expected, check_exact=True)
def test_clip_with_datetimes(self):
# GH#11838
# naive and tz-aware datetimes
t = Timestamp("2015-12-01 09:30:30")
s = Series([Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:31:00")])
result = s.clip(upper=t)
expected = Series(
[Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:30:30")]
)
tm.assert_series_equal(result, expected)
t = Timestamp("2015-12-01 09:30:30", tz="US/Eastern")
s = Series(
[
Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),
Timestamp("2015-12-01 09:31:00", tz="US/Eastern"),
]
)
result = s.clip(upper=t)
expected = Series(
[
Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),
Timestamp("2015-12-01 09:30:30", tz="US/Eastern"),
]
)
tm.assert_series_equal(result, expected)
def test_clip_with_timestamps_and_oob_datetimes(self):
# GH-42794
ser = Series([datetime(1, 1, 1), datetime(9999, 9, 9)])
result = ser.clip(lower=Timestamp.min, upper=Timestamp.max)
expected = Series([Timestamp.min, Timestamp.max], dtype="object")
tm.assert_series_equal(result, expected)
def test_clip_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
ser = Series([1, 2, 3])
msg = (
r"In a future version of pandas all arguments of Series.clip except "
r"for the arguments 'lower' and 'upper' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.clip(0, 1, 0)
expected = | Series([1, 1, 1]) | pandas.Series |
import os
import re
import json
import numpy as np
import pandas as pd
import operator
import base64
os.environ['DJANGO_SETTINGS_MODULE'] = 'zazz_site.settings'
import django
django.setup()
from django.core.exceptions import ObjectDoesNotExist
from django.core import serializers
from zazz import models
from time import gmtime, strftime
from functools import reduce
from itertools import product
from collections import OrderedDict, defaultdict
from zazz.models import Samples
print ('OFFLINE:')
g = {
}
import_errors = defaultdict(int)
'''
class Mutations(models.Model):
vep = models.ManyToManyField(to="VEP")
name = models.CharField(null=False, max_length=100)
alternative = models.CharField(null=True, max_length=100)
reference = models.CharField(null=True, max_length=100)
this_type = models.CharField(null=False, choices=[('name', 'GENERIC'), ('rs_name', 'rs'), ('hgvs_name', 'hgvs')], max_length=100)
'''
class ZazzException(Exception):
def set_info(self, info):
self.info = info
def convert_to_base64(s):
return base64.b64encode(bytes(s, encoding='ascii')).decode()
def decode_base64_json(s):
return json.loads(base64.b64decode(s.replace('_', '=')))
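# Note: these two helpers are not exact inverses. convert_to_base64() only base64-encodes an
# ASCII string, while decode_base64_json() expects '=' padding to have been replaced by '_'
# (presumably for URL safety elsewhere) and JSON-decodes the result.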
def print_now():
return strftime("%Y-%m-%d %H:%M:%S", gmtime())
def get_model(name):
return getattr(models, name)
def create_field_parameters(parameters):
return ', '.join(['{k} = {v}'.format(k=k,v=v) for k,v in parameters.items()])
def create_field(field):
# if field['type'] in ['MultiSelectField']:
# this_models = ''
# else:
# this_models = 'models.'
this_models = 'models.'
return ' {name} = {this_models}{type_}({parameters})'.format(
name=field['name'].replace(' ', '_'),
this_models=this_models,
type_ = field['type'],
parameters = create_field_parameters(field['parameters']),
)
def create_fields(fields):
return '\n'.join([create_field(field) for field in fields])
def get_table_pattern():
table_pattern = '''
class {table}(models.Model):
{meta_val}
{fields_val}
'''
return table_pattern
def table_pattern_f(table, fields_val, meta_val=''):
table_pattern = get_table_pattern()
return table_pattern.format(table=table, fields_val=fields_val, meta_val=meta_val)
def create_external(external):
#Create main
table = external['name']
#fields_keys = [x for x in external['fields'] if x['name'] in external['keys']]
fields_keys = external['fields']
fields_val = create_fields(fields_keys)
ret = table_pattern_f(table=table, fields_val=fields_val)
#Create secondary
return ret
def create_externals(externals):
'''
externals = [
{'name': 'Clinvar', 'filename': 'clinvar.csv', 'type': 'csv', 'fields':
[
{'name': 'Chromosome', 'type': 'CharField', 'parameters': {'max_length': '100'}},
{'name': 'Position', 'type': 'IntegerField', 'parameters': {}},
{'name': 'Clinical Significance', 'type': 'CharField', 'parameters': {'max_length': '100'}},
],
'keys': ['Chromosome', 'Position'],
},
]
'''
return '\n'.join(map(create_external, externals))
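# For the Clinvar example in the docstring above, create_externals() would emit roughly:
#
# class Clinvar(models.Model):
#     Chromosome = models.CharField(max_length = 100)
#     Position = models.IntegerField()
#     Clinical_Significance = models.CharField(max_length = 100)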
def create_table(table, fields, externals):
'''
table: Name of main table
    fields: list of fields that describe the database
'''
Many2ManyTables = {}
for field in fields:
#if field.get('table', False):
if field.get('database', False) == 'multi_1':
f_table = field['table']
if not f_table in Many2ManyTables:
Many2ManyTables[f_table] = []
Many2ManyTables[f_table].append(field)
'''
Many2ManyTables is a dictionary.
    keys: names of the tables under which fields are grouped together
    values: lists of those fields
'''
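    # For example (assuming two multi_1 fields that both declare table='Transcripts'):
    # Many2ManyTables == {'Transcripts': [{'name': 'Sift', ...}, {'name': 'PolyPhen', ...}]}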
# Transform Many2ManyTables to django tables format
Many2ManyTables_text = '\n'.join([table_pattern_f(k,create_fields(v)) for k,v in Many2ManyTables.items()])
# Add the "normal" fields (not Many2Many)
new_fields = [field for field in fields if field.get('database', False) != 'multi_1']
#Add fields for ManyToMany
#The main table needs to have a ManytoMany relationship with the Samples table
new_fields += [{'name': k, 'type': 'ManyToManyField', 'parameters': {'to': k}} for k,v in Many2ManyTables.items()]
#We also need to add a "raw" field for each many2many relationship
#We may have to remove this on the furture!
for k,v in Many2ManyTables.items():
for f in v:
# f = {'name': 'Sift', 'col_name': 'sift', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 20, 'database': 'multi', 'l': <function import_annotated_vcf.<locals>.<lambda> at 0x116418488>, 'l_multi': <function splitUnique.<locals>.f at 0x116418510>, 'table': 'Transcripts', 'order': 21}
#print (f)
field_to_add = dict(f)
# All raw fields should be CharFields !
if field_to_add['type'] != 'CharField':
field_to_add['type'] = 'CharField'
field_to_add['parameters']['max_length'] = '200'
field_to_add['name'] += '_raw'
new_fields.append(field_to_add)
# Create a multi field index
meta_val = '''
class Meta:
indexes = [
models.Index(
fields=['Chromosome', 'Position', 'Reference', 'Alternative'],
name='sample_idx',
),
]
'''
table_text = table_pattern_f(table=table, fields_val = create_fields(new_fields), meta_val=meta_val)
# print (table_text)
# a=1/0
models_pattern = '''
from django.db import models
# from multiselectfield import MultiSelectField
# Create your models here.
{Many2ManyTables}
class Data(models.Model):
field = models.CharField(null=True, max_length=200)
{table}
{externals}
'''
externals_text = create_externals(externals)
models_text = models_pattern.format(table=table_text, Many2ManyTables=Many2ManyTables_text, externals=externals_text)
print ('NEW MODELS:')
print (models_text)
print ('Saving to zazz/models.py..')
with open('zazz/models.py', 'w') as f:
f.write(models_text)
print ('..DONE')
print ('Running: python manage.py makemigrations ...')
command = 'python manage.py makemigrations zazz'
os.system(command)
print (' ..DONE')
print ('Running: python manage.py migrate')
command = 'python manage.py migrate'
os.system(command)
print(' ..DONE')
#print (Data.objects.all())
#df = pd.read_excel('annotations_zaganas.xlsx')
#print (df[:3])
#print ()
#print ("python manage.py makemigrations")
#print ("python manage.py migrate")
def create_js_field(field):
'''
IGNORE = DO NOT SHOW IN UI
'''
pattern = "{{'name': '{name}', 'type': '{type}', 'selected': false, 'e_order': -1, 'database': '{database}', {special}{renderer}{table}{xUnits}{order}{include} }}"
database = field.get('database', 'normal');
xUnits = ''
if field.get('component') == 'freetext':
type_ = 'freetext'
special = "'text' : ''" # The ng-model
elif field.get('component') == 'ignore':
type_ = 'ignore'
special = "'text' : ''" # The ng-model
elif field['type'] in ['CharField', 'ManyToManyField']:
type_ = 'checkbox'
special = "'itemArray': [], 'selected2': ['ALL']"
elif field['type'] in ['IntegerField', 'FloatField']:
type_ = 'slider'
special = ''''slider': {
'min': 30,
'max': 70,
'options': {
'floor': 1,
'ceil': 100,
'disabled': true,
'onEnd' : function (sliderId, modelValue, highValue, pointerType) {
console.log('Slider changed');
//console.log(modelValue); // This the min
//console.log(highValue); // This is the max
$scope.update_table();
}
},
}'''
if field['type'] == 'IntegerField':
            if 'xUnits' not in field:
                raise ZazzException('xUnits missing from IntegerField')
xUnits = ", 'xUnits': " + str(field['xUnits'])
elif field['type'] == 'ForeignKey':
type_ = 'checkbox'
special = "'itemArray': [], 'selected2': ['ALL']"
else:
raise ZazzException('Unknown field: {}'.format(field['type']))
if 'renderer' in field:
renderer = ", 'renderer': " + field['renderer']
else:
renderer = ''
if 'table' in field:
table = ", 'table': '" + field['table'] + "'"
else:
table = ''
if 'order' in field:
order = ", 'order': " + str(field['order'])
else:
order = ''
if 'include' in field:
include = ", 'include': " + str(field['include'])
else:
include = ''
values = {
'name': field['name'],
'type': type_,
'special': special,
'database': database,
'renderer': renderer,
'table': table,
'order': order,
'include': include,
'xUnits': xUnits,
}
return pattern.format(**values)
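# For a plain CharField such as {'name': 'Gene', 'type': 'CharField', ...} (hypothetical field),
# the emitted javascript object is roughly:
# {'name': 'Gene', 'type': 'checkbox', 'selected': false, 'e_order': -1, 'database': 'normal', 'itemArray': [], 'selected2': ['ALL'] }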
def create_js_fields(fields):
return ',\n'.join([create_js_field(x) for x in fields])
def create_js(fields):
'''
$scope.fields = [
//{'name': 'sample', 'type': 'checkbox', 'selected': false, 'itemArray': [{id: 1, name: ''}], 'selected2': {'value': {id: 1, name: ''}} },
{'name': 'sample', 'type': 'checkbox', 'selected': false, 'itemArray': [], 'selected2': ['ALL'], 'e_order': -1 },
{'name': 'Bases', 'type': 'slider', 'selected': false, 'slider': {
'min': 30,
'max': 70,
'options': {
'floor': 1,
'ceil': 100,
'disabled': true,
'onEnd' : function (sliderId, modelValue, highValue, pointerType) {
console.log('Slider changed');
//console.log(modelValue); // This the min
//console.log(highValue); // This is the max
$scope.update_table();
}
},
},
'e_order': -1},
{'name':'Barcode_Name', 'type':'checkbox', 'selected': false, 'itemArray': [], 'selected2': ['ALL'], 'e_order': -1 }
];
'''
print ('JAVASCRIPT:')
fields_val = f'$scope.fields=[{create_js_fields(fields)}];'
print (fields_val)
# Add fields javascript object in angular controller
z_zazz_ctrl_fn = 'zazz/static/zazz/zazz_Ctrl.js'
with open(z_zazz_ctrl_fn) as f:
z_zazz_ctrl = f.read()
z_zazz_ctrl_new = re.sub(
r'// FIELDS BEGIN\n.+\n// FIELDS END\n',
f'// FIELDS BEGIN\n{fields_val}\n// FIELDS END\n',
z_zazz_ctrl,
flags=re.DOTALL )
with open(z_zazz_ctrl_fn, 'w') as f:
f.write(z_zazz_ctrl_new + '\n')
    print ('Saved javascript at:', z_zazz_ctrl_fn)
def is_dataframe(data):
'''
Return true if data is a pandas dataFrame
'''
return type(data) is pd.DataFrame
def chromosome_unifier(chromosome):
'''
All chromosome input should pass from this function.
Chromosome can be declared in multiple ways.. "1", chr1, chr01, ...
Here we make sure that all chromosome values are in the form chr1, chr2, chrX, chrY
'''
# "15" --> chr15
if re.match(r'^\d+$', chromosome):
return 'chr' + chromosome
if re.match(r'^chr[\dXY]+$', chromosome):
return chromosome
    if chromosome.upper() in ['X', 'Y']:
        # Keep X/Y upper-case so the output matches the chrX / chrY form described above
        return 'chr' + chromosome.upper()
raise ZazzException(f'Unknown Chromosome value: ->{chromosome}<-')
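# Illustrative calls (a quick sanity sketch, not part of the import pipeline):
#   chromosome_unifier('15')   -> 'chr15'
#   chromosome_unifier('chr2') -> 'chr2'
#   chromosome_unifier('Y')    -> 'chrY'
#   chromosome_unifier('MT')   -> raises ZazzException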
def get_value_from_record(field, record, line_index):
'''
Extract the value that is present in the record and is described in the field
field : Any item in fields list. field is a dictionary
record: Any item in input data.
DUPLICATE CODE!!
FIX ME!!
'''
if not field['col_name'] in record:
message = '{} does not exist in record\n'.format(field['col_name'])
message += 'Available columns:\n'
message += '\n'.join(record.keys()) + '\n'
raise ZazzException(message)
try:
if 'line_l' in field:
value = field['line_l'](record)
elif 'l' in field:
value = field['l'](record[field['col_name']])
else:
value = record[field['col_name']]
except ZazzException as t_exception:
e_message = str(t_exception)
e_info = t_exception.info
import_errors[e_message] += 1
value = None
except Exception as e:
print ('Record:')
print (record)
print ('Index:', line_index)
raise e
return value
def get_key_from_record(field):
'''
Get the name of the key of the record
'''
key = field['name']
if field.get('database', '') == 'multi_2':
pass
elif field.get('database', '') == 'multi_1':
key = field['name'] + '_raw'
return key
def create_m2m_table(schema, table):
'''
Create a dictionary with all the Many2Many tables.
    Example keys: {'phylop', 'pfam', 'drugbank', 'go', 'dbsnp', 'omim', 'cosmic', 'Transcripts'}
    keys: multi_1 table names
    values: lists of the field dictionaries that belong to each table.
'''
m2m_tables = defaultdict(list)
for field in schema:
if field.get('database', '') == 'multi_1':
#m2m_tables.add(field.get('table', table))
m2m_tables[field.get('table', table)].append(field)
return m2m_tables
def get_multi_1_records(m2m_tables, record, ):
'''
example of field:
{'name': 'ANN_GeneDetail_refGene', 'col_name': 'GeneDetail.refGene', 'type': 'CharField', 'parameters': {'max_length': '500', 'null': 'True'}, 'database': 'multi_1', 'table': 'ANN_GeneDetail_refGene', 'l_multi': lambda x : x.replace('\\x3d', '=').split('\\x3b'), 'order': 38},
Returns:
ret:
{
'nameof_m2m_tale' : {
m2m_field_1: [list of values],
m2m_field_2: [list of values],
}
}
ret_raw:
{
'nameof_m2m_tale' : {
m2m_field_1: raw_values,
m2m_field_2: raw_values,
}
}
'''
ret = defaultdict(dict)
ret_raw = defaultdict(dict)
for m2m_table_key, m2m_table_value in m2m_tables.items():
for field in m2m_table_value:
#print ('*** FIELD: ***')
#print (field)
unsplitted = record[field['col_name']]
splited_values = field['l_multi'](unsplitted)
ret[m2m_table_key][field['name']] = splited_values
if 'l_raw_multi' in field:
ret_raw[m2m_table_key][field['name'] + '_raw'] = field['l_raw_multi'](splited_values)
else:
ret_raw[m2m_table_key][field['name'] + '_raw'] = unsplitted
#print (ret)
#a=1/0
return ret, ret_raw
def create_attribute_records(record_list):
'''
record_list:
{'k': [1,2,3], 'l': [4,5,6]}
RETURNS:
[{'k': 1, 'l': 4}, {'k': 2, 'l': 5}, {'k':3, 'l': 6}]
'''
return [dict(zip(record_list.keys(), x)) for x in zip(*record_list.values())]
def import_data_append(input_data, schema, table, externals, **kwargs):
'''
Append new data
kwargs:
to_append_re : Regular expression to match new field names
'''
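    # Hypothetical usage: import_data_append(df, schema, 'Samples', externals, to_append_re=r'^ANN_')
    # would update only the schema fields whose names start with 'ANN_'.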
# Get kwargs
to_append_re = kwargs.get('to_append_re', None)
assert to_append_re
# Get table
table_db = getattr(models, table)
# Check type of input data
if is_dataframe(input_data):
data = input_data.to_dict('records')
elif type(input_data) is dict:
data = input_data
else:
raise ZazzException('input_data is not a pandas dataframe or a dictionary')
#Get the new fields that we will add.
print ('Selecting only fields according to regexp: {}'.format(to_append_re))
print ('Total fields: {}'.format(len(schema)))
fields = [field for field in schema if re.match(to_append_re, field['name'])]
print ('Fields after selection: {}'.format(len(fields)))
assert len(fields)
print ('APPENDING NEW FIELDS:')
for field in fields:
print (' ' + field['name'])
# Get m2m_table:
m2m_tables = create_m2m_table(fields, table)
#print (m2m_tables)
#a=1/0
this_error = defaultdict(int)
for line_index, record in enumerate(data):
#print (line_index, record['# locus'])
if (line_index+1) % 1000 == 0:
print ('{} Imported records: {}/{} {:.1%}'.format(print_now(), line_index+1, len(data), line_index/len(data)))
try:
database_record = table_db.objects.get(Position=record['Position'], Chromosome=record['Chromosome'], Reference=record['Reference'], Alternative=record['Alternative'])
except ObjectDoesNotExist as e:
this_error['Could not find chromosome/position in db'] += 1
continue
for field in fields:
value = get_value_from_record(field, record, line_index)
key = get_key_from_record(field)
#print ('{}={}'.format(field['name'], value))
setattr(database_record, key, value)
#database_record.save()
# Get multi_1 records:
#print ('GeneDetail.refGene = ', record['GeneDetail.refGene'])
multi_1_records, multi_1_records_raw = get_multi_1_records(m2m_tables, record)
#print ('*** multi_1_records: ***')
#print (multi_1_records)
#print ('*** multi_1_records_raw: ***')
#print (multi_1_records_raw)
# Store multi records
for m2m_table_key, m2m_table_value in m2m_tables.items():
for field in m2m_table_value:
# Add raw multi_1 records
setattr(database_record, field['name'] + '_raw', multi_1_records_raw[m2m_table_key][field['name'] + '_raw'])
#print (database_record)
#print (field['name'] + '_raw')
#print (multi_1_records[m2m_table_key][field['name'] + '_raw'])
#Create attribute dictionary
attribute_records = create_attribute_records(multi_1_records[m2m_table_key])
#print ('*** attribute_records ***')
#print (attribute_records)
m2m_objects = [getattr(models, m2m_table_key).objects.get_or_create(**attribute_record)[0] for attribute_record in attribute_records]
getattr(getattr(database_record, m2m_table_key), 'set')(m2m_objects)
database_record.save()
print ('IMPORT ERRORS:')
print (json.dumps(this_error, indent=4))
def import_data(input_data, schema, table, externals, delete=True, **kwargs):
'''
Import input_data (a pandas DataFrame or a path to an .xlsx file) into the main table
and its multi_1 / multi_2 side tables, then annotate with the external datasets.
The main objects are bulk-created following this general pattern:
model_instances = [MyModel(
field_1=record['field_1'],
field_2=record['field_2'],
) for record in df_records]
'''
# Make sure that there is one and only one of the basic keys
chromosome_field = [x for x in schema if x['name'] == 'Chromosome']
position_field = [x for x in schema if x['name'] == 'Position']
reference_field = [x for x in schema if x['name'] == 'Reference']
alternative_field = [x for x in schema if x['name'] == 'Alternative']
assert len(chromosome_field) == 1
assert len(position_field) == 1
assert len(reference_field) == 1
assert len(alternative_field) == 1
chromosome_field = chromosome_field[0]
position_field = position_field[0]
reference_field = reference_field[0]
alternative_field = alternative_field[0]
errors_1 = 0
print ('Importing externals..')
if delete:
print ('Deleting external --> internal')
for external in externals:
if external['type'] == 'internal':
print (' Deleting external --> internal table: {}'.format(external['name']))
get_model(external['name']).objects.all().delete()
print (' Done')
print ('Deleting externals')
for external in externals:
if external['type'] == 'csv':
print (' Deleting external table: {}'.format(external['name']))
get_model(external['name']).objects.all().delete()
print (' Done')
if False:
'''
This is an initial effort. It is too slow.
It stores all info in DB. This is inefficient if we only need a fraction of information
'''
print ('Importing External Data')
for external in externals:
if external['type'] == 'csv':
print (' Name: {}'.format(external['name']))
print (' Loading file: {}'.format(external['filename']))
csv = pd.read_csv(external['filename'])
csv_dict = csv.to_dict('index')
print (' Length: {}'.format(len(csv_dict)))
c = 0
for index, d in csv_dict.items():
c += 1
if c % 1000 == 0:
print (' {}, Records: {}'.format(print_now(), c))
if c > 1000:
break
#Build a dictionary with the fields. NO M2M
item_fields_no_m2m = {field['name']:field['l'](d) for field in external['fields'] if not field['type'] == 'ManyToManyField'}
new_item = get_model(external['name']).objects.get_or_create(**item_fields_no_m2m)[0]
#new_item.save()
# Build a dictionary with fields. WITH M2M
for field in external['fields']:
if field['type'] != 'ManyToManyField':
continue
item_fields_m2m = {field['name']:field['l'](d) for field in external['fields'] if field['type'] == 'ManyToManyField'}
for m2m_k, m2m_v in item_fields_m2m.items():
getattr(new_item, m2m_k).add(m2m_v)
new_item.save()
elif external['type'] == 'internal':
continue
print (' Done')
if is_dataframe(input_data):
df = input_data
elif type(input_data) is str:
input_data_ext = os.path.splitext(input_data)[1]
if input_data_ext == '.xlsx':
print ('Reading MAIN Excel: {}'.format(input_data))
df = pd.read_excel(input_data)
else:
raise Exception('Unknown file type: ', input_data_ext )
else:
raise Exception('Unknown input type', type(input_data).__name__)
if False:
print ('Keeping only 1000 records')
df = df[:1000]
data = df.to_dict('records')
table_db = getattr(models, table)
if delete:
print ('Deleting all..')
print ('Deleting table.. ', table)
table_db.objects.all().delete()
# Get the new fields that we will add.
to_append_re = kwargs.get('to_append_re')
if to_append_re:
print ('Adding only fields that match regexp: {}'.format(to_append_re))
print ('Total fields: {}'.format(len(schema)))
schema = [field for field in schema if re.match(to_append_re, field['name'])]
# Add basic fields as well
schema.extend([chromosome_field, position_field, reference_field, alternative_field])
print ('After regexp: {}'.format(len(schema)))
m2m_tables = set()
for field in schema:
if field.get('database', '') == 'multi_1':
m2m_tables.add(field.get('table', table))
if delete:
for m2m_table in m2m_tables:
print ('Deleting table.. ', m2m_table)
mm_db = getattr(models, m2m_table)
mm_db.objects.all().delete()
#(field['line_l'](record)) if 'line_l' in field else (field.get('l', lambda l:l)(record[field['col_name']]))
print ('Building instances..')
if False:
instances = [
table_db(**{
field['name'] + ('_raw' if field.get('table', table) != table else ''):
(field['line_l'](record)) if 'line_l' in field else (field.get('l', lambda l:l)(record[field['col_name']])) #(field['l'] if 'l' in field else lambda x:x)(record[field['col_name']])
for field in schema if 'col_name' in field # Add only fields that have col_name.
}) for record in data] # for field in schema if not field['type'] == 'ManyToManyField'}) for record in data]
def create_multi_dictionary():
'''
Create multi dictionary for multi_2
'''
multi_dictionary = defaultdict(list)
for field in schema:
if field.get('database', False) == 'multi_2':
multi_dictionary[field['table']].append(field)
return multi_dictionary
multi_dictionary = create_multi_dictionary()
def create_multi_record(index, record):
all_multi_value_lists = []
for multi_key, multi_fields in multi_dictionary.items():
#Get the values of each multi field
multi_values_values = []
multi_values_keys = []
for multi_field in multi_fields:
field_value = record[multi_field['col_name']]
field_value_splitted = multi_field['l_multi'](field_value)
multi_values_keys.append(multi_field['name'])
multi_values_values.append(field_value_splitted)
# Make sure that all lists has the same number of values
set_of_the_length_of_all_values = set(map(len, multi_values_values))
if len(set_of_the_length_of_all_values) != 1:
#error_message = 'Index: {} . Fields do not have the same size..'.format(index)
error_message = 'Multi fields do not have the same size..'
import_errors[error_message] += 1
print (error_message)
return None
#print ('multi_values_values:')
#print (multi_values_values)
#print ('multi_values_keys')
#print (multi_values_keys)
multi_values_list_of_dicts = [dict(zip(multi_values_keys,x)) for x in zip(*multi_values_values)]
# [{'gene': 'NBPF9', 'transcript': 'NM_001037675.3', 'location': 'exonic', 'function': 'missense', 'codon': 'CGC', 'exon': '7', 'protein': 'p.His295Arg', 'coding': 'c.885A>G', 'sift': None}, {'gene': 'NBPF8', 'transcript': 'NM_001037501.2', 'location': 'exonic', 'function': 'missense', 'codon': 'CGC', 'exon': '6', 'protein': 'p.His295Arg', 'coding': 'c.885A>G', 'sift': None}, {'gene': 'NBPF8', 'transcript': 'NR_102404.1', 'location': 'exonic_nc', 'function': None, 'codon': None, 'exon': '6', 'protein': None, 'coding': None, 'sift': None}, {'gene': 'NBPF8', 'transcript': 'NR_102405.1', 'location': 'exonic_nc', 'function': None, 'codon': None, 'exon': '5', 'protein': None, 'coding': None, 'sift': None}, {'gene': 'NBPF9', 'transcript': 'NM_001277444.1', 'location': 'exonic', 'function': 'missense', 'codon': 'CGC', 'exon': '7', 'protein': 'p.His295Arg', 'coding': 'c.885A>G', 'sift': None}]
#print (multi_values_list_of_dicts)
all_multi_value_lists.append(multi_values_list_of_dicts)
# Combine multiple values
#print (reduce(lambda x,y: x*y, all_multi_value_lists))
if not all_multi_value_lists:
return None
ret = [dict(reduce(operator.or_, [y.items() for y in x])) for x in product(*all_multi_value_lists)]
#print ('Multivalues:', len(ret))
#print (ret)
return ret
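# Sketch of what create_multi_record() produces (field names and values are hypothetical,
# since no field in the current schema uses database == 'multi_2'):
#   multi_dictionary = {'T1': [<field 'A'>, <field 'B'>]} with record values 'x|y' and '1|2'
#   gives per-table dicts [{'A': 'x', 'B': '1'}, {'A': 'y', 'B': '2'}].
#   With a second multi_2 table the result is the cartesian product of the per-table dicts,
#   merged into single dicts -- one table_db row per combination.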
if True:
instances = []
for line_index, record in enumerate(data):
#print (line_index, record['# locus'])
if (line_index+1) % 1000 == 0:
print ('{} Imported records: {}/{} {:.1%}'.format(print_now(), line_index+1, len(data), line_index/len(data)))
table_db_options = {}
for field in schema:
if not 'col_name' in field: # Add only fields that have col_name.
continue
key = field['name']
if field.get('database', '') == 'multi_2':
continue # Later add multi_2 fields
elif field.get('database', '') == 'multi_1':
key = field['name'] + '_raw'
try:
if 'line_l' in field:
value = field['line_l'](record)
elif 'l' in field:
# col_name might not exist in record! Data is not supposed to contain all fields!
if not field['col_name'] in record:
continue
value = field['l'](record[field['col_name']])
else:
# col_name might not exist in record! Data is not supposed to contain all fields!
if not field['col_name'] in record:
continue
value = record[field['col_name']]
except ZazzException as t_exception:
e_message = str(t_exception)
e_info = t_exception.info
import_errors[e_message] += 1
value = None
except Exception as e:
print ('Record:')
print (record)
print ('Index:', line_index)
raise e
if pd.isnull(value):
value = None # np.nan confuses django when attempting: int(np.nan)
table_db_options[key] = value
multi_records = create_multi_record(line_index, record)
if multi_records:
for multi_record in multi_records:
table_db_options = {**table_db_options, **multi_record}
instances.append(table_db(**table_db_options))
else:
#print (table_db_options)
instances.append(table_db(**table_db_options))
count = len(instances)
print ('Adding IDs..')
for i, instance in enumerate(instances):
instance.id = i
print ('{} Bulk creating main objects..'.format(print_now()))
# bulk_create does not work with many-to-many relationships. ..sniff...
# https://docs.djangoproject.com/en/2.0/ref/models/querysets/
if False:
'''
For testing
'''
print (serializers.serialize("json", instances, indent=4))
for inst in instances:
inst.save()
print (inst.pk)
if True:
table_db.objects.bulk_create(instances)
print (' {} Done'.format(print_now()))
print ('Indexing main objects..')
querySet = table_db.objects.filter(id__gte=0, id__lt=count)
assert querySet.count() == count
index = {x.id:x for x in querySet}
m2m_index = {}
print ('Creating many to many relationships..')
#errors_1 = 0
def process_multi_1(store):
errors_1 = 0
# m2m_objects: store in memory ALL m2m object, so that we can bulk import them later
m2m_objects = defaultdict(list)
# For each record store which many to many has
m2m_object_references = defaultdict(dict)
for id_, record in enumerate(data):
instance = index[id_]
if id_ % 1000 == 0:
print ('{} Entries: {}/{}'.format(print_now(), id_+1, count))
#l_multi is obligatory
for m2m_table in m2m_tables:
try:
# field['col_name'] in record : col_name does not have to be present in record!
m2m_fields = OrderedDict({field['name']: field['l_multi'](record[field['col_name']]) for field in schema if field.get('table', None) == m2m_table and field['col_name'] in record})
except ZazzException as e:
import_errors[str(e)] += 1
print (str(e))
m2m_fields = {}
#assert that all have the same length
if not len(set(len(x) for x in m2m_fields.values())) == 1:
print ('Index: {} . Fields do not have the same size..'.format(id_))
debug = {field['name']: record[field['col_name']] for field in schema if field.get('table', None) == m2m_table and field['col_name'] in record}
#print (debug)
#print (m2m_fields)
errors_1 += 1
m2m_fields = {}
#raise Exception()
#Create database objects
# {a: [1,2] , b: [3,4]} --> [{a:1, b:3} , {a:2, b:4}]. See also create_attribute_records()
m2m_fields = [dict(zip(m2m_fields.keys(), x)) for x in zip(*m2m_fields.values())]
current_length = len(m2m_objects[m2m_table])
m2m_objects[m2m_table].extend(m2m_fields)
m2m_object_references[id_][m2m_table] = (current_length, current_length+len(m2m_fields))
# m2m_fields: [{'Gene': 'CLCNKB', 'Transcript': 'NM_000085.4'}, {'Gene': 'CLCNKB', 'Transcript': 'NM_001165945.2'}]
if not m2m_fields:
# Do nothing.
#getattr(getattr(instance, m2m_table), 'set')(None)
#instance.save()
continue
if False:
'''
Always create new multi object
'''
m2m_objects = [getattr(models, m2m_table)(**m2m_field) for m2m_field in m2m_fields]
#Save objects
for o in m2m_objects:
o.save()
if False:
'''
Create only if they don't exist
'''
m2m_objects = [getattr(models, m2m_table).objects.get_or_create(**m2m_field)[0] for m2m_field in m2m_fields]
if store:
'''
Fetch the m2m objects (assumes they have already been created in the DB)
'''
m2m_objects = [getattr(models, m2m_table).objects.get(**m2m_field) for m2m_field in m2m_fields]
#print (m2m_table, m2m_fields)
#Add it to the main instance
if False:
getattr(getattr(instance, m2m_table), 'set')(m2m_objects)
if store:
#Save instance
instance.save()
return m2m_objects, m2m_object_references, errors_1
m2m_objects, m2m_object_references, errors_1 = process_multi_1(store=False)
print ('Bulk creating Many2Many Objects')
table_insance_objects = {}
for m2m_table, m2m_values in m2m_objects.items():
print (' Bulk creating:', m2m_table)
table_instance = getattr(models, m2m_table)
table_insance_objects[m2m_table]= [table_instance(**x) for x in m2m_values]
getattr(models, m2m_table).objects.bulk_create(table_insance_objects[m2m_table])
print (' Getting Primary Key of:', m2m_table)
table_insance_objects[m2m_table] = table_instance.objects.all().order_by('pk')
print ('Connecting main instance with m2m..')
#Create through objects
through_objects = {m2m_table: getattr(Samples, m2m_table).through for m2m_table in m2m_tables}
for id_, record in enumerate(data):
if id_ % 1000 == 0:
print ('{} {}/{}'.format(print_now(), id_, len(data)))
instance = index[id_]
#
if not id_ in m2m_object_references:
continue
for table_name, table_indexes in m2m_object_references[id_].items():
#print (table_insance_objects[table_name][table_indexes[0]: table_indexes[1]+1])
if True:
'''
2019-04-18 16:09:42 0/10000
2019-04-18 16:10:15 1000/10000 --> 33
2019-04-18 16:10:48 2000/10000 --> 33
2019-04-18 16:11:22 3000/10000 --> 34
2019-04-18 16:11:57 4000/10000 --> 35
2019-04-18 16:12:33 5000/10000 --> 36
'''
getattr(getattr(instance, table_name), 'set')(table_insance_objects[table_name][table_indexes[0]: table_indexes[1]+1])
if False:
'''
2019-04-18 16:05:47 0/10000
2019-04-18 16:06:14 1000/10000 --> 27
2019-04-18 16:06:43 2000/10000 --> 29
2019-04-18 16:07:13 3000/10000 --> 30
2019-04-18 16:07:48 4000/10000 --> 35
2019-04-18 16:08:27 5000/10000 --> 39
'''
tmp1 = [{table_name.lower() + '_id': table_insance_objects[table_name][i].pk, 'samples_id': instance.pk} for i in range(table_indexes[0], table_indexes[1]+1)]
#print (tmp1)
tmp2 = [through_objects[table_name](**x) for x in tmp1]
#print (tmp2)
through_objects[table_name].objects.bulk_create(tmp2)
instance.save()
#a=1/0
print ('Errors 1:', errors_1)
print ('Annotating with external CSVs')
#Index external_internals
external_internals = {external['name']:external for external in externals if external['type'] == 'internal'}
for external in externals:
if external['type'] == 'csv':
external_name = external['name']
print (' Name: {}'.format(external_name))
print (' Loading file: {}'.format(external['filename']))
csv = pd.read_csv(external['filename'], **external['read_csv_options'])
csv_dict = csv.to_dict('index')
print (' DONE. Length: {}'.format(len(csv_dict)))
#Take the central table object
all_objects = table_db.objects.all()
print (' Annotating {} main records'.format(all_objects.count()))
o_counter = 0
o_annotated = 0
for o in all_objects:
o_counter += 1
if o_counter % 100 == 0:
print (' {}. Objects: {} Annotated: {}'.format(print_now(), o_counter, o_annotated))
matched = external['matcher'](csv, o) # THIS IS VERY SLOW!!
if matched.empty:
continue
o_annotated += 1
# This is not empty
# Create foreign object
# Create not M2M
not_m2m = {field['name']:field['l'](matched) for field in external['fields'] if not field['type'] == 'ManyToManyField'}
foreign_object = get_model(external_name)(**not_m2m)
# Save model
foreign_object.save()
# Create M2M objects
m2m = {field['name']: field['l_m2m'](matched) for field in external['fields'] if field['type'] == 'ManyToManyField'}
#print (m2m) # {'Clinical_Significance': [{'Clinical Significance': 'Benign'}]}
m2m_objects = {k: [get_model(k).objects.get_or_create(**x)[0] for x in v] for k,v in m2m.items()}
#print (m2m_objects)
#Connect with foreign_object
for k, v in m2m_objects.items():
getattr(foreign_object, k).set(v)
#Save foreign_object
foreign_object.save()
#Now that we have the foreign_object stored, we can connect it with the foreign key of the main object
setattr(o, external_name, foreign_object) # o.external_name = foreign_object
#Update main object
o.save()
print ('Annotated {} out of {} records'.format(o_annotated, o_counter))
print ('DONE!')
if False: # This is legacy code. To be removed...
for field in schema:
if not field['type'] == 'ManyToManyField':
continue
if instance is None:
instance = index[id_]
values = field['l_multi'](record[field['col_name']])
#Store the values
m2m_db = getattr(models, field['name'])
if not field['name'] in m2m_index:
m2m_index[field['name']] = {}
#Perform as little as possible queries to the database
for value in values:
if not value in m2m_index[field['name']]:
m2m_index[field['name']][value] = m2m_db.objects.get_or_create(**{field['name']:value})[0]
values_obj = [m2m_index[field['name']][value] for value in values]
#Create M2M relationship
getattr(getattr(instance, field['name']+'_multi'), 'set')(values_obj)
instance.save()
print ('IMPORT ERRORS')
print (json.dumps(import_errors, indent=4))
print ('DONE')
def comma_int(x):
return int(x.replace(',', ''))
def isNone(x):
return None if pd.isnull(x) else x
def splitUnique(field_name, sep, t=str):
'''
t = type
'''
def f(x):
if pd.isnull(x):
return [None]
if not hasattr(x, 'split'):
if t == str:
return [str(x)]
elif t == int:
return [int(x)]
elif t == float:
return [float(x)]
raise ZazzException(f'Invalid type: {type(x).__name__} in field: {field_name}')
return [y if y else None for y in x.split(sep)]
return f
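# A few illustrative calls (values made up):
#   splitUnique('Gene', '|')('NBPF9|NBPF8')   # --> ['NBPF9', 'NBPF8']
#   splitUnique('Gene', '|')('NBPF9||NBPF8')  # --> ['NBPF9', None, 'NBPF8']   (empty items become None)
#   splitUnique('Sift', '|', float)(0.05)     # --> [0.05]   (non-strings are wrapped after casting to t)
#   splitUnique('Gene', '|')(float('nan'))    # --> [None]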
def join_set_sep(sep):
def f(x):
if pd.isnull(x):
return None
return sep.join(sorted(list(set(x.split('|')))))
return f
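# Illustrative call (values made up). Note: the input is always split on '|',
# sep only controls the output separator:
#   join_set_sep(', ')('Benign|Benign|Likely benign')  # --> 'Benign, Likely benign'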
def parse_vcf(fn):
'''
'''
print ('Parsing VCF:', fn)
ret = {}
c=0
with open(fn) as f:
for l in f:
if l[0] == '#':
continue
c += 1
if c%10000 == 0:
print ('VCF LINES READ:', c)
ls = l.strip().split()
chromosome = ls[0].replace('chr', '')
position = int(ls[1])
reference = ls[3]
alternative = ls[4]
genotype = ls[9].split(':')[0]
#print (genotype)
#print (chromosome)
#print (position)
#print (reference)
#print (alternative)
if len(reference) != 1:
continue
if len(alternative) != 1:
continue
if genotype == '0/1':
geno = 'HET'
elif genotype == '1/1':
geno = 'HOM'
else:
# Anything other than '0/1' or '1/1' (e.g. '0/0', './.') is unexpected here; fail loudly.
raise ZazzException(f'Unexpected genotype in VCF: {genotype}')
ret[(chromosome, position)] = (reference, alternative, geno)
print ('VCF LINES TOTAL:', c)
return ret
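# Hypothetical usage (filename and variant are made up):
#   variants = parse_vcf('sample.vcf')
#   variants[('1', 115256529)]   # --> ('T', 'C', 'HET')   keys are (chromosome_without_chr, position)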
######### BED ##########
'''
http://genome.ucsc.edu/FAQ/FAQformat#format1
The first three required BED fields are:
chrom - The name of the chromosome (e.g. chr3, chrY, chr2_random) or scaffold (e.g. scaffold10671).
chromStart - The starting position of the feature in the chromosome or scaffold. The first base in a chromosome is numbered 0.
chromEnd - The ending position of the feature in the chromosome or scaffold. The chromEnd base is not included in the display of the feature. For example, the first 100 bases of a chromosome are defined as chromStart=0, chromEnd=100, and span the bases numbered 0-99.
====A====
chr1 5 6 K
chr1 10 20 L
chr1 25 26 M
====B====
chr1 7 9 A AA
chr1 8 10 B BB
chr1 9 12 C CC
chr1 10 11 D DD
chr1 10 20 E EE
chr1 12 14 F FF
chr1 17 25 G GG
chr1 18 20 H HH
a = BedTool('a.bed')
b = BedTool('b.bed')
#print (a.intersect(b, loj=True))
a.intersect(b, loj=True).saveas('c.bed')
chr1 5 6 K . -1 -1 . .
chr1 10 20 L chr1 9 12 C CC
chr1 10 20 L chr1 10 11 D DD
chr1 10 20 L chr1 10 20 E EE
chr1 10 20 L chr1 12 14 F FF
chr1 10 20 L chr1 17 25 G GG
chr1 10 20 L chr1 18 20 H HH
chr1 25 26 M . -1 -1 . .
'''
def bed_create_from_db(querySet, filename):
'''
QuerySet must be ordered, According to position!
'''
print ('Saving DB objects in BED format in: {}'.format(filename))
with open(filename, 'w') as f:
c = 0
for o in querySet:
c += 1
if c % 1000 == 0:
print (' Saved: {} records'.format(c))
record = [
o.Chromosome,
str(o.Position),
str(o.Position+1), ## FIX ME: end coordinate assumes a single-base feature (BED intervals are 0-based, half-open)
str(o.id),
]
f.write('\t'.join(record) + '\n')
print (' Done')
def bed_loj(filename_1, filename_2, output_filename):
'''
https://daler.github.io/pybedtools/autodocs/pybedtools.bedtool.BedTool.intersect.html
'''
print (' Intersecting LOJ with BedTools..')
a = BedTool(filename_1)
b = BedTool(filename_2)
a.intersect(b, loj=True).saveas(output_filename)
print (' DONE')
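# Hypothetical end-to-end sketch (filenames and queryset are made up): dump the main table to BED,
# then left-outer-join it against an annotation BED file with pybedtools:
#   bed_create_from_db(table_db.objects.all().order_by('Position'), 'main_table.bed')
#   bed_loj('main_table.bed', 'annotation.bed', 'main_table_loj.bed')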
######### END OF BED ###
def chromosome_sizes_hg19():
'''
http://hgdownload.cse.ucsc.edu/goldenPath/hg19/bigZips/hg19.chrom.sizes
'''
return {
'chr1': 249250621,
'chr2': 243199373,
'chr3': 198022430,
'chr4': 191154276,
'chr5': 180915260,
'chr6': 171115067,
'chr7': 159138663,
'chrX': 155270560,
'chr8': 146364022,
'chr9': 141213431,
'chr10': 135534747,
'chr11': 135006516,
'chr12': 133851895,
'chr13': 115169878,
'chr14': 107349540,
'chr15': 102531392,
'chr16': 90354753,
'chr17': 81195210,
'chr18': 78077248,
'chr20': 63025520,
'chrY' : 59373566,
'chr19': 59128983,
'chr22': 51304566,
'chr21': 48129895,
'chrM' : 16571,
}
def list_of_chromosomes():
return list(map(lambda x : 'chr' + x, list(map(str, range(1,23)) ) + ['X', 'Y', 'M']))
def accumulate_chromosome_sizes_hg19():
s = chromosome_sizes_hg19()
m = list_of_chromosomes()
offset = 0
ret = {}
for chromosome in m:
ret[chromosome] = offset
offset += s[chromosome]
return ret
def accumulative_position(chromosome, position):
chr_index = g['list_of_chromosomes'].index(chromosome)
if chr_index == 0:
return int(position)
return g['accumulate_chromosome_sizes_hg19'][g['list_of_chromosomes'][chr_index]] + int(position)
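# Worked example, assuming g[...] has been populated with the two helpers above:
#   accumulative_position('chr1', 1000)  # --> 1000                        (chr1 has no offset)
#   accumulative_position('chr2', 1000)  # --> 249250621 + 1000 = 249251621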
def pandas_to_vcf(df, chromosome_f, position_f, reference_f, alternative_f, vcf_filename):
print ('Converting pandas to VCF')
input_data = df.to_dict('records')
f = open(vcf_filename, 'w')
f.write('##fileformat=VCFv4.0\n')
f.write('\t'.join(['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO']) + '\n')
for line_index, record in enumerate(input_data):
#print (record)
if line_index%10000 == 0:
print ('Lines: {}/{}'.format(line_index, len(input_data)))
chromosome = chromosome_f(record)
if 'chr' in chromosome.lower():
chromosome = chromosome.replace('chr', '')
position = position_f(record)
reference = reference_f(record)
alternative = alternative_f(record)
try:
#savoura = convert_to_base64(json.dumps(record))
savoura = '.'
except TypeError as e:
if str(e) == "Object of type Timestamp is not JSON serializable":
print ('Error:', str(e), ' ignoring..')
continue
else:
raise e
to_print = [chromosome, position, '.', reference, alternative, '.', '.', savoura]  # CHROM POS ID REF ALT QUAL FILTER INFO
to_print_str = '\t'.join(map(str, to_print)) + '\n'
f.write(to_print_str)
f.close()
print (f'Created file: {vcf_filename}')
def setup_1():
'''
Sets up DB + Javascript
'''
#print ('Adding accumulative_position..')
#df['accumulative_position'] = df.apply(lambda x: accumulative_position(*x['# locus'].split(':')), axis=1)
#print (' ..DONE')
def split_location(x):
#print (x)
if pd.isnull(x):
return 'UNKNOWN'
return x.split('|')[0]
def log_f(x):
#print (x)
if str(x)=='0.0':
return None
return int(-np.log10(float(x)))
def allele_coverage(x):
#print (x)
if '.' in x:
sp = x.split('.')
elif ',' in x:
sp = x.split(',')
if not len(sp) == 2:
error_message = 'More than 2 values in allele coverage'
import_errors[error_message] += 1
#print (x)
#assert False
return [None, None]
return list(map(int, sp))
def allele_coverage_2(x):
ac = x['allele_coverage']
#print ('Allele Coverage:', ac)
if not ',' in str(ac) and not '.' in str(ac):
int_ac = int(ac)
str_ac = str(ac)
coverage = int(x['coverage'])
for i in range(1,len(str_ac)):
part1 = int(str_ac[:i])
part2 = int(str_ac[i:])
if part1 + part2 == int(coverage):
ret = [part1, part2]
#print (f'Allele Coverage: {ac} Coverage: {coverage} Coverage: {ret}')
return ret
#print (f'Allele Coverage: {ac}')
#print ('Coverage:', coverage)
e = ZazzException('Invalid Coverage value')
e.set_info({'coverage': coverage, 'allele_coverage': ac})
raise e
else:
return allele_coverage(ac)
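# Worked examples (records are made up):
#   allele_coverage_2({'allele_coverage': '10,36', 'coverage': 46})  # --> [10, 36]
#   allele_coverage_2({'allele_coverage': '1234', 'coverage': 46})   # --> [12, 34]   (12 + 34 == 46)
# The second form handles exports where the separator was lost and the two counts were glued together.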
def maf_f(x):
if type(x).__name__ in ['int', 'float']:
ret = x
else:
ret = float(x.split(':')[0])
return ret
def sift_raw_f(x):
#return ','.join(str(x).split('|'))
return x
def f5000Exomes_AMAF(name):
def f(x):
if pd.isnull(x):
return None
if x.count(':') != 2:
e = ZazzException('Invalid 5000Exomes values')
e.set_info({'value': x})
raise e
values = dict(y.split('=') for y in x.split(':'))
return float(values[name])
return f
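# Illustrative call (values made up):
#   f5000Exomes_AMAF('EMAF')('AMAF=0.011:EMAF=0.022:GMAF=0.015')  # --> 0.022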
def cosmic_multi_f(x):
# print (x)
# print (type(x))
if str(x) == 'nan':
return ['NaN']
if pd.isnull(x):
return ['NaN']
if not str(x):
return ['NaN']
return str(x).split(':')
def dbsnp_multi_f(x):
if not 'rs' in str(x):
return ['NaN']
return x.split(':')
def go_f(x):
if not 'GO' in str(x):
return ['NaN']
return re.findall(r'GO:[\d]+', x)
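# Illustrative calls (values made up):
#   go_f('GO:0005515|GO:0046872')  # --> ['GO:0005515', 'GO:0046872']
#   go_f('---')                    # --> ['NaN']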
def omim_f(x):
if not re.search(r'\d', str(x)):
return ['NaN']
return str(x).split(':')
def phylop_f(x):
if not re.search(r'\d', str(x)):
return [None]
return list(map(float, str(x).split(',')))
def alternative_f(x):
return ','.join(list(set(x['genotype'].split('/')) - set(x['ref'])))
def ANN_AAChange_refGene_columns(index):
def ret(s):
#print ('ANN_AAChange_refGene_exon:', s)
if s in ['.', 'UNKNOWN']:
return [None]
splitted = [x.split(':') for x in s.split(',')]
return [x[index] if index<len(x) else None for x in splitted]
#return [x.split(':')[index] for x in s.split(',')]
return ret
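# Illustrative call (the AAChange string is made up but follows the ANNOVAR
# Gene:Transcript:exon:coding:protein layout used by the fields below):
#   ANN_AAChange_refGene_columns(1)('NBPF9:NM_001037675:exon7:c.885A>G:p.H295R,NBPF8:NM_001037501:exon6:c.885A>G')
#   # --> ['NM_001037675', 'NM_001037501']
#   ANN_AAChange_refGene_columns(4)('NBPF8:NM_001037501:exon6:c.885A>G')  # --> [None]   (missing index)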
def VEP_DOMAINS_f(line):
'''
Superfamily_domains:SSF54277&SMART_domains:SM00666&PIRSF_domain:PIRSF000554&Gene3D:192.168.127.12&Pfam_domain:PF00564&hmmpanther:PTHR24357&hmmpanther:PTHR24357:SF60|||||Superfamily_domains:SSF54277&Pfam_domain:PF00564&Gene3D:192.168.127.12&hmmpanther:PTHR24357&hmmpanther:PTHR24357:SF59
'''
ret = line.split('|')
#b = [len(list(filter(lambda t: 'hmmpanther' in t, x.split('&')))) for x in ret]
s2 = [x.split('&') for x in ret]
s3 = [dict([(y.split(':')[0] if y.count(':')==1 else "hmmpanther2", ':'.join(y.split(':')[1:])) for y in x if y]) for x in s2]
s4 = defaultdict(set)
for x in s3:
for k,v in x.items():
s4[k].add(v)
s5 = set(list(map(len, s4.values())))
if s5 == set():
pass
elif s5 == set([1]):
pass
elif s5 == set([0]):
pass
else:
print (line)
print (s4)
print (s5)
assert False
return ret
# def zazz_clinvar_f(s):
#
# print ('=====1====')
# print (s)
# print ('=====2====')
# return 'kostas'
def zazz_clinvarzazz_f(clinvar_field):
def f(s):
'''
Decode the base64-encoded ClinVar annotation in `s` and return a list with the
requested `clinvar_field` value for every RCV/SCV entry.
'''
s_splited = s.split('|')
different = set(s_splited)
assert len(different) == 1
first = s_splited[0]
if first == '':
return [None]
#print ('==1==')
#print (first)
#print ('==2==')
decoded = decode_base64_json(first)
#print ('==3==')
#print (json.dumps(decoded, indent=4))
# zazz_clinvarzazz_f Clinical Significance
ret = []
for rcv_code, rcv_value in decoded['RCV'].items():
scv_dictionary = rcv_value['SCV']
rcv_data = rcv_value['RCV_data']
Clinical_significance = rcv_data['Clinical significance']
condition_name = '/'.join(rcv_data['Condition name'])
rcv_review_status = rcv_data['Review status'] # "criteria provided, single submitter",
Review_status_stars = str(rcv_data['Review status stars'])
for scv_code, scv_value in scv_dictionary.items():
scv_data = scv_value['SCV_data']
interpretation = scv_data['interpretation']
scv_review_status = scv_data['Review status']
if clinvar_field == 'interpretation':
ret.append(interpretation)
elif clinvar_field == 'scv_review_status':
ret.append(scv_review_status)
elif clinvar_field == 'clinical_significance':
ret.append(Clinical_significance)
elif clinvar_field == 'condition_name':
ret.append(condition_name)
elif clinvar_field == 'rcv_review_status':
ret.append(rcv_review_status)
elif clinvar_field == 'review_status_stars':
ret.append(Review_status_stars)
else:
raise ZazzException('Unknown clinvar field: {}'.format(clinvar_field))
#print ('==4===')
#print (ret)
#a=1/0
return ret
return f
fields = [
# Main entries
{'name': 'Chromosome', 'col_name': 'Chromosome', 'type': 'CharField', 'parameters': {'max_length': '5'}, 'l': lambda x: x, 'order': 1},
{'name': 'Position', 'col_name': 'Position', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits':20, 'order': 2},
{'name': 'Reference', 'col_name': 'Reference', 'type': 'CharField', 'parameters': {'max_length': '255'}, 'component': 'freetext', 'l': lambda x:x, 'order': 3},
{'name': 'Alternative', 'col_name': 'Alternative', 'type': 'CharField', 'parameters': {'max_length': '255'}, 'component': 'freetext', 'l': lambda x:x, 'order': 4},
# FIELDS FROM RAW VCF
{'name':'RAW_NS', 'col_name': 'NS', 'type': 'IntegerField', 'parameters': {'null': True}, 'l': lambda x:x, 'xUnits': 10, 'order': 11 },
{'name':'RAW_HS', 'col_name': 'HS', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'l': lambda x:x, 'order': 12 },
{'name':'RAW_DP', 'col_name': 'DP', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 13 },
{'name':'RAW_RO', 'col_name': 'RO', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 14 },
{'name':'RAW_AO', 'col_name': 'AO', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 15 },
{'name':'RAW_SRF', 'col_name': 'SRF', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 16 },
{'name':'RAW_SRR', 'col_name': 'SRR', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 17 },
{'name':'RAW_SAF', 'col_name': 'SAF', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 18 },
{'name':'RAW_SAR', 'col_name': 'SAR', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 19 },
{'name':'RAW_FDP', 'col_name': 'FDP', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 20 },
{'name':'RAW_FRO', 'col_name': 'FRO', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 21 },
{'name':'RAW_FAO', 'col_name': 'FAO', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 22 },
{'name':'RAW_AF', 'col_name': 'AF', 'type': 'FloatField', 'parameters': {}, 'l': lambda x:x, 'order': 23 },
{'name':'RAW_QD', 'col_name': 'QD', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 24 },
{'name':'RAW_FSRF', 'col_name': 'FSRF', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 25 },
{'name':'RAW_FSRR', 'col_name': 'FSRR', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 26 },
{'name':'RAW_FSAF', 'col_name': 'FSAF', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 27 },
{'name':'RAW_FSAR', 'col_name': 'FSAR', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 28 },
{'name':'RAW_FXX', 'col_name': 'FXX', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 29 },
{'name':'RAW_TYPE', 'col_name': 'TYPE', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'l': lambda x:x, 'order': 30 },
{'name':'RAW_LEN', 'col_name': 'LEN', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 31 },
{'name':'RAW_HRUN', 'col_name': 'HRUN', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 32 },
{'name':'RAW_FR', 'col_name': 'FR', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'l': lambda x:x, 'order': 33 },
{'name':'RAW_RBI', 'col_name': 'RBI', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 34 },
{'name':'RAW_FWDB', 'col_name': 'FWDB', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 35 },
{'name':'RAW_REVB', 'col_name': 'REVB', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 36 },
{'name':'RAW_REFB', 'col_name': 'REFB', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 37 },
{'name':'RAW_VARB', 'col_name': 'VARB', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 38 },
{'name':'RAW_SSSB', 'col_name': 'SSSB', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 39 },
{'name':'RAW_SSEN', 'col_name': 'SSEN', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 40 },
{'name':'RAW_SSEP', 'col_name': 'SSEP', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 41 },
{'name':'RAW_STB', 'col_name': 'STB', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 42 },
{'name':'RAW_STBP', 'col_name': 'STBP', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 43 },
{'name':'RAW_PB', 'col_name': 'PB', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 44 },
{'name':'RAW_PBP', 'col_name': 'PBP', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 45 },
{'name':'RAW_MLLD', 'col_name': 'MLLD', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'order': 46 },
{'name':'RAW_OID', 'col_name': 'OID', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'l': lambda x:x, 'order': 47 },
{'name':'RAW_OPOS', 'col_name': 'OPOS', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 48 },
{'name':'RAW_OREF', 'col_name': 'OREF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'l': lambda x:x, 'order': 49 },
{'name':'RAW_OALT', 'col_name': 'OALT', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'l': lambda x:x, 'order': 50 },
{'name':'RAW_OMAPALT', 'col_name': 'OMAPALT', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'l': lambda x:x, 'order': 51 },
{'name':'RAW_GT_GT', 'col_name': 'GT_GT', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'l': lambda x:x, 'order': 52 },
{'name':'RAW_GT_GQ', 'col_name': 'GT_GQ', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 53 },
{'name':'RAW_GT_DP', 'col_name': 'GT_DP', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 54 },
{'name':'RAW_GT_RO', 'col_name': 'GT_RO', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 55 },
{'name':'RAW_GT_AO', 'col_name': 'GT_AO', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 56 },
{'name':'RAW_GT_SRF', 'col_name': 'GT_SRF', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 57 },
{'name':'RAW_GT_SRR', 'col_name': 'GT_SRR', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 58 },
{'name':'RAW_GT_SAF', 'col_name': 'GT_SAF', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 59 },
{'name':'RAW_GT_SAR', 'col_name': 'GT_SAR', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x:x, 'xUnits': 10, 'order': 60 },
{'name':'RAW_GT_FDP', 'col_name': 'GT_FDP', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 61 },
{'name':'RAW_GT_FRO', 'col_name': 'GT_FRO', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 62 },
{'name':'RAW_GT_FAO', 'col_name': 'GT_FAO', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 63 },
{'name':'RAW_GT_AF', 'col_name': 'GT_AF', 'type': 'FloatField', 'parameters': {}, 'l': lambda x:x, 'order': 64 },
{'name':'RAW_GT_FSRF', 'col_name': 'GT_FSRF', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 65 },
{'name':'RAW_GT_FSRR', 'col_name': 'GT_FSRR', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 66 },
{'name':'RAW_GT_FSAF', 'col_name': 'GT_FSAF', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 67 },
{'name':'RAW_GT_FSAR', 'col_name': 'GT_FSAR', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x:x, 'xUnits': 10, 'order': 68 },
# FIELDS FROM IONTORRENT
{'name': 'ION_Type', 'col_name': 'type', 'type': 'CharField', 'parameters': {'max_length': '100'}, 'order': 102},
#{'name': 'Position', 'col_name': 'accumulative_position', 'type': 'IntegerField', 'parameters': {}, 'l': lambda x: x, 'order': 2},
{'name': 'ION_Reference', 'col_name': 'ref', 'type': 'CharField', 'parameters': {'max_length': '255'}, 'component': 'freetext', 'l': lambda x:x, 'order': 105},
{'name': 'ION_Length', 'col_name': 'length', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l':lambda x: int(x), 'xUnits': 20, 'order': 106, },
{'name': 'ION_Genotype', 'col_name': 'genotype', 'type': 'CharField', 'parameters': {'max_length': '255'}, 'component': 'freetext', 'l': lambda x:x, 'order': 107},
{'name': 'ION_PValue', 'col_name': 'pvalue', 'type': 'IntegerField', 'parameters': {'null': True}, 'l': log_f, 'xUnits': 20, 'order': 108, },
{'name': 'ION_Coverage', 'col_name': 'coverage', 'type': 'IntegerField', 'parameters': {'null': 'True'}, 'l': lambda x: int(x), 'xUnits': 20, 'order': 109, },
{'name': 'ION_Allele_Coverage_1', 'col_name': 'allele_coverage', 'type': 'IntegerField', 'parameters': {'null': True}, 'line_l': lambda x: allele_coverage_2(x)[0], 'xUnits': 20, 'order': 110, },
{'name': 'ION_Allele_Coverage_2', 'col_name': 'allele_coverage', 'type': 'IntegerField', 'parameters': {'null': True}, 'line_l': lambda x: allele_coverage_2(x)[1], 'xUnits': 20, 'order': 111, },
{'name': 'ION_MAF', 'col_name': 'maf', 'type': 'FloatField', 'parameters': {'null': True}, 'l': lambda x : maf_f(x), 'xUnits': 20, 'order': 112, },
{'name': 'ION_Gene', 'col_name': 'gene', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l': lambda x:x, 'l_multi': splitUnique('Gene', '|'), 'table': 'ION_Transcripts', 'order': 113},
{'name': 'ION_Transcript', 'col_name': 'transcript', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l': lambda x:x, 'l_multi': splitUnique('Transcript', '|'), 'table': 'ION_Transcripts', 'order': 114},
{'name': 'ION_Location', 'col_name': 'location', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l': lambda x:x, 'l_multi': splitUnique('Location', '|'), 'table': 'ION_Transcripts', 'order': 115},
{'name': 'ION_Function', 'col_name': 'function', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l': lambda x:x, 'l_multi': splitUnique('Function', '|'), 'table': 'ION_Transcripts', 'order': 116}, ## Produces 10/2000 errors!
{'name': 'ION_Codon', 'col_name': 'codon', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l': lambda x:x, 'l_multi': splitUnique('Codon', '|'), 'table': 'ION_Transcripts', 'order': 117},
{'name': 'ION_Exon', 'col_name': 'exon', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l': lambda x:x, 'l_multi': splitUnique('Exon', '|'), 'table': 'ION_Transcripts', 'order': 118},
{'name': 'ION_Protein', 'col_name': 'protein', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l': lambda x:x, 'l_multi': splitUnique('Protein', '|'), 'table': 'ION_Transcripts', 'order': 119},
{'name': 'ION_Coding', 'col_name': 'coding', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l': lambda x:x, 'l_multi': splitUnique('Coding', '|'), 'table': 'ION_Transcripts', 'order': 120},
{'name': 'ION_Sift', 'col_name': 'sift', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 20, 'database': 'multi_1', 'l': sift_raw_f, 'l_multi': splitUnique('Sift', '|', float), 'table': 'ION_Transcripts', 'order': 121},
{'name': 'ION_Polyphen', 'col_name': 'polyphen', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 20, 'database': 'multi_1', 'l': sift_raw_f, 'l_multi': splitUnique('Polyphen', '|', float), 'table': 'ION_Transcripts', 'order': 122},
{'name': 'ION_Grantham', 'col_name': 'grantham', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 20, 'database': 'multi_1', 'l': sift_raw_f, 'l_multi': splitUnique('Grantham', '|', float), 'table': 'ION_Transcripts', 'order': 123},
{'name': 'ION_NormalizedAlt', 'col_name': 'normalizedAlt', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l_multi': splitUnique('NormalizedAlt', '|'), 'table': 'ION_Transcripts', 'order': 124},
{'name': 'ION_F5000Exomes_AMAF', 'col_name': '5000Exomes', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': f5000Exomes_AMAF('AMAF'), 'order': 125},
{'name': 'ION_F5000Exomes_EMAF', 'col_name': '5000Exomes', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': f5000Exomes_AMAF('EMAF'), 'order': 126},
{'name': 'ION_F5000Exomes_GMAF', 'col_name': '5000Exomes', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': f5000Exomes_AMAF('GMAF'), 'order': 127},
{'name': 'ION_Clinvar', 'col_name': 'clinvar', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'l': lambda x:x, 'order': 128},
{'name': 'ION_COSMIC', 'col_name': 'cosmic', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l_multi': cosmic_multi_f, 'table': 'ION_cosmic', 'order': 129,
'renderer': '''
function(x) {
if (x.ION_COSMIC != 'NaN') {
return "<a href=https://cancer.sanger.ac.uk/cosmic/search?q=" + x.ION_COSMIC + ">" + x.ION_COSMIC + "</a>";
}
return 'NaN';
}
'''},
{'name': 'ION_DbSNP', 'col_name': 'dbsnp', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l_multi': dbsnp_multi_f, 'table': 'ION_dbsnp', 'order': 130,
'renderer': '''
function(x) {
if (x.ION_DbSNP != 'NaN') {
return "<a href=https://www.ncbi.nlm.nih.gov/snp/" + x.ION_DbSNP + ">" + x.ION_DbSNP + "</a>";
}
return 'NaN';
}
'''},
{'name': 'ION_Drugbank', 'col_name': 'drugbank', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l_multi': cosmic_multi_f, 'table': 'ION_drugbank', 'order': 131},
{'name': 'ION_GO', 'col_name': 'go', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l_multi': go_f, 'table': 'ION_go', 'order': 132},
{'name': 'ION_OMIM', 'col_name': 'omim', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l_multi': omim_f, 'table': 'ION_omim', 'order': 133,
'renderer': '''
function(x) {
if (x.ION_OMIM != 'NaN') {
return "<a href=https://www.omim.org/entry/" + x.ION_OMIM + ">" + x.ION_OMIM + "</a>";
}
return 'NaN';
}
'''
},
{'name': 'ION_Pfam', 'col_name': 'pfam', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'l_multi': omim_f, 'table': 'ION_pfam', 'order': 134,
'renderer': '''
function(x) {
if (x.ION_Pfam != 'NaN') {
return "<a href=https://pfam.xfam.org/family/" + x.ION_Pfam + ">" + x.ION_Pfam + "</a>";
}
return 'NaN';
}
'''
},
{'name': 'ION_Phylop', 'col_name': 'phylop', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'database': 'multi_1', 'l_multi': phylop_f, 'table': 'ION_phylop', 'order': 135},
########## FIELDS FROM ANNOVAR #########
{'name': 'ANN_Func_refGene', 'col_name': 'Func.refGene', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'order': 201},
{'name': 'ANN_Gene_refGene', 'col_name': 'Gene.refGene', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'order': 202},
{'name': 'ANN_GENEDETAIL_REFGENE', 'col_name': 'GeneDetail.refGene', 'type': 'CharField', 'parameters': {'max_length': '500', 'null': 'True'}, 'database': 'multi_1', 'table': 'ANN_GeneDetail_refGene', 'l_multi': lambda x : x.replace('\\x3d', '=').split('\\x3b'), 'order': 203},
{'name': 'ANN_ExonicFunc_refGene', 'col_name': 'ExonicFunc.refGene', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'order': 204},
{'name': 'ANN_AAChange_refGene_gene', 'col_name': 'AAChange.refGene', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'ANN_AAChange_refGene', 'l_multi': ANN_AAChange_refGene_columns(0), 'order': 205},
{'name': 'ANN_AAChange_refGene_transcript', 'col_name': 'AAChange.refGene', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'ANN_AAChange_refGene', 'l_multi': ANN_AAChange_refGene_columns(1), 'order': 206},
{'name': 'ANN_AAChange_refGene_exon', 'col_name': 'AAChange.refGene', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'ANN_AAChange_refGene', 'l_multi': ANN_AAChange_refGene_columns(2), 'order': 207},
{'name': 'ANN_AAChange_refGene_coding', 'col_name': 'AAChange.refGene', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'ANN_AAChange_refGene', 'l_multi': ANN_AAChange_refGene_columns(3), 'order': 208},
{'name': 'ANN_AAChange_refGene_protein', 'col_name': 'AAChange.refGene', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'ANN_AAChange_refGene', 'l_multi': ANN_AAChange_refGene_columns(4), 'order': 209},
{'name': 'ANN_cytoBand', 'col_name': 'cytoBand', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'order': 210},
{'name': 'ANN_ExAC_ALL', 'col_name': 'ExAC_ALL', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 211},
{'name': 'ANN_ExAC_AFR', 'col_name': 'ExAC_AFR', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 212},
{'name': 'ANN_ExAC_AMR', 'col_name': 'ExAC_AMR', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 213},
{'name': 'ANN_ExAC_EAS', 'col_name': 'ExAC_EAS', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 214},
{'name': 'ANN_ExAC_FIN', 'col_name': 'ExAC_FIN', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 215},
{'name': 'ANN_ExAC_NFE', 'col_name': 'ExAC_NFE', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 216},
{'name': 'ANN_ExAC_OTH', 'col_name': 'ExAC_OTH', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 217},
{'name': 'ANN_ExAC_SAS', 'col_name': 'ExAC_SAS', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 218},
{'name': 'ANN_avsnp147', 'col_name': 'avsnp147', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'component': 'freetext', 'order': 219},
{'name': 'ANN_SIFT_score', 'col_name': 'SIFT_score', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 220},
{'name': 'ANN_SIFT_pred', 'col_name': 'SIFT_pred', 'type': 'CharField', 'parameters': {'max_length': '10', 'null': 'True'}, 'order': 221},
{'name': 'ANN_Polyphen2_HDIV_score', 'col_name': 'Polyphen2_HDIV_score', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 222},
{'name': 'ANN_Polyphen2_HDIV_pred', 'col_name': 'Polyphen2_HDIV_pred', 'type': 'CharField', 'parameters': {'max_length': '10', 'null': 'True'}, 'order': 223},
{'name': 'ANN_Polyphen2_HVAR_score', 'col_name': 'Polyphen2_HVAR_score', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 224},
{'name': 'ANN_Polyphen2_HVAR_pred', 'col_name': 'Polyphen2_HVAR_pred', 'type': 'CharField', 'parameters': {'max_length': '10', 'null': 'True'}, 'order': 225},
{'name': 'ANN_LRT_score', 'col_name': 'LRT_score', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 226},
{'name': 'ANN_LRT_pred', 'col_name': 'LRT_pred', 'type': 'CharField', 'parameters': {'max_length': '10', 'null': 'True'}, 'order': 228},
{'name': 'ANN_MutationTaster_score', 'col_name': 'MutationTaster_score', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 229},
{'name': 'ANN_MutationTaster_pred', 'col_name': 'MutationTaster_pred', 'type': 'CharField', 'parameters': {'max_length': '10', 'null': 'True'}, 'order': 230},
{'name': 'ANN_MutationAssessor_score', 'col_name': 'MutationAssessor_score', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 231},
{'name': 'ANN_MutationAssessor_pred', 'col_name': 'MutationAssessor_pred', 'type': 'CharField', 'parameters': {'max_length': '10', 'null': 'True'}, 'order': 232},
{'name': 'ANN_FATHMM_score', 'col_name': 'FATHMM_score', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 233},
{'name': 'ANN_FATHMM_pred', 'col_name': 'FATHMM_pred', 'type': 'CharField', 'parameters': {'max_length': '10', 'null': 'True'}, 'order': 234},
{'name': 'ANN_PROVEAN_score', 'col_name': 'PROVEAN_score', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 235},
{'name': 'ANN_PROVEAN_pred', 'col_name': 'PROVEAN_pred', 'type': 'CharField', 'parameters': {'max_length': '10', 'null': 'True'}, 'order': 236},
{'name': 'ANN_VEST3_score', 'col_name': 'VEST3_score', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 237},
{'name': 'ANN_CADD_raw', 'col_name': 'CADD_raw', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 238},
{'name': 'ANN_CADD_phred', 'col_name': 'CADD_phred', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 239},
{'name': 'ANN_DANN_score', 'col_name': 'DANN_score', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 240},
{'name': 'ANN_fathmm_MKL_coding_score', 'col_name': 'fathmm-MKL_coding_score', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 241},
{'name': 'ANN_fathmm_MKL_coding_pred', 'col_name': 'fathmm-MKL_coding_pred', 'type': 'CharField', 'parameters': {'max_length': '10', 'null': 'True'}, 'order': 242},
{'name': 'ANN_MetaSVM_score', 'col_name': 'MetaSVM_score', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 243},
{'name': 'ANN_MetaSVM_pred', 'col_name': 'MetaSVM_pred', 'type': 'CharField', 'parameters': {'max_length': '10', 'null': 'True'}, 'order': 244},
{'name': 'ANN_MetaLR_score', 'col_name': 'MetaLR_score', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 245},
{'name': 'ANN_MetaLR_pred', 'col_name': 'MetaLR_pred', 'type': 'CharField', 'parameters': {'max_length': '10', 'null': 'True'}, 'order': 246},
{'name': 'ANN_integrated_fitCons_score', 'col_name': 'integrated_fitCons_score', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 247},
{'name': 'ANN_integrated_confidence_value', 'col_name': 'integrated_confidence_value', 'type': 'IntegerField', 'parameters': {'null': True}, 'xUnits': 10, 'l': lambda x: None if x == '.' else int(x) , 'order': 248},
{'name': 'ANN_GERP', 'col_name': 'GERP++_RS', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 249},
{'name': 'ANN_phyloP7way_vertebrate', 'col_name': 'phyloP7way_vertebrate', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 250},
{'name': 'ANN_phyloP20way_mammalian', 'col_name': 'phyloP20way_mammalian', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 251},
{'name': 'ANN_phastCons7way_vertebrate', 'col_name': 'phastCons7way_vertebrate', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 252},
{'name': 'ANN_phastCons20way_mammalian', 'col_name': 'phastCons20way_mammalian', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 253},
{'name': 'ANN_SiPhy_29way_logOdds', 'col_name': 'SiPhy_29way_logOdds', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'l': lambda x: None if x == '.' else float(x) , 'order': 254},
{'name': 'ANN_CLNALLELEID', 'col_name': 'CLNALLELEID', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'component': 'freetext', 'order': 255},
{'name': 'ANN_CLNDN', 'col_name': 'CLNDN', 'type': 'CharField', 'parameters': {'max_length': '10', 'null': 'True'}, 'database': 'multi_1', 'table': 'ANN_CLINVAR', 'l_multi': lambda x : x.split('|'), 'order': 256},
{'name': 'ANN_CLNDISDB', 'col_name': 'CLNDISDB', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'database': 'multi_1', 'table': 'ANN_CLINVAR', 'l_multi': lambda x : x.split('|'), 'component': 'freetext', 'order': 257},
{'name': 'ANN_CLNREVSTAT', 'col_name': 'CLNREVSTAT', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'order': 258},
{'name': 'ANN_CLNSIG', 'col_name': 'CLNSIG', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'order': 259},
########## FIELDS FROM VEP #########
#{'name': 'VEP_Feature', 'col_name': 'Feature', 'type': 'CharField', 'parameters': {'max_length': '200', 'null': 'True'}, 'l': lambda x:x.split('|')[0], 'order': 94},
{'name': 'VEP_Allele', 'col_name': 'Allele', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 301},
{'name': 'VEP_Consequence', 'col_name': 'Consequence', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 302},
{'name': 'VEP_IMPACT', 'col_name': 'IMPACT', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 302},
{'name': 'VEP_SYMBOL', 'col_name': 'SYMBOL', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 303},
{'name': 'VEP_Gene', 'col_name': 'Gene', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 303},
{'name': 'VEP_Feature_type', 'col_name': 'Feature_type', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 304},
{'name': 'VEP_Feature', 'col_name': 'Feature', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 305},
{'name': 'VEP_BIOTYPE', 'col_name': 'BIOTYPE', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 306},
{'name': 'VEP_EXON', 'col_name': 'EXON', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 307},
{'name': 'VEP_INTRON', 'col_name': 'INTRON', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 308},
{'name': 'VEP_HGVSc', 'col_name': 'HGVSc', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 309},
{'name': 'VEP_HGVSp', 'col_name': 'HGVSp', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 310},
{'name': 'VEP_cDNA_position', 'col_name': 'cDNA_position', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 311},
{'name': 'VEP_CDS_position', 'col_name': 'CDS_position', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 312},
{'name': 'VEP_Amino_acids', 'col_name': 'Amino_acids', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 313},
{'name': 'VEP_Codons', 'col_name': 'Codons', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 314},
{'name': 'VEP_Existing_variation', 'col_name': 'Existing_variation', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'component': 'freetext', 'l_multi': lambda x : x.split('|'), 'order': 315},
{'name': 'VEP_DISTANCE', 'col_name': 'DISTANCE', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 316},
{'name': 'VEP_STRAND', 'col_name': 'STRAND', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 317},
{'name': 'VEP_FLAGS', 'col_name': 'FLAGS', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 318},
{'name': 'VEP_VARIANT_CLASS', 'col_name': 'VARIANT_CLASS', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 319},
{'name': 'VEP_SYMBOL_SOURCE', 'col_name': 'SYMBOL_SOURCE', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 320},
{'name': 'VEP_HGNC_ID', 'col_name': 'HGNC_ID', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 321},
{'name': 'VEP_CANONICAL', 'col_name': 'CANONICAL', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 322},
{'name': 'VEP_TSL', 'col_name': 'TSL', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 323},
{'name': 'VEP_APPRIS', 'col_name': 'APPRIS', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 324},
{'name': 'VEP_CCDS', 'col_name': 'CCDS', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 325},
{'name': 'VEP_ENSP', 'col_name': 'ENSP', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 326},
{'name': 'VEP_SWISSPROT', 'col_name': 'SWISSPROT', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 327},
{'name': 'VEP_TREMBL', 'col_name': 'TREMBL', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 328},
{'name': 'VEP_UNIPARC', 'col_name': 'UNIPARC', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 329},
{'name': 'VEP_GENE_PHENO', 'col_name': 'GENE_PHENO', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 330},
{'name': 'VEP_SIFT', 'col_name': 'SIFT', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 331},
{'name': 'VEP_PolyPhen', 'col_name': 'PolyPhen', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 332},
# {'name': 'VEP_DOMAINS', 'col_name': 'DOMAINS', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': VEP_DOMAINS_f, 'order': 333}, # This is problematic. FIXME
{'name': 'VEP_miRNA', 'col_name': 'miRNA', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 334},
{'name': 'VEP_HGVS_OFFSET', 'col_name': 'HGVS_OFFSET', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 335},
{'name': 'VEP_AF', 'col_name': 'AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 336},
{'name': 'VEP_AFR_AF', 'col_name': 'AFR_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 337},
{'name': 'VEP_AMR_AF', 'col_name': 'AMR_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 338},
{'name': 'VEP_EAS_AF', 'col_name': 'EAS_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 339},
{'name': 'VEP_EUR_AF', 'col_name': 'EUR_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 340},
{'name': 'VEP_SAS_AF', 'col_name': 'SAS_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 341},
{'name': 'VEP_AA_AF', 'col_name': 'AA_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 342},
{'name': 'VEP_EA_AF', 'col_name': 'EA_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 343},
{'name': 'VEP_gnomAD_AF', 'col_name': 'gnomAD_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 344},
{'name': 'VEP_gnomAD_AFR_AF', 'col_name': 'gnomAD_AFR_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 345},
{'name': 'VEP_gnomAD_AMR_AF', 'col_name': 'gnomAD_AMR_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 346},
{'name': 'VEP_gnomAD_ASJ_AF', 'col_name': 'gnomAD_ASJ_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 347},
{'name': 'VEP_gnomAD_EAS_AF', 'col_name': 'gnomAD_EAS_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 348},
{'name': 'VEP_gnomAD_FIN_AF', 'col_name': 'gnomAD_FIN_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 349},
{'name': 'VEP_gnomAD_NFE_AF', 'col_name': 'gnomAD_NFE_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 350},
{'name': 'VEP_gnomAD_OTH_AF', 'col_name': 'gnomAD_OTH_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 351},
{'name': 'VEP_gnomAD_SAS_AF', 'col_name': 'gnomAD_SAS_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 352},
{'name': 'VEP_MAX_AF', 'col_name': 'MAX_AF', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 353},
{'name': 'VEP_MAX_AF_POPS', 'col_name': 'MAX_AF_POPS', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 354},
{'name': 'VEP_CLIN_SIG', 'col_name': 'CLIN_SIG', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 355},
{'name': 'VEP_SOMATIC', 'col_name': 'SOMATIC', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 356},
{'name': 'VEP_PHENO', 'col_name': 'PHENO', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 357},
{'name': 'VEP_PUBMED', 'col_name': 'PUBMED', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 358},
{'name': 'VEP_MOTIF_NAME', 'col_name': 'MOTIF_NAME', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 359},
{'name': 'VEP_MOTIF_POS', 'col_name': 'MOTIF_POS', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 360},
{'name': 'VEP_HIGH_INF_POS', 'col_name': 'HIGH_INF_POS', 'type': 'CharField', 'parameters': {'max_length': '100', 'null': 'True'}, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : x.split('|'), 'order': 361},
{'name': 'VEP_MOTIF_SCORE_CHANGE', 'col_name': 'MOTIF_SCORE_CHANGE', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 10, 'database': 'multi_1', 'table': 'VEP_MULTI', 'l_multi': lambda x : [(None if y=='' else float(y)) for y in x.split('|')], 'order': 362},
# Custom Clinvar parser
{'name': 'CLINVARZAZZ_interpretation', 'col_name': 'ClinVar_ZAZZCLINVAR', 'type': 'CharField', 'parameters': {'max_length': '255', 'null': 'True'}, 'database': 'multi_1', 'table': 'ZAZZ_CLINVAR', 'l_multi': zazz_clinvarzazz_f('interpretation'), 'l_raw_multi': lambda x : '|'.join(map(str, x)), 'order': 401},
{'name': 'CLINVARZAZZ_scv_review_status', 'col_name': 'ClinVar_ZAZZCLINVAR', 'type': 'CharField', 'parameters': {'max_length': '255', 'null': 'True'}, 'database': 'multi_1', 'table': 'ZAZZ_CLINVAR', 'l_multi': zazz_clinvarzazz_f('scv_review_status'), 'l_raw_multi': lambda x : '|'.join(map(str, x)), 'order': 402},
{'name': 'CLINVARZAZZ_clinical_significance', 'col_name': 'ClinVar_ZAZZCLINVAR', 'type': 'CharField', 'parameters': {'max_length': '255', 'null': 'True'}, 'database': 'multi_1', 'table': 'ZAZZ_CLINVAR', 'l_multi': zazz_clinvarzazz_f('clinical_significance'), 'l_raw_multi': lambda x : '|'.join(map(str, x)), 'order': 403},
{'name': 'CLINVARZAZZ_condition_name', 'col_name': 'ClinVar_ZAZZCLINVAR', 'type': 'CharField', 'parameters': {'max_length': '255', 'null': 'True'}, 'database': 'multi_1', 'table': 'ZAZZ_CLINVAR', 'l_multi': zazz_clinvarzazz_f('condition_name'), 'l_raw_multi': lambda x : '|'.join(map(str, x)), 'order': 404},
{'name': 'CLINVARZAZZ_rcv_review_status', 'col_name': 'ClinVar_ZAZZCLINVAR', 'type': 'CharField', 'parameters': {'max_length': '255', 'null': 'True'}, 'database': 'multi_1', 'table': 'ZAZZ_CLINVAR', 'l_multi': zazz_clinvarzazz_f('rcv_review_status'), 'l_raw_multi': lambda x : '|'.join(map(str, x)), 'order': 405},
{'name': 'CLINVARZAZZ_review_status_stars', 'col_name': 'ClinVar_ZAZZCLINVAR', 'type': 'CharField', 'parameters': {'max_length': '255', 'null': 'True'}, 'database': 'multi_1', 'table': 'ZAZZ_CLINVAR', 'l_multi': zazz_clinvarzazz_f('review_status_stars'), 'l_raw_multi': lambda x : '|'.join(map(str, x)), 'order': 406},
]
print ('TOTAL FIELDS: {}'.format(len(fields)))
externals = []
create_table('Samples', fields, externals)
create_js(fields)
return fields
def import_annotated_vcf(filename, fields):
'''
'''
# Loading annotated VCF
print (f'Import annotated VCF: {filename}')
print ('Loading..')
df = | pd.read_excel(filename) | pandas.read_excel |
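# Hypothetical helper for illustration only (not referenced elsewhere): the ANN_*
# field definitions above rely on small lambdas that map the '.' placeholder to
# None and otherwise cast to float/int, while the VEP_* fields split their
# pipe-delimited strings into lists through 'l_multi'.
def _sketch_field_lambdas():
    to_float = lambda x: None if x == '.' else float(x)
    split_multi = lambda x: x.split('|')
    assert to_float('.') is None
    assert to_float('3.14') == 3.14
    assert split_multi('benign|likely_benign') == ['benign', 'likely_benign']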
import re
import os
import sys
import datetime
import numpy as np
import pandas as pd
from pandas.tseries.offsets import BDay
import stock.utils.symbol_util
from stock.marketdata.storefactory import get_store
from stock.globalvar import *
from config import store_type
from stock.utils.calc_price import get_zt_price
def get_last_trading_date(today):
yest = today - BDay(1)
folder = TICK_DIR["daily"]
while True:
yest_str = yest.strftime("%Y-%m-%d")
filepath = os.path.join(folder, yest_str + ".csv")
if os.path.isfile(filepath):
break
yest = yest - BDay(1)
return yest
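# Hypothetical helper for illustration only (assumed dates): stepping back with
# BDay(1) skips weekends, so a Monday run looks for the previous Friday's file first.
def _sketch_bday_walkback():
    import pandas as pd
    from pandas.tseries.offsets import BDay
    monday = pd.Timestamp("2019-07-08")                    # a Monday
    assert monday - BDay(1) == pd.Timestamp("2019-07-05")  # previous Friday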
def get_industry():
df_industry = stock.utils.symbol_util.load_industry()
df_res = df_industry.groupby("exsymbol")["industry"].agg(industry=lambda x: ",".join(x))
return df_res
def get_concept():
df = stock.utils.symbol_util.load_concept()
df_res = df.groupby("exsymbol")["concept"].agg(concept=lambda x: ",".join(x))
return df_res
def filter_by_history(date, exsymbols):
store = get_store(store_type)
result = []
df = pd.DataFrame(columns=["increase60", "increase5", "future"])
for exsymbol in exsymbols:
if not store.has(exsymbol):
continue
df_stock = store.get(exsymbol)
df_past = df_stock.loc[:date].copy()
if len(df_past) == 0:
continue
close_min60 = np.min(df_past.iloc[-60:].close)
close_min5 = np.min(df_past.iloc[-5:].close)
increase60 = df_past.iloc[-1].close/close_min60-1
increase5 = df_past.iloc[-1].close/close_min5-1
future = df_stock.iloc[-1].close/df_past.iloc[-1].close-1
df.loc[exsymbol] = [increase60, increase5, future]
return df
def get_zhangting(today):
today_str = today.strftime("%Y-%m-%d")
yest = get_last_trading_date(today)
yest_str = yest.strftime("%Y-%m-%d")
df_today = stock.utils.symbol_util.get_realtime_by_date(today_str)
df_tick = stock.utils.symbol_util.get_tick_by_date(today_str)
df_tick.loc[:, "kaipan_money"] = df_tick["kaipan_money"]/1e8
df_today["opengap"] = df_today.apply(lambda x: x["close"] if x["open"] == 0.0 else x["open"], axis=1)/df_today.yest_close - 1
df_today["zt_price"] = df_today.apply(lambda x: get_zt_price(x.name[2:], x["yest_close"]), axis=1)
df_yest = stock.utils.symbol_util.get_realtime_by_date(yest_str)
df_yest["zt_price"] = df_yest.apply(lambda x: get_zt_price(x.name[2:], x["yest_close"]), axis=1)
df_yest.loc[:, "is_zhangting"] = np.absolute(df_yest["zt_price"]-df_yest["close"])<1e-8
df_yest_zt = df_yest[(df_yest.is_zhangting==True) & (df_yest.lt_mcap>0) & (df_yest.volume>0)].copy()
df_yest_zt.loc[:, "turnover"] = df_yest_zt["volume"]/(df_yest_zt["lt_mcap"]/df_yest_zt["close"]*1e6)
df_yest_zt.loc[:, "yest_fengdan"] = df_yest_zt["b1_v"] * df_yest_zt["b1_p"] *100 / df_yest_zt["lt_mcap"] / 1e8
df_yest_zt.loc[:, "yest_fengdan_money"] = df_yest_zt["b1_v"]*df_yest_zt["b1_p"]/1e6
df_yest_zt.loc[:, "yest_lt_mcap"] = df_yest_zt["lt_mcap"]
df_res = df_yest_zt[["yest_fengdan", "yest_fengdan_money", "yest_lt_mcap", "turnover"]].merge(df_today, how="inner", left_index=True, right_index=True)
df_res = df_res.merge(df_tick[["kaipan_money"]], how="left", left_index=True, right_index=True)
df_tick_yest = stock.utils.symbol_util.get_tick_by_date(yest_str)
df_res = df_res.merge(df_tick_yest[["zhangting_sell", "zhangting_min"]], how="inner", left_index=True, right_index=True)
df_hist = filter_by_history(yest_str, df_res.index)
df_res = df_res.merge(df_hist, how="inner", left_index=True, right_index=True)
df_industry = get_industry()
df_concept = get_concept()
df_res = df_res.merge(df_industry, how="left", left_index=True, right_index=True)
df_res = df_res.merge(df_concept, how="left", left_index=True, right_index=True)
df_res = df_res # & (df_res.lt_mcap<100)]# & (df_res.zhangting_min>100)]
columns = ["opengap", "yest_fengdan_money", "zhangting_sell", "kaipan_money", "lt_mcap", "industry"]
print("========================== zhangting ==========================")
print(df_res[columns].sort_values("zhangting_sell", ascending=True))
def get_zhangting_pause(today):
today_str = today.strftime("%Y-%m-%d")
yest = get_last_trading_date(today)
yest_str = yest.strftime("%Y-%m-%d")
yest2 = get_last_trading_date(yest)
yest2_str = yest2.strftime("%Y-%m-%d")
df_yest = stock.utils.symbol_util.get_realtime_by_date(yest_str)
df_yest["zt_price"] = df_yest.apply(lambda x: get_zt_price(x.name[2:], x["yest_close"]), axis=1)
df_yest.loc[:, "is_zhangting"] = np.absolute(df_yest["zt_price"]-df_yest["close"])<1e-8
df_yest_nozt = df_yest[(df_yest.is_zhangting==False) & (df_yest.lt_mcap>0) & (df_yest.volume>0)].copy()
df_yest_nozt.loc[:, "yest_chg"] = df_yest_nozt.chgperc
df_yest2 = stock.utils.symbol_util.get_realtime_by_date(yest2_str)
df_yest2["zt_price"] = df_yest2.apply(lambda x: get_zt_price(x.name[2:], x["yest_close"]), axis=1)
df_yest2.loc[:, "is_zhangting"] = np.absolute(df_yest2["zt_price"]-df_yest2["close"])<1e-8
df_yest2_zt = df_yest2[(df_yest2.is_zhangting==True) & (df_yest2.lt_mcap>0) & (df_yest2.volume>0)].copy()
df_yest2_zt.loc[:, "yest2_chg"] = df_yest2_zt.chgperc
df_today = stock.utils.symbol_util.get_realtime_by_date(today_str)
df_today.loc[:, "opengap"] = df_today.apply(lambda x: x["close"] if x["open"] == 0.0 else x["open"], axis=1)/df_today.yest_close - 1
df_tick = stock.utils.symbol_util.get_tick_by_date(today_str)
df_tick.loc[:, "kaipan_money"] = df_tick["kaipan_money"]/1e8
df_res = df_today.merge(df_yest_nozt[["yest_chg"]], how="inner", left_index=True, right_index=True)
df_res = df_res.merge(df_yest2_zt[["yest2_chg"]], how="inner", left_index=True, right_index=True)
df_res = df_res.merge(df_tick[["kaipan_money"]], left_index=True, right_index=True)
df_res.loc[:, "fengdan"] = df_res["b1_v"] * df_res["b1_p"] *100 / df_res["lt_mcap"] / 1e8
df_res.loc[:, "fengdan_money"] = df_res["b1_v"]*df_res["b1_p"]/1e6
df_industry = get_industry()
df_res = df_res.merge(df_industry, how="left", left_index=True, right_index=True)
columns = ["opengap", "fengdan_money", "kaipan_money", "industry"]
print("========================== zhangting pause ==========================")
print(df_res[columns].sort_values("kaipan_money", ascending=True))
def get_yizi(today):
today_str = today.strftime("%Y-%m-%d")
df_today = stock.utils.symbol_util.get_realtime_by_date(today_str)
df_tick = stock.utils.symbol_util.get_tick_by_date(today_str)
df_tick.loc[:, "kaipan_money"] = df_tick["kaipan_money"]/1e8
df_today["opengap"] = df_today.apply(lambda x: x["close"] if x["open"] == 0.0 else x["open"], axis=1)/df_today.yest_close - 1
df_today["zt_price"] = df_today.apply(lambda x: get_zt_price(x.name[2:], x["yest_close"]), axis=1)
df_today["is_yizi"] = np.absolute(df_today["zt_price"]-df_today["open"])<1e-8
df_today["fengdan_money"] = df_today.apply(lambda x: x["b1_v"] * x["b1_p"]/1e6 if x["is_yizi"] else 0, axis=1)
df_yizi = df_today[(df_today.is_yizi==True) & (df_today.lt_mcap>0)].copy()
df_res = df_yizi.merge(df_tick[["kaipan_money"]], left_index=True, right_index=True)
df_industry = get_industry()
df_res = df_res.merge(df_industry, how="left", left_index=True, right_index=True)
columns = ["opengap", "fengdan_money", "kaipan_money", "industry"]
print("========================== yizi ==========================")
print(df_res[columns].sort_values("fengdan_money", ascending=True))
def get_open_up(today):
today_str = today.strftime("%Y-%m-%d")
yest = get_last_trading_date(today)
yest_str = yest.strftime("%Y-%m-%d")
df_today = stock.utils.symbol_util.get_realtime_by_date(today_str)
df_tick = stock.utils.symbol_util.get_tick_by_date(today_str)
df_tick.loc[:, "kaipan_volume"] = df_tick["kaipan_money"] / df_tick["kaipan_price"]
df_today.loc[:, "opengap"] = df_today.apply(lambda x: x["close"] if x["open"] == 0.0 else x["open"], axis=1)/df_today.yest_close - 1
df_today.loc[:, "zt_price"] = df_today.apply(lambda x: get_zt_price(x.name[2:], x["yest_close"]), axis=1)
df_today.loc[:, "opengap"] = df_today.apply(lambda x: x["close"] if x["open"] == 0.0 else x["open"], axis=1)/df_today.yest_close - 1
df_today.loc[:, "is_zhangting"] = np.absolute(df_today["zt_price"]-df_today["open"])<1e-8
df_today_nozt = df_today[df_today.is_zhangting==False]
df_yest = stock.utils.symbol_util.get_realtime_by_date(yest_str)
df_yest.loc[:, "zt_price"] = df_yest.apply(lambda x: get_zt_price(x.name[2:], x["yest_close"]), axis=1)
df_yest.loc[:, "is_zhangting"] = np.absolute(df_yest["zt_price"]-df_yest["close"])<1e-8
df_yest_nozt = df_yest[(df_yest.is_zhangting==False) & (df_yest.lt_mcap>0) & (df_yest.volume>0)].copy()
df_res = df_tick[["kaipan_volume", "kaipan_money"]].merge(df_today_nozt[["opengap", "volume", "chgperc"]], how="inner", left_index=True, right_index=True)
df_res = df_res.merge(df_yest_nozt[["volume", "chgperc"]], how="inner", left_index=True, right_index=True, suffixes=('', '_yest'))
df_res.loc[:, "volumeperc"] = df_res["kaipan_volume"] / df_res["volume_yest"] / 100
df_industry = get_industry()
df_res = df_res.merge(df_industry, how="left", left_index=True, right_index=True)
df_res.loc[:, "chgperc"] = df_res["chgperc"] / 100
df_res.loc[:, "chgperc_yest"] = df_res["chgperc_yest"] / 100
df_res = df_res[(df_res.opengap > 0.03) & (df_res.volumeperc > 0.03) & (df_res.chgperc_yest < 0.03)]
columns = ["opengap", "volumeperc", "kaipan_money", "industry", "chgperc_yest"]
print("========================== openup ==========================")
print(df_res[columns].sort_values("kaipan_money", ascending=False))
if __name__ == "__main__":
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
today = None
if len(sys.argv) == 1:
today = pd.datetime.today()
else:
today = | pd.datetime.strptime(sys.argv[1], "%Y-%m-%d") | pandas.datetime.strptime |
#Fuel Consumption Rate Functions for PuMA
#By <NAME>
import pandas as pd
def gallons_consumed_per_month(df):
gpm = df.groupby(df.index.to_period('M')).agg({'gallons': 'sum'})
return gpm
def gallonsPerHour(fuelConsumption):
'''
calculates gallons consumed for each hour monitored
:param fuelConsumption: A pandas Series with datetime index
    :return: pandas Series of gallons per hour with a datetime index
'''
return fuelConsumption.groupby(pd.Grouper(freq='H')).sum(min_count = 1)
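# Hypothetical helper for illustration only (toy values): pd.Grouper(freq='H')
# buckets a datetime-indexed series by hour, and sum(min_count=1) keeps hours
# without any fuel clicks as NaN instead of 0.
def _sketch_hourly_sum():
    import pandas as pd
    idx = pd.to_datetime(["2020-01-01 00:10", "2020-01-01 00:40", "2020-01-01 02:05"])
    clicks = pd.Series([0.10, 0.20, 0.05], index=idx)
    # hour 00 -> 0.30, hour 01 -> NaN (no observations), hour 02 -> 0.05
    return clicks.groupby(pd.Grouper(freq='H')).sum(min_count=1)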
def gallons_consumed_per_area(df,galColumn,area):
'''calculates total gallons consumed per area
:param: df is a pandas dataframe including a column representing gallons
:param: galColumn is a string name of the column containing gallons
:param: area float or int representing the area
:return: float value representing the sum of gallons divided by area'''
return df[galColumn].sum()/area
def gallons_per_hour_per_month_year(fuelConsomption):
''':param: fuelConsumption pandas series with datetime index and numeric data'''
grouped = fuelConsomption.groupby(pd.Grouper(freq="M"))
gph_my = pd.concat([gallonsPerHour(grouped)])
def gallons_per_day_and_per_month(df, galColumn):
'''gallons consumed per day per month
:param: df is a pandas dataframe including a column representing gallons and a datetime index
:param: galColumn is a string name of the column containing gallons
    :return: pandas.Series of total gallons consumed each day and pandas.Series of total gallons per month
'''
    gpd = df[galColumn].groupby(pd.Grouper(freq='D')).sum(min_count=10) # days with fewer than 10 fuel click records will be NaN
gpm = gpd.groupby( | pd.Grouper(freq='M') | pandas.Grouper |
# -*- coding: utf-8 -*-
from datetime import timedelta
import numpy as np
import pandas as pd
battery_names = ["bl", "vb", "bv"]
def twdb_dot(df_row, dual_well=False, drop_dcp_metadata=True):
"""Parser for twdb DOT dataloggers."""
return _twdb_stevens_or_dot(
df_row, reverse=False, dual_well=dual_well, drop_dcp_metadata=drop_dcp_metadata
)
def twdb_fts(df_row, drop_dcp_metadata=True, dual_well=False):
"""Parser for twdb fts dataloggers
format examples:
C510D20018036133614G39-0NN170WXW00097 :WL 31 #60 -72.91 -72.89 -72.89 -72.89 -72.91 -72.92 -72.93 -72.96 -72.99 -72.97 -72.95 -72.95
"""
message = df_row["dcp_message"].lower()
message_timestamp = df_row["message_timestamp_utc"]
if _invalid_message_check(message):
return _empty_df(message_timestamp)
data = []
for line in message.strip('"').split(":"):
try:
channel = line[:2]
channel_data = [float(val.strip("+-")) for val in line[10:].split()]
df = _twdb_assemble_dataframe(
message_timestamp, channel, channel_data, reverse=False
)
data.append(df)
except Exception as e:
print(
"Warning: Could not parse values for channel {}: {}".format(channel, e)
)
df = pd.concat(data)
if not drop_dcp_metadata:
for col in df_row.index:
df[col] = df_row[col]
return df
def twdb_stevens(df_row, dual_well=False, drop_dcp_metadata=True):
"""Parser for twdb stevens dataloggers."""
return _twdb_stevens_or_dot(
df_row, reverse=True, dual_well=dual_well, drop_dcp_metadata=drop_dcp_metadata
)
def twdb_sutron(df_row, drop_dcp_metadata=True, dual_well=False):
"""Parser for twdb sutron dataloggers.
Data is transmitted every 12 hours and each message contains 12 water level
measurements on the hour for the previous 12 hours and one battery voltage
measurement for the current hour
format examples:
'":ott 60 #60 -190.56 -190.66 -190.69 -190.71 -190.74 -190.73 -190.71 -190.71 -190.71 -190.71 -190.72 -190.72 :BL 13.05 '
'":SENSE01 60 #60 -82.19 -82.19 -82.18 -82.19 -82.19 -82.22 -82.24 -82.26 -82.27 -82.28 -82.28 -82.26 :BL 12.41 '
'":OTT 703 60 #60 -231.47 -231.45 -231.44 -231.45 -231.47 -231.50 -231.51 -231.55 -231.56 -231.57 -231.55 -231.53 :6910704 60 #60 -261.85 -261.83 -261.81 -261.80 -261.81 -261.83 -261.85 -261.87 -261.89 -261.88 -261.86 -261.83 :BL 13.21'
'":Sense01 10 #10 -44.70 -44.68 -44.66 -44.65 -44.63 -44.61 -44.60 -44.57 -44.56 -44.54 -44.52 -44.50 :BL 13.29'
'"\r\n-101.11 \r\n-101.10 \r\n-101.09 \r\n-101.09 \r\n-101.08 \r\n-101.08 \r\n-101.08 \r\n-101.10 \r\n-101.11 \r\n-101.09 \r\n-101.09 \r\n-101.08'
'"\r\n// \r\n// \r\n// \r\n// \r\n// \r\n-199.88 \r\n-199.92 \r\n-199.96 \r\n-199.98 \r\n-200.05 \r\n-200.09 \r\n-200.15'
'":Sense01 60 #60 M M M M M M M M M M M M :BL 12.65'
"""
message = df_row["dcp_message"].strip().lower()
message_timestamp = df_row["message_timestamp_utc"]
if _invalid_message_check(message):
return _empty_df(message_timestamp)
lines = message.strip('":').split(":")
data = []
for line in lines:
lsplit = line.split(" ")
channel = lsplit[0]
if channel.lower() in battery_names:
try:
channel_data = [field.strip('+-" ') for field in lsplit]
df = _twdb_assemble_dataframe(
message_timestamp, channel, channel_data, reverse=False
)
data.append(df)
except Exception as e:
print(
"Warning: Could not parse values for channel {}: {}".format(
channel, e
)
)
else:
try:
channel_data = [field.strip('+-" ') for field in lsplit[3:]]
df = _twdb_assemble_dataframe(
message_timestamp, channel, channel_data, reverse=False
)
data.append(df)
except Exception as e:
print(
"Warning: Could not parse values for channel {}: {}".format(
channel, e
)
)
df = pd.concat(data)
if not drop_dcp_metadata:
for col in df_row.index:
df[col] = df_row[col]
return df
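# Hypothetical helper for illustration only, using an abbreviated version of the
# first format example from the docstring above: each ':'-separated block is one
# channel, and the first token of a block is the channel name ('bl' is battery level).
def _sketch_sutron_split():
    msg = '":ott 60 #60 -190.56 -190.66 -190.69 :BL 13.05 '.strip().lower()
    blocks = msg.strip('":').split(":")
    channels = [block.split(" ")[0] for block in blocks]
    assert channels == ['ott', 'bl']
    return blocks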
def twdb_texuni(dataframe, drop_dcp_metadata=True, dual_well=False):
"""Parser for twdb texuni dataloggers.
Data is transmitted every 12 hours and each message contains 12 water level
measurements on the hour for the previous 12 hours
format examples:
'"\r\n+0.000,-245.3,\r\n+0.000,-245.3,\r\n+0.000,-245.3,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.4,\r\n+0.000,-245.5,\r\n+0.000,-245.5,\r\n+0.000,-245.6,\r\n+0.000,-245.6,\r\n+0.000,-245.6,\r\n+0.000,-245.6,\r\n+0.000,-245.6,\r\n+0.000,-245.6,\r\n+412.0,+2013.,+307.0,+1300.,+12.75,+0.000,-245.4,-245.3,-245.6,+29.55,'
' \r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-110.0,\r\n+0.000,-110.0,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-110.0,\r\n+0.000,-110.0,\r\n+0.000,-110.0,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+340.0,+2013.,+307.0,+1400.,+12.07,+0.000,-109.9,-109.8,-110.1,+30.57,'
"""
message = dataframe["dcp_message"]
message_timestamp = dataframe["message_timestamp_utc"]
channel = "wl"
channel_data = [
row.split(",")[1].strip("+- ")
for row in message.strip('" \r\n').splitlines()[:-1]
]
df = _twdb_assemble_dataframe(
message_timestamp, channel, channel_data, reverse=True
)
if not drop_dcp_metadata:
for col in dataframe.index:
df[col] = dataframe[col]
return df
def _twdb_assemble_dataframe(message_timestamp, channel, channel_data, reverse=False):
data = []
base_timestamp = message_timestamp.replace(minute=0, second=0, microsecond=0)
if reverse:
        channel_data.reverse()
for hrs, value in enumerate(channel_data):
timestamp = base_timestamp - timedelta(hours=hrs)
try:
value = float(value)
except Exception:
value = np.nan
data.append([timestamp, channel, value])
if len(data) > 0:
df = pd.DataFrame(data, columns=["timestamp_utc", "channel", "channel_data"])
df.index = pd.to_datetime(df["timestamp_utc"])
del df["timestamp_utc"]
return df
else:
return pd.DataFrame()
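# Hypothetical helper for illustration only (assumed timestamp): each value in
# channel_data is stamped a whole number of hours before the truncated message
# timestamp, so index 0 is the current hour and later values walk backwards in time.
def _sketch_hourly_backfill():
    from datetime import datetime, timedelta
    message_timestamp = datetime(2016, 1, 1, 12, 34)
    base = message_timestamp.replace(minute=0, second=0, microsecond=0)
    # -> [2016-01-01 12:00, 11:00, 10:00]
    return [base - timedelta(hours=hrs) for hrs in range(3)]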
def _twdb_stevens_or_dot(df_row, reverse, dual_well=False, drop_dcp_metadata=True):
"""Parser for twdb stevens and DOT dataloggers - the only difference being
that with stevens dataloggers, water level data needs to be reversed to be
correctly interpretted.
Data is transmitted every 12 hours and each message contains 12 water level
measurements on the hour for the previous 12 hours and one battery voltage
measurement for the current hour
format examples:
'"BV:12.5 451.70$ 451.66$ 451.66$ 451.62$ 451.59$ 451.57$ 451.54$ 451.53$ 451.52$ 451.52$ 451.52$ 451.52$ '
'"BV:12.2 Channel:5 Time:43 +441.48 +443.25 +440.23 +440.67 +441.26 +441.85 +442.66 +443.84 +445.24 +442.15 +442.88 +443.91 '
'"BV:12.6 Channel:5 Time:28 +304.63 +304.63 +304.63 +304.56 +304.63 +304.63 +304.63 +304.63 +304.63 +304.63 +304.63 +304.71 Channel:6 Time:28 +310.51 +310.66 +310.59 +310.51 +310.51 +310.59 +310.59 +310.51 +310.66 +310.51 +310.66 +310.59 '
"""
message = df_row["dcp_message"].strip().lower()
message_timestamp = df_row["message_timestamp_utc"]
if _invalid_message_check(message):
return _empty_df(message_timestamp)
data = []
if dual_well:
fields = message.strip('" \x10\x00').split("\r")
channel_data = {}
channel_data["bv"] = [fields[0].split(":")[1].split()[0]]
for field in fields[1:]:
df = pd.DataFrame()
try:
channel, channel_datum = field.strip("\n").split(": ")
if channel in channel_data:
channel_data[channel].append(channel_datum.strip("+-"))
else:
channel_data[channel] = [channel_datum.strip("+-")]
except Exception:
pass
for channel in channel_data:
df = _twdb_assemble_dataframe(
message_timestamp, channel, channel_data[channel], reverse=reverse
)
data.append(df)
df = | pd.concat(data) | pandas.concat |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import pandas as pd
def main():
list_x = [5, 7, 5, 3]
list_y = [7, 5, 3, 5]
list_label = ["north", "east", "south", "west"]
list_edge = ["north", "east", "south", "west"]
data = {
'list_x': list_x,
'list_y': list_y,
'list_label': list_label,
'list_edge': list_edge,
}
df = | pd.DataFrame(data) | pandas.DataFrame |
'''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import numpy as np
import pandas as pd
import csv
from keras.layers import ELU
np.random.seed(1337) # for reproducibility
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
### Configuration ###############
use_onehot = True
use_dropout = True
use_ELU = False
reuse_model_with_weight = False
batch_size = 128
nb_classes = 5
nb_epoch = 100
###################
def ageofyear(x):
try:
y = x.split()
except:
return None
if len(y) <= 1:
return 0
elif 'year' in y[1]:
return int(y[0]) * 365
elif 'month' in y[1]:
return int(y[0]) * (365 / 12)
elif 'week' in y[1]:
        return int(y[0]) * 7
    elif 'day' in y[1]:
        return int(y[0])
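# Hypothetical helper for illustration only: the shelter ages arrive as free text
# such as "2 years" or "3 weeks", and ageofyear() converts them to an approximate
# number of days (non-string values fall into the except branch and become None).
def _sketch_ageofyear():
    assert ageofyear("2 years") == 2 * 365
    assert ageofyear("3 weeks") == 21
    assert ageofyear("") == 0
    assert ageofyear(float("nan")) is None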
def split_slash(x):
if ('Mix' in x):
return 'Mix'
elif ('/' in x):
return x.split('/')[1]
def arrange_test_data(path, begin, end):
print('Loading testing data...')
data = pd.read_csv(path)
result = pd.DataFrame()
if end > 0:
data = data.drop(data.index[end:]) ## Remain 100 rows just for debugging
if begin > 0:
data = data.drop(data.index[:begin]) ## Remain 100 rows just for debugging
data.fillna('', inplace=True)
######## TODO: Adjust column ####################
if (True):
data['IsMix'] = data['Breed'].str.contains('mix', case=False).astype(int)
data['Breed2'] = data['Breed'].map(split_slash).fillna(value=0)
data['Breed'] = data['Breed'].map(
lambda x: ((x.split(' Mix')[0]).split('/')[0])
)
data['IsTabby'] = data['Color'].str.contains('Tabby', case=False).astype(int)
data['Color'] = data['Color'].map(
lambda x: (x.replace(' ', ''))
)
data['Color'] = data['Color'].map(
lambda x: (x.replace('Tabby', ''))
)
data['Color2'] = data['Color'].map(split_slash).fillna(value=0)
data['Color'] = data['Color'].map(
lambda x: (x.split('/')[0])
)
#data['MixColor'] = data['Color'].str.contains('/', case=False).fillna(value=0).astype(int)
#data['Black'] = data['Color'].str.contains('Black', case=False).fillna(value=0).astype(int)
#data['Red'] = data['Color'].str.contains('Red', case=False).fillna(value=0).astype(int)
#data['Brown'] = data['Color'].str.contains('Brown', case=False).fillna(value=0).astype(int)
data['IsNamed'] = data['Name'].map(
lambda x: (len(x) > 0)
)
data['AgeInDays'] = data['AgeuponOutcome'].map(ageofyear).fillna(value=0)
if (True):
data['Month'] = data['DateTime'].map(
lambda x: pd.tslib.Timestamp(x).month
).fillna(value=0)
#data['Year'] = data['DateTime'].map(
# lambda x: pd.tslib.Timestamp(x).year
#).fillna(value=0)
data['Day'] = data['DateTime'].map(
lambda x: pd.tslib.Timestamp(x).dayofyear + ((pd.tslib.Timestamp(x).year - 2010) * 365)
).fillna(value=0)
data['Hour'] = data['DateTime'].map(
lambda x: pd.tslib.Timestamp(x).hour
).fillna(value=0)
data['Weekday'] = data['DateTime'].map(
lambda x: pd.tslib.Timestamp(x).dayofweek
).fillna(value=0)
target_to_remove= ['ID']
target_to_remove.append('Name')
target_to_remove.append('DateTime')
#target_to_remove.append('AnimalType')
#target_to_remove.append('SexuponOutcome')
target_to_remove.append('AgeuponOutcome')
#target_to_remove.append('Breed')
#target_to_remove.append('Color')
############################
if 'OutcomeType' in data.columns:
result = data['OutcomeType'].copy()
target_to_remove.append('OutcomeType')
target_to_remove.append('OutcomeSubtype')
if 'AnimalID' in data.columns:
target_to_remove.remove('ID')
target_to_remove.append('AnimalID')
arranged_test_data = data.drop(target_to_remove, axis=1)
arranged_test_data = arranged_test_data.reset_index(drop=True)
result = result.reset_index(drop=True)
#print(arranged_test_data)
#print(result)
return arranged_test_data, result
def generateDict(dictionary, table):
if not dictionary:
dictionary = dict()
for idx, row in table.iterrows():
for columnHeader, col in row.iteritems():
if (None == dictionary.get(columnHeader, None)):
dictionary[columnHeader] = dict()
if (None == dictionary[columnHeader].get(col, None)):
dictionary[columnHeader][col] = len(dictionary[columnHeader]) # 0, 1, 2,,,
print('--------------')
for col in dictionary:
print(col, " : ", len(dictionary[col]))
print('--------------')
return dictionary
def map_to_float(dictionary, table):
for idx, row in table.iterrows():
for columnHeader, col in row.iteritems():
table.set_value(idx, columnHeader, dictionary[columnHeader][col])
for columnHeader in table.columns:
table[columnHeader] = table[columnHeader].astype(float)
for idx, row in table.iterrows():
for columnHeader, col in row.iteritems():
table.set_value(idx, columnHeader, col / len(dictionary[columnHeader]))
return np.array(table, np.float32)
def map_to_integer(dictionary, table):
for idx, row in table.iterrows():
for columnHeader, col in row.iteritems():
table.set_value(idx, columnHeader, dictionary[columnHeader][col])#table[columnHeader][idx] = dictionary[columnHeader][col]
return table
def map_to_integer_for_outcometype(table):
dic = dict()
for idx, val in table.iteritems():
if None == dic.get(val, None):
dic[val] = len(dic)
table.set_value(idx, dic[val])
return table
def to_categorical_2d(dictionary, table): ## We need dictionary to get max value of whole dataset (not only trainset but testset)
## 1. Find max of each column
total_nb_classes = 0
for idx, col in enumerate(dictionary):
total_nb_classes += len(dictionary[col])
#print('idx: ', idx, 'col: ', col, 'max: ',len(dictionary[col]), 'tot_max:', total_nb_classes)
## 2. generate np.zeros(len(table), sum(max))
Y = np.zeros((len(table), total_nb_classes))
print('table.shape: ', table.shape,' ---> NewTable.shape: ',Y.shape,', len(dictionary): ', len(dictionary))
## 3. For all rows
for idx, row in table.iterrows():
## 4.for all column
new_col = 0
for columnHeader, col in row.iteritems():
## 5.Insert data into new np array
#print('row:', row, 'col: ', col, 'y[]: ', y[row, col], 'new_col: ', new_col)
Y[idx, col + new_col] = 1.
new_col += len(dictionary[columnHeader])
return Y
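# Hypothetical helper for illustration only (toy frame): the hand-rolled layout in
# to_categorical_2d() concatenates one block of indicator columns per original
# column; pd.get_dummies gives a purely pandas view of the same per-column one-hot idea.
def _sketch_one_hot():
    import pandas as pd
    toy = pd.DataFrame({"Breed": [0, 1, 0], "Color": [2, 0, 1]})
    # columns come out as Breed_0, Breed_1, Color_0, Color_1, Color_2
    return pd.get_dummies(toy.astype(str), columns=["Breed", "Color"])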
X_train, Y_train = arrange_test_data('../input/train.csv', 0, 0)
X_test, Y_test = arrange_test_data('../input/test.csv', 0, 0) ## dummy for dictionary generation
X_total = | pd.concat([X_train, X_test]) | pandas.concat |
"""Personal Challenge_Draft.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1-25-B3CO6yVCH9u2vgbhIjyyFeU3tJ3w
"""
# Working environment set up
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import CountVectorizer
import string
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import seaborn as sns
from nltk.corpus import wordnet
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.ensemble import RandomForestClassifier
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
def load_data():
'''
This function will separately return the features and response variable for the input data
'''
data = pd.read_csv('data.csv')
X = data['Lyric']
y = data['Genre']
return X, y
# Use pos_tag to get the type of the world and then map the tag to the format wordnet lemmatizer would accept.
def get_wordnet_pos(word):
"""Map POS tag to first character lemmatize() accepts"""
tag = nltk.pos_tag([word])[0][1][0].upper()
tag_dict = {"J": wordnet.ADJ,
"N": wordnet.NOUN,
"V": wordnet.VERB,
"R": wordnet.ADV}
return tag_dict.get(tag, wordnet.NOUN)
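# Hypothetical helper for illustration only: the POS tag is collapsed to its first
# letter and looked up in the same mapping used above, with NOUN as the fallback
# for any tag that is not in the dictionary.
def _sketch_wordnet_pos():
    tag_dict = {"J": wordnet.ADJ, "N": wordnet.NOUN, "V": wordnet.VERB, "R": wordnet.ADV}
    assert tag_dict.get("V") == wordnet.VERB
    assert tag_dict.get("X", wordnet.NOUN) == wordnet.NOUN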
def transform_data():
'''
    This function will transform the features and return the count-vectorized features.
Steps are:
1. Remove punctuations
2. Tokenize
3. Lemmatization
4. Remove stop words
5. CountVectorize
'''
X, y = load_data()
X = X.apply(lambda x: x.translate(str.maketrans('', '', string.punctuation))) # To remove the punctuations
X_Tokenize = X.apply(lambda x: word_tokenize(x)) # To tokenize
lemmatizer = WordNetLemmatizer()
X_lemmatize = X_Tokenize.apply(lambda x: ' '.join([lemmatizer.lemmatize(w, pos='v') for w in x]))
stop_words = set(stopwords.words('english'))
stop_words_more = ('10', '100', '20', '2x', '3x', '4x', '50', 'im') # Add more stop words
    stop_words.update(stop_words_more)  # add the extra stop words in place
CountVect = CountVectorizer(stop_words=stop_words, min_df=300, lowercase=True, ngram_range=(1, 1))
Transformmed_array = CountVect.fit_transform(X_lemmatize)
X_vectorized = pd.DataFrame(Transformmed_array.toarray(), columns=CountVect.get_feature_names())
return X_vectorized, y
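# Hypothetical helper for illustration only (toy lyrics): CountVectorizer with
# ngram_range=(1, 1) produces one column per surviving unigram; the min_df=300 used
# in transform_data() simply drops words seen in fewer than 300 lyrics.
def _sketch_count_vectorizer():
    toy = ["love you love", "rock and roll"]
    cv = CountVectorizer(lowercase=True, ngram_range=(1, 1))
    counts = cv.fit_transform(toy)            # sparse matrix, one row per lyric
    return counts.toarray(), cv.vocabulary_   # vocabulary_ maps each word to its column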
def EDA_visualize(X, y, N):
'''
    :param X: X is the features to be trained
    :param y: y is the Genre classification to be trained
    :param N: number of largest-frequency words to report for each type of Genre
    :return: 1. Barplot to visualize the counts for each type of y 2. Return the N largest-frequency words for each type of y
'''
sns.catplot(x='Genre', kind='count', data=pd.DataFrame(y[:50000]))
DF_Combine = pd.concat([X, y], axis=1)
DF_nlargest = pd.DataFrame(np.ones((3, 1)), columns=['exm'], index=['Hip Hop', 'Pop', 'Rock']) # Initilnize
for value in DF_Combine.columns[:-1]:
DF_nlargest[value] = pd.DataFrame(DF_Combine.groupby('Genre')[value].sum())
print(DF_nlargest.apply(lambda s, n: s.nlargest(n).index, axis=1, n=N))
# X_temp, y_temp = transform_data()
def TuneParameter_visulize(X_train, y_train, X_hold, y_hold):
'''
    It will return several plots that help tune the parameters.
parameters are:
1. max_depth
2. n_estimators
3. max_features...
Todo: plotting more parameters
'''
# Tune max_depth
max_depths = np.linspace(10, 200, 15, endpoint=True)
train_results = []
validation_results = []
for depth in max_depths:
rf = RandomForestClassifier(max_depth=depth, n_jobs=-1)
rf.fit(X_train, y_train)
train_results.append(accuracy_score(y_train, rf.predict(X_train)))
validation_results.append(accuracy_score(y_hold, rf.predict(X_hold)))
line1 = plt.plot(max_depths, train_results, 'b', label='Train accuracy')
line2 = plt.plot(max_depths, validation_results, 'r', label='Estimated accuracy')
plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})
plt.ylabel('accuracy score')
plt.xlabel('Tree depth')
plt.show()
def main():
'''
It will return:
    1. EDA visualization
    2. Visualization of the parameter-tuning process
    3. Series containing the expected accuracy
    4. Series containing the predicted y_test
'''
# Load data
X_input, y_input = transform_data()
# Train, holdset, test split
y_test = pd.DataFrame(y_input[-5000:], columns=['Genre'])
y_train = pd.DataFrame(y_input[:50000], columns=['Genre'])
X_train = | pd.DataFrame(X_input.iloc[:50000, :], columns=X_input.columns) | pandas.DataFrame |
import pandas as pd
import os
import numpy as np
import gc
import copy
import datetime
import warnings
from tqdm import tqdm
from scipy import sparse
from numpy import array
from scipy.sparse import csr_matrix
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfTransformer
###############################################
######### Load data
###############################################
user_app = pd.read_csv('../../data/processed_data/user_app.csv', dtype={'uId':np.int32, 'appId':str})
app_info = pd.read_csv('../../data/processed_data/app_info.csv', dtype={'appId':str, 'category':int})
###############################################
########## Memory-compression helper
###############################################
# Downcast column dtypes to save memory
from tqdm import tqdm_notebook
class _Data_Preprocess:
def __init__(self):
self.int8_max = np.iinfo(np.int8).max
self.int8_min = np.iinfo(np.int8).min
self.int16_max = np.iinfo(np.int16).max
self.int16_min = np.iinfo(np.int16).min
self.int32_max = np.iinfo(np.int32).max
self.int32_min = np.iinfo(np.int32).min
self.int64_max = np.iinfo(np.int64).max
self.int64_min = np.iinfo(np.int64).min
self.float16_max = np.finfo(np.float16).max
self.float16_min = np.finfo(np.float16).min
self.float32_max = np.finfo(np.float32).max
self.float32_min = np.finfo(np.float32).min
self.float64_max = np.finfo(np.float64).max
self.float64_min = np.finfo(np.float64).min
'''
function: _get_type(self,min_val, max_val, types)
        get the narrowest dtype that the column values can safely be cast to
'''
def _get_type(self, min_val, max_val, types):
if types == 'int':
if max_val <= self.int8_max and min_val >= self.int8_min:
return np.int8
            elif max_val <= self.int16_max and min_val >= self.int16_min:
return np.int16
elif max_val <= self.int32_max and min_val >= self.int32_min:
return np.int32
return None
elif types == 'float':
if max_val <= self.float16_max and min_val >= self.float16_min:
return np.float16
if max_val <= self.float32_max and min_val >= self.float32_min:
return np.float32
if max_val <= self.float64_max and min_val >= self.float64_min:
return np.float64
return None
'''
function: _memory_process(self,df)
        downcast column data types to save memory
'''
def _memory_process(self, df):
init_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('Original data occupies {} GB memory.'.format(init_memory))
df_cols = df.columns
for col in tqdm_notebook(df_cols):
try:
if 'float' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'float')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
elif 'int' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'int')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
except:
print(' Can not do any process for column, {}.'.format(col))
afterprocess_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('After processing, the data occupies {} GB memory.'.format(afterprocess_memory))
return df
memory_preprocess = _Data_Preprocess()
# Usage:
# baseSet=memory_preprocess._memory_process(baseSet)
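# Hypothetical helper for illustration only (toy frame): the class above picks the
# narrowest dtype by comparing each column's min/max against np.iinfo/np.finfo
# bounds; pd.to_numeric(..., downcast=...) is a built-in shortcut for the same idea.
def _sketch_downcast():
    toy = pd.DataFrame({'uId': np.arange(3, dtype=np.int64), 'ratio': [0.1, 0.2, 0.3]})
    toy['uId'] = pd.to_numeric(toy['uId'], downcast='integer')    # becomes int8 here
    toy['ratio'] = pd.to_numeric(toy['ratio'], downcast='float')  # becomes float32
    return toy.dtypes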
###############################################
########## Number of apps installed per user and its share of all apps
###############################################
app_counts = user_app[['appId']].drop_duplicates().count()
userSub = user_app.groupby('uId')['appId'].nunique().reset_index().rename(columns={'appId': 'user_app_active_counts'})
userSub['user_app_active_ratio'] = userSub['user_app_active_counts'].apply(lambda x: x/app_counts)
del app_counts
user_app_active_counts = userSub.copy()
###############################################
######## Apps installed by users in each age group
###############################################
age_train = pd.read_csv('../../data/processed_data/age_train.csv',dtype={'uId':np.int32, 'age_group':np.int8})
userSub = pd.merge(age_train, user_app, how='left', on='uId')
userSub=pd.pivot_table(userSub, values='uId', index=['appId'],columns=['age_group'],aggfunc='count', fill_value=0)
userSub['sum']=userSub.sum(axis=1)
userSub= userSub.reset_index()
userSub.rename(columns={1:'age_1',2:'age_2',3:'age_3',4:'age_4',5:'age_5',6:'age_6'},inplace=True)
userSub.drop(axis=0, index=0, inplace=True)
userSub['age1_%']= userSub.apply(lambda x: round(x['age_1']/x['sum'],2),axis=1)
userSub['age2_%']= userSub.apply(lambda x: round(x['age_2']/x['sum'],2),axis=1)
userSub['age3_%']= userSub.apply(lambda x: round(x['age_3']/x['sum'],2),axis=1)
userSub['age4_%']= userSub.apply(lambda x: round(x['age_4']/x['sum'],2),axis=1)
userSub['age5_%']= userSub.apply(lambda x: round(x['age_5']/x['sum'],2),axis=1)
userSub['age6_%']= userSub.apply(lambda x: round(x['age_6']/x['sum'],2),axis=1)
age1 = userSub[(userSub['age1_%'] >= 0.3)][['appId']].copy()
age1['age_num1'] = 1
age2 = userSub[(userSub['age2_%'] >= 0.6)][['appId']].copy()
age2['age_num2'] = 1
age3 = userSub[(userSub['age3_%'] >= 0.6)][['appId']].copy()
age3['age_num3'] = 1
age4 = userSub[(userSub['age4_%'] >= 0.6)][['appId']].copy()
age4['age_num4'] = 1
age5 = userSub[(userSub['age5_%'] >= 0.3)][['appId']].copy()
age5['age_num5'] = 1
age6 = userSub[(userSub['age6_%'] >= 0.3)][['appId']].copy()
age6['age_num6'] = 1
userSub = pd.merge(user_app, age1, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age2, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age3, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age4, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age5, how='left', on='appId').fillna(0)
userSub = pd.merge(userSub, age6, how='left', on='appId').fillna(0)
userSub = userSub.groupby('uId').sum().reset_index()
user_active_app_age = userSub.copy()
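# Hypothetical helper for illustration only (toy frame): pivot_table with
# aggfunc='count' is what turns (uId, appId, age_group) rows into one row per app
# with one count column per age group, as done for the age features above.
def _sketch_age_pivot():
    toy = pd.DataFrame({'uId': [1, 2, 3], 'appId': ['a', 'a', 'b'], 'age_group': [1, 2, 1]})
    return pd.pivot_table(toy, values='uId', index=['appId'], columns=['age_group'],
                          aggfunc='count', fill_value=0)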
###############################################
########## Number of installed apps in each category per user
###############################################
userSub = pd.merge(user_app, app_info, how='left', on='appId').fillna(method='pad')
userSub = pd.pivot_table(userSub, values='appId', index=['uId'],columns=['category'], aggfunc='count', fill_value=0).reset_index()
userSub['use_app_cate_nums']=0
for i in range(25):
userSub['use_app_cate_nums']+=userSub[float(i)]
for i in range(26,30):
userSub['use_app_cate_nums']+=userSub[float(i)]
for i in range(34,36):
userSub['use_app_cate_nums']+=userSub[float(i)]
for i in range(25):
userSub[str(float(i))+ '_ratio']=userSub[float(i)]/userSub['use_app_cate_nums']
for i in range(26,30):
userSub[str(float(i))+ '_ratio']=userSub[float(i)]/userSub['use_app_cate_nums']
for i in range(34,36):
userSub[str(float(i))+ '_ratio']=userSub[float(i)]/userSub['use_app_cate_nums']
user_active_category_counts = userSub.copy()
###############################################
########## Number of distinct app categories installed per user
###############################################
userSub = pd.merge(user_app, app_info, how='left', on='appId').fillna(method='pad')
userSub = userSub[['uId', 'category']].groupby('uId')['category'].nunique().reset_index()
userSub.rename(columns={'category': 'active_cate_nums'}, inplace=True)
user_active_cate_nums = userSub.copy()
###############################################
########## Target-customer age index for each app
###############################################
age_train = pd.read_csv('../../data/processed_data/age_train.csv',dtype={'uId':np.int32, 'age_group':np.int8})
userSub = | pd.merge(age_train, user_app, how='left', on='uId') | pandas.merge |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns # used for plot interactive graph. I like it most for plot
from sklearn.model_selection import train_test_split # to split the data into two parts
from sklearn import preprocessing
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from keras.models import Model, Sequential
from keras.layers import Dense, Input, Activation
from keras.models import model_from_json
from keras import optimizers
from keras import backend as K
from keras.utils import plot_model
class ParkerAnalysis:
def __init__(self):
pass
def run(self):
pdata = pd.read_csv("./data/parker_sleeping.csv", header=0)
#pdata = pd.read_csv("./data/data.csv")
pdata.drop("id", axis=1, inplace=True)
pdata.drop("Counter", axis=1, inplace=True)
print(pdata)
data_cols = list(pdata.columns[1:8])
print(pdata.head())
print(pdata.describe())
fig, ax = plt.subplots()
ax.axis((0,6,0,3000))
ax.plot(pdata)
ax.legend(pdata.columns.values.tolist())
plt.show()
def do_correlation_matrix(self, drop_cols):
pdata = pd.read_csv("./data/data.csv", header=0)
for d in drop_cols:
pdata.drop(d, axis=1, inplace=True)
data_cols = list(pdata.columns[0:11])
corr = pdata[data_cols].corr() # .corr is used for find corelation
g = sns.clustermap(corr, cbar=True, square=True, annot=True, fmt='.2f', annot_kws={'size': 8},
cmap='coolwarm', figsize=(8, 8))
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.setp(g.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
plt.show()
g.savefig("pretty_map")
def do_data_scaling_an_normalization(self):
pdata = pd.read_csv("./data/parker_sleeping.csv", header=0)
pdata.drop("Day", axis=1, inplace=True)
pdata.drop("Counter", axis=1, inplace=True)
pdata.drop("Bed", axis=1, inplace=True)
pdata.drop("Sunshine", axis=1, inplace=True)
print("Data:")
print(pdata)
pretty_printer = lambda x: str.format('{:.2f}', x)
nd_normalized = preprocessing.normalize(pdata, norm="l2")
min_max_scaler = preprocessing.MinMaxScaler()
nd_scaled = min_max_scaler.fit_transform(pdata)
fig, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)
ax1.axis((0, 6, 0, 3000))
ax1.set_title("Raw Data")
ax1.plot(pdata)
ax2.set_title("Normalized")
ax2.axis((0,6,0, 0.2))
ax2.plot(nd_normalized)
ax3.set_title("Scaled")
ax3.axis((0, 6, 0, 1))
ax3.plot(nd_scaled)
ax1.legend(pdata.columns.values.tolist())
ax2.legend(pdata.columns.values.tolist())
ax3.legend(pdata.columns.values.tolist())
plt.show()
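    # Hypothetical helper for illustration only (toy array): preprocessing.normalize
    # rescales each *row* to unit L2 norm, while MinMaxScaler rescales each *column*
    # into [0, 1], which is why the two transformed panels above look so different.
    def _sketch_scaling(self):
        toy = np.array([[1.0, 2.0], [3.0, 4.0]])
        unit_rows = preprocessing.normalize(toy, norm="l2")       # each row norm becomes 1
        scaled = preprocessing.MinMaxScaler().fit_transform(toy)  # column-wise [0, 1]
        return unit_rows, scaled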
def preprocess_data(self, pdata, preserve):
preserved = pdata["{0}".format(preserve)]
pdata.drop("{0}".format(preserve), axis=1, inplace=True)
nd_normalized = preprocessing.normalize(pdata, norm="l2")
min_max_scaler = preprocessing.MinMaxScaler()
nd_scaled = min_max_scaler.fit_transform(nd_normalized)
# fig, (ax1,ax2,ax3) = plt.subplots(3)
# box1 = ax1.get_position()
# ax1.set_title("Raw Data")
# ax1.plot(pdata)
#
# box2 = ax2.get_position()
# ax2.set_title("Normalized")
# ax2.plot(nd_normalized)
#
# box3 = ax3.get_position()
# ax3.set_title("Scaled")
# ax3.plot(nd_scaled)
#
# plt.show()
preprocessed_data = pd.DataFrame(data=nd_scaled, columns=pdata.columns, dtype='float')
preprocessed_data["{0}".format(preserve)] = preserved.values
return preprocessed_data
def do_machine_learning_random_forest(self):
data = pd.read_csv("./data/data.csv", header=0)
data.drop("Unnamed: 32", axis=1, inplace=True)
data.drop("id", axis=1, inplace=True)
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
data = self.preprocess_data(data, preserve="diagnosis")
prediction_var = ['fractal_dimension_mean',
'smoothness_mean',
'symmetry_mean',
'radius_mean',
'texture_mean',
'compactness_mean']
train, test = train_test_split(data, test_size=0.3)
train_X = train[prediction_var]
train_y = train.diagnosis
test_X = test[prediction_var]
test_y = test.diagnosis
model = RandomForestClassifier(n_estimators=100)
model.fit(train_X, train_y.astype(int))
prediction = model.predict(test_X)
accuracy = metrics.accuracy_score(prediction, test_y)
plt.show()
print("Calculation complete. Random Forest Accuracy: {0}".format(accuracy))
return accuracy
def do_neural_network_estimation(self):
        data = pd.read_csv("./data/data.csv", header=0)
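
# Hypothetical driver (not part of the original module): a minimal sketch of how
# the class above might be exercised, assuming the CSV files it references
# ("./data/parker_sleeping.csv" and the breast-cancer "./data/data.csv") exist.
if __name__ == "__main__":
    analysis = ParkerAnalysis()
    analysis.run()                                   # plot the raw sleeping data
    analysis.do_data_scaling_an_normalization()      # raw vs. normalized vs. scaled
    analysis.do_correlation_matrix(drop_cols=["id", "diagnosis", "Unnamed: 32"])
    accuracy = analysis.do_machine_learning_random_forest()
    print("Random forest accuracy: {:.3f}".format(accuracy))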
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from numpy import array, append, nan, full
from numpy.testing import assert_almost_equal
import pandas as pd
from pandas.tslib import Timedelta
from catalyst.assets import Equity, Future
from catalyst.data.data_portal import HISTORY_FREQUENCIES, OHLCV_FIELDS
from catalyst.data.minute_bars import (
FUTURES_MINUTES_PER_DAY,
US_EQUITIES_MINUTES_PER_DAY,
)
from catalyst.testing import parameter_space
from catalyst.testing.fixtures import (
CatalystTestCase,
WithTradingSessions,
WithDataPortal,
alias,
)
from catalyst.testing.predicates import assert_equal
from catalyst.utils.numpy_utils import float64_dtype
class DataPortalTestBase(WithDataPortal,
WithTradingSessions,
CatalystTestCase):
ASSET_FINDER_EQUITY_SIDS = (1, 2)
START_DATE = pd.Timestamp('2016-08-01')
END_DATE = pd.Timestamp('2016-08-08')
TRADING_CALENDAR_STRS = ('NYSE', 'us_futures')
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = True
# Since the future with sid 10001 has a tick size of 0.0001, its prices
# should be rounded out to 4 decimal places. To test that this rounding
# occurs correctly, store its prices out to 5 decimal places by using a
# multiplier of 100,000 when writing its values.
OHLC_RATIOS_PER_SID = {10001: 100000}
@classmethod
    def make_root_symbols_info(cls):
return pd.DataFrame({
'root_symbol': ['BAR', 'BUZ'],
'root_symbol_id': [1, 2],
'exchange': ['CME', 'CME'],
})
@classmethod
def make_futures_info(cls):
trading_sessions = cls.trading_sessions['us_futures']
return pd.DataFrame({
'sid': [10000, 10001],
'root_symbol': ['BAR', 'BUZ'],
'symbol': ['BARA', 'BUZZ'],
'start_date': [trading_sessions[1], trading_sessions[0]],
'end_date': [cls.END_DATE, cls.END_DATE],
# TODO: Make separate from 'end_date'
'notice_date': [cls.END_DATE, cls.END_DATE],
'expiration_date': [cls.END_DATE, cls.END_DATE],
'tick_size': [0.01, 0.0001],
'multiplier': [500, 50000],
'exchange': ['CME', 'CME'],
})
@classmethod
def make_equity_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Equity]
# No data on first day.
dts = trading_calendar.minutes_for_session(cls.trading_days[0])
dfs = []
dfs.append(pd.DataFrame(
{
'open': full(len(dts), nan),
'high': full(len(dts), nan),
'low': full(len(dts), nan),
'close': full(len(dts), nan),
'volume': full(len(dts), 0),
},
index=dts))
dts = trading_calendar.minutes_for_session(cls.trading_days[1])
dfs.append(pd.DataFrame(
{
'open': append(100.5, full(len(dts) - 1, nan)),
'high': append(100.9, full(len(dts) - 1, nan)),
'low': append(100.1, full(len(dts) - 1, nan)),
'close': append(100.3, full(len(dts) - 1, nan)),
'volume': append(1000, full(len(dts) - 1, nan)),
},
index=dts))
dts = trading_calendar.minutes_for_session(cls.trading_days[2])
dfs.append(pd.DataFrame(
{
'open': [nan, 103.50, 102.50, 104.50, 101.50, nan],
'high': [nan, 103.90, 102.90, 104.90, 101.90, nan],
'low': [nan, 103.10, 102.10, 104.10, 101.10, nan],
'close': [nan, 103.30, 102.30, 104.30, 101.30, nan],
'volume': [0, 1003, 1002, 1004, 1001, 0]
},
index=dts[:6]
))
dts = trading_calendar.minutes_for_session(cls.trading_days[3])
dfs.append(pd.DataFrame(
{
'open': full(len(dts), nan),
'high': full(len(dts), nan),
'low': full(len(dts), nan),
'close': full(len(dts), nan),
'volume': full(len(dts), 0),
},
index=dts))
        asset1_df = pd.concat(dfs)
# coding: utf-8
# ## Lending Club - classification of loans
#
# This project analyzes Lending Club loan data from 2007-2015, available on Kaggle. The dataset contains over 887 thousand observations and 74 variables, one of which describes the loan status. The goal is to build a machine learning model that categorizes each loan as good or bad.
#
# Contents:
#
# 1. Preparing dataset for preprocessing
# 2. Reviewing variables - drop and edit
# 3. Missing values
# 4. Preparing dataset for modeling
# 5. Undersampling approach
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import datetime
import warnings
warnings.filterwarnings('ignore')
import seaborn as sns
sns.set(font_scale=1.6)
from sklearn.preprocessing import StandardScaler
# ### 1. Preparing dataset for preprocessing
#
# In this part I will load the data, briefly review the variables, and prepare the 'y' value that will describe each loan as good or bad.
# In[2]:
data = pd.read_csv('../input/loan.csv', parse_dates=True)
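
# In[3]:

# A sketch of the good/bad target described in the introduction. The exact
# mapping of loan_status values to "good" is an assumption for illustration,
# not taken from the original notebook.
good_statuses = ['Fully Paid', 'Current']
data['good_loan'] = data['loan_status'].isin(good_statuses).astype(int)
print(data['good_loan'].value_counts())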
"""
This code implements a support vector classifier using the sklearn package to learn a classification model for a chessboard-like dataset.
Written using Python 3.7
"""
# builtin modules
import os
import psutil
import requests
import sys
import math
# 3rd party modules
import pandas as pd
import numpy as np
import plotly.graph_objects as go
from sklearn import datasets, linear_model
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor
#TODO: add comments to each of the sklearn functions imported above,
# as to where they are used and why
def get_data(source_file):
# Define input and output filepaths
input_path = os.path.join(os.getcwd(),'datasets','in', source_file)
# Read input data
df = pd.read_csv(input_path)
return df
def plot_inputs(df, names_in:list = ['A','B','label']):
"""
Plot the input dataset as a scatter plot, showing the two classes with two different patterns.
    - df: dataframe with the input samples; the first two columns are the features, the third the label
    - names_in: a list of the names of the columns (headers) in the input df
returns:
- a plot of the figure in the default browser, and
- a PNG version of the plot to the "images" project directory
"""
# Create the figure for plotting the initial data
fig = go.Figure(data=go.Scatter(x=df[names_in[0]],
y=df[names_in[1]],
mode='markers',
marker=dict(
color=df[names_in[2]],
colorscale='Viridis',
line_width=1,
size = 16),
text=df[names_in[2]], # hover text goes here
showlegend=False)) # turn off legend only for this item
## Create the 1D array for X values from the first feature; this is just to be able to plot a line
## within the space defined by the two features explored
#X = np.linspace(0, max(df[names_in[0]].max(),df[names_in[1]].max()))
## Vector Y will calculated from the weights, w1, w2, the bias, b, and the value of X in its 1D linear space
#Y = []
#for b, w1, w2 in [weights]: #(matrix.tolist()[0] for matrix in weights):
# for x in X:
# if w2 == 0:
# y = 0.0
# else:
# y = (-(b / w2) / (b / w1))* x + (-b / w2) # per the equation of a line, e.g. C = Ax + By
# Y.append(y)
## Add the threshold line to the plot
#fig.add_trace(go.Scatter(x=X, y=Y,
# mode= 'lines',
# name = 'Threshold'))
# Give the figure a title
fig.update_layout(title='Perceptron Algorithm | Classification with support vector classifiers | Problem 3')
# Show the figure, by default will open a browser window
fig.show()
# export plot to png file to images directory
# create an images directory if not already present
if not os.path.exists("images"):
os.mkdir("images")
## write the png file with the plot/figure
return fig.write_image("images/fig3.png")
def plot_model(X, y, xx, y_, Z, model_type:str):
"""
Plot the decision boundary from:
- X: the features dataset,
- y: the labels vector,
- h: step size in the mesh, e.g. 0.02
- grid_search: model of the grid_search already fitted
- model_type: str of the type of model used for title of plot and filename of image to export
returns:
- a plot of the figure in the default browser, and
- a PNG version of the plot to the "images" project directory
"""
# Create the figure for plotting the model
fig = go.Figure(data=go.Scatter(x=X[:, 0], y=X[:, 1],
mode='markers',
showlegend=False,
marker=dict(size=10,
color=y,
colorscale='Jet',
line=dict(color='black', width=1))
))
# Add the heatmap to the plot
fig.add_trace(go.Heatmap(x=xx[0], y=y_, z=Z,
colorscale='Jet',
showscale=False))
# Give the figure a title and name the x,y axis as well
fig.update_layout(
title='Perceptron Algorithm | Classification with support vector classifiers | ' + model_type.upper(),
xaxis_title='True Values',
yaxis_title='Predicted Values')
# Show the figure, by default will open a browser window
fig.show()
# export plot to png file to images directory
# create an images directory if not already present
if not os.path.exists("images"):
os.mkdir("images")
## write the png file with the plot/figure
return fig.write_image("images/fig3-" + model_type + ".png")
def train_split(df, test_percentage:float = 0.40):
# only define test_percentage,
# by default train_percentage = (1 - test_percentage)
# our X matrix will be the first two cols of the dataframe: 'A' and 'B'
X = df[df.columns[0:2]].values
# our y vector will be the third col of the dataframe: 'label'
y = df['label']
# create training and testing vars
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_percentage, stratify = y)
print (X_train.shape, y_train.shape)
print (X_test.shape, y_test.shape)
return X, y, X_train, X_test, y_train, y_test
def apply_CSVC(X, y, X_train, X_test, y_train, y_test, model_type:str, k: int, kernel_type:str, parameters:dict):
if model_type == 'logistic':
logistic = linear_model.LogisticRegression()
start = parameters['C'][0]
stop = parameters['C'][-1]
num = len(parameters['C'])
C = np.logspace(start, stop, num)
penalty = ['l2']
hyperparameters = dict(C=C, penalty=penalty)
grid_search = GridSearchCV(logistic, hyperparameters, cv = k, verbose = 0)
if model_type == 'knn':
grid_params = parameters
grid_search = GridSearchCV(KNeighborsClassifier(), grid_params, cv = k, verbose = 0)
if model_type == 'decision_tree':
grid_search = GridSearchCV(DecisionTreeClassifier(random_state=42), parameters, verbose=1, cv=3)
if model_type == 'random_forest':
grid_search = GridSearchCV(RandomForestRegressor(random_state=42), parameters, verbose=1, cv=3)
if model_type == 'none':
        svc = SVC()
# specify cv as integer for number of folds in (stratified)KFold,
# cv set to perform 5-fold cross validation, although 'None' already uses the default 5-fold cross validation
grid_search = GridSearchCV(svc, parameters, cv = k)
grid_search.fit(X, y) # fit the model #TODO: clarify if fit shall be done on train datasets or on complete set
#get results best and test
best_score = grid_search.best_score_
predictions = grid_search.predict(X_test)
test_score = grid_search.score(X_test, y_test)
#print results
print("Best parameters for", kernel_type.upper(), "are:", clf.best_params_, sep=' ')
print("Best score for", kernel_type.upper(), "is:", clf.best_score_, sep=' ')
print("Test score for", kernel_type.upper(), "is:", test_score, sep=' ')
# let's plot the model
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
h = .02 # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max, h)
, np.arange(y_min, y_max, h))
y_ = np.arange(y_min, y_max, h)
Z = grid_search.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
#print(Z)
#Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
plot_model(X, y, xx, y_, Z, model_type)
return best_score, test_score
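
# Illustrative driver (a sketch, not part of the original script): the file name,
# grid values and fold count are placeholders chosen only to show how get_data,
# train_split and apply_CSVC fit together; it assumes the csv lives under
# datasets/in/ as get_data expects.
if __name__ == "__main__":
    df = get_data("input3.csv")          # columns: A, B, label
    plot_inputs(df)
    X, y, X_train, X_test, y_train, y_test = train_split(df, test_percentage=0.40)
    svc_grid = {"kernel": ["rbf"], "C": [0.1, 1, 10], "gamma": [0.1, 1]}
    best_score, test_score = apply_CSVC(X, y, X_train, X_test, y_train, y_test,
                                        model_type="none", k=5,
                                        kernel_type="rbf", parameters=svc_grid)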
def write_csv(filename, a, b, c):
# write the outputs csv file
filepath = os.path.join(os.getcwd(),'datasets','out', filename)
df_a = pd.DataFrame(a)
    df_b = pd.DataFrame(b)
import base64
import os, shutil, io, zipfile
from re import L, match
import json
from datetime import datetime, timedelta
from urllib.parse import urljoin
import requests
import pandas as pd
import pint
import numpy as np
#import geopandas as gpd
from django.views.decorators.csrf import csrf_protect, csrf_exempt
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404, HttpResponseBadRequest
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import authenticate
from django.core.files.storage import FileSystemStorage
from django.shortcuts import redirect, render
from django.contrib.auth.decorators import login_required
from django.utils.text import slugify
from api.exceptions import ModelNotExistException
from api.models.outputs import Run, Cambium
from api.tasks import run_model, task_status, build_model,upload_ts
from api.models.calliope import Abstract_Tech, Abstract_Tech_Param, Parameter
from api.models.configuration import Model, ParamsManager, Model_User,User_File, Location, Technology, Tech_Param, Loc_Tech, Loc_Tech_Param, Timeseries_Meta
from api.utils import zip_folder, initialize_units, convert_units, noconv_units
from taskmeta.models import CeleryTask
@csrf_protect
def build(request):
"""
Build and save the input files (YAML and CSV) for a Calliope run.
Parameters:
model_uuid (uuid): required
scenario_id (int): required
start_date (timestamp): required
end_date (timestamp): required
cluster (bool): optional
manual (bool): optional
Returns (json): Action Confirmation
Example:
GET: /api/build/
"""
# Input parameters
model_uuid = request.GET.get("model_uuid", None)
scenario_id = request.GET.get("scenario_id", None)
start_date = request.GET.get("start_date", None)
end_date = request.GET.get("end_date", None)
cluster = (request.GET.get("cluster", 'true') == 'true')
manual = (request.GET.get("manual", 'false') == 'true')
timestep = request.GET.get("timestep", '1H')
try:
pd.tseries.frequencies.to_offset(timestep)
except ValueError:
        payload = {
            "status": "Failed",
            "message": "'" + timestep + "' is not a valid timestep.",
        }
        return HttpResponse(json.dumps(payload, indent=4),
                            content_type="application/json")
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
try:
start_date = datetime.strptime(start_date, "%Y-%m-%d")
end_date = datetime.strptime(end_date,
"%Y-%m-%d") + timedelta(hours=23)
subset_time = str(start_date.date()) + " to " + str(end_date.date())
year = start_date.year
# model and scenario instances
scenario = model.scenarios.get(id=scenario_id)
# Create run instance
run = Run.objects.create(
model=model,
scenario=scenario,
year=year,
subset_time=subset_time,
status=task_status.QUEUED,
inputs_path="",
cluster=cluster,
manual=manual,
timestep=timestep,
)
# Generate File Path
timestamp = datetime.now().strftime("%Y-%m-%d %H%M%S")
model_name = ParamsManager.simplify_name(model.name)
scenario_name = ParamsManager.simplify_name(scenario.name)
inputs_path = "{}/{}/{}/{}/{}/{}/{}/inputs".format(
settings.DATA_STORAGE,
model.uuid,
model_name,
scenario_name,
year,
subset_time,
timestamp,
)
inputs_path = inputs_path.lower().replace(" ", "-")
os.makedirs(inputs_path, exist_ok=True)
# Celery task
async_result = build_model.apply_async(
kwargs={
"inputs_path": inputs_path,
"run_id": run.id,
"model_uuid": model_uuid,
"scenario_id": scenario_id,
"start_date": start_date,
"end_date": end_date,
}
)
build_task = CeleryTask.objects.get(task_id=async_result.id)
run.build_task = build_task
run.save()
payload = {
"status": "Success",
"model_uuid": model_uuid,
"scenario_id": scenario_id,
"year": start_date.year,
}
except Exception as e:
payload = {
"status": "Failed",
"message": "Please contact admin at <EMAIL> ' \
'regarding this error: {}".format(
str(e)
),
}
return HttpResponse(json.dumps(payload, indent=4),
content_type="application/json")
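
# Example client calls for the build and optimize endpoints (a sketch, not part
# of this module): the host, UUID and IDs are placeholders, and the CSRF/session
# handling required by @csrf_protect and the model access checks is omitted.
#
#   import requests
#   base = "http://localhost:8000"
#   uuid = "11111111-2222-3333-4444-555555555555"
#   requests.get(base + "/api/build/", params={
#       "model_uuid": uuid, "scenario_id": 1,
#       "start_date": "2016-01-01", "end_date": "2016-01-31", "timestep": "1H"})
#   requests.post(base + "/api/optimize/", data={"model_uuid": uuid, "run_id": 1})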
@csrf_protect
def optimize(request):
"""
Optimize a Calliope problem
Parameters:
model_uuid (uuid): required
run_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/optimize/
"""
run_id = request.POST["run_id"]
model_uuid = request.POST["model_uuid"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
payload = {"run_id": run_id}
# run instance does not exist.
try:
run = model.runs.get(id=run_id)
except ObjectDoesNotExist as e:
print(e)
payload["message"] = "Run ID {} does not exist.".format(run_id)
return HttpResponse(
json.dumps(payload, indent=4), content_type="application/json"
)
# model path does not exist
model_path = os.path.join(run.inputs_path, "model.yaml")
if not os.path.exists(model_path):
payload["message"] = "Invalid model config path!"
return HttpResponse(
json.dumps(payload, indent=4), content_type="application/json"
)
# run celery task
async_result = run_model.apply_async(
kwargs={"run_id": run_id, "model_path": model_path,
"user_id": request.user.id}
)
run_task, _ = CeleryTask.objects.get_or_create(task_id=async_result.id)
run.run_task = run_task
run.status = task_status.QUEUED
run.save()
payload = {"task_id": async_result.id}
return HttpResponse(json.dumps(payload, indent=4),
content_type="application/json")
@csrf_protect
def delete_run(request):
"""
Delete a scenario run
Parameters:
model_uuid (uuid): required
run_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/delete_run/
"""
model_uuid = request.POST["model_uuid"]
run_id = request.POST["run_id"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
run = model.runs.get(id=run_id)
if run.outputs_key:
data = {
'filename': run.outputs_key,
'project_uuid': str(model.uuid),
'private_key': settings.CAMBIUM_API_KEY,
}
try:
url = urljoin(settings.CAMBIUM_URL, "api/remove-data/")
requests.post(url, data=data).json()
except Exception as e:
print("Cambium removal failed - {}".format(e))
run.delete()
return HttpResponseRedirect("")
@csrf_protect
def publish_run(request):
"""
Publish a scenario run to Cambium (https://cambium.nrel.gov/)
Parameters:
model_uuid (uuid): required
run_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/publish_run/
"""
model_uuid = request.POST["model_uuid"]
run_id = request.POST["run_id"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
run = model.runs.filter(id=run_id).first()
msg = Cambium.push_run(run)
payload = {'message': msg}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def update_run_description(request):
"""
Update the description for a run
Parameters:
model_uuid (uuid): required
run_id (int): required
description (str): required
Returns (json): Action Confirmation
Example:
POST: /api/update_run_description/
"""
model_uuid = request.POST["model_uuid"]
run_id = int(request.POST["id"])
description = request.POST["value"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
try:
run = model.runs.get(id=run_id)
except ObjectDoesNotExist as e:
print(e)
payload = {}
payload["message"] = "Run ID {} does not exist.".format(run_id)
return HttpResponse(
json.dumps(payload, indent=4), content_type="application/json"
)
if description != run.description:
run.description = description
run.save()
payload = description
return HttpResponse(payload, content_type="text/plain")
def basic_auth_required(api_view):
def wrapper(request, *args, **kwargs):
try:
auth = request.META["HTTP_AUTHORIZATION"].split()
assert auth[0].lower() == "basic"
email, password = base64.b64decode(auth[1]).decode("utf-8").split(":")
user = authenticate(username=email, password=password)
if user is not None and user.is_active:
request.user = user
return api_view(request, *args, **kwargs)
else:
msg = "Invalid email or password! Please try again."
return HttpResponse(json.dumps({"error": msg}),
content_type="application/json")
except Exception as e:
msg = str(e)
if str(e) == "'HTTP_AUTHORIZATION'":
msg = "Authentorization failed! Please try Basic Auth."
return HttpResponse(json.dumps({"error": msg}),
content_type="application/json")
return wrapper
@csrf_protect
def download(request):
"""
Download files from a path to client machine
Parameters:
model_uuid (uuid): required
run_id (int): required
Returns (json): Action Confirmation
Example:
GET: /api/download/
"""
model_uuid = request.GET['model_uuid']
run_id = request.GET['run_id']
download_type = request.GET['type']
model = Model.by_uuid(model_uuid)
model.handle_view_access(request.user)
try:
run = Run.objects.get(id=run_id)
except Exception:
raise Http404
if download_type == 'inputs':
path = run.inputs_path
elif download_type == "outputs":
path = run.outputs_path
else:
raise Http404
if os.path.exists(path):
file_path = zip_folder(path)
with open(file_path, 'rb') as fh:
response = HttpResponse(fh.read(), content_type="application/text")
response['Content-Disposition'] = 'inline; filename='+slugify(model.name+'_'+run.scenario.name+'_'+str(run.year)+'_')+os.path.basename(file_path)
return response
return HttpResponse(
json.dumps({"message": "Not Found!"}, indent=4),
content_type="application/json"
)
@csrf_protect
def upload_outputs(request):
"""
Upload a zipped outputs file.
Parameters:
model_uuid (uuid): required
run_id (int): required
description (str): optional
myfile (file): required
Returns:
Example:
POST: /api/upload_outputs/
"""
model_uuid = request.POST["model_uuid"]
run_id = request.POST["run_id"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
try:
run = Run.objects.get(id=run_id)
except Exception:
print("No Run Found")
raise Http404
if (request.method == "POST") and ("myfile" in request.FILES):
myfile = request.FILES["myfile"]
if os.path.splitext(myfile.name)[1].lower() == '.zip':
model_dir = run.inputs_path.replace("/inputs","")
out_dir = os.path.join(model_dir,"outputs")
if not os.path.exists(out_dir):
os.makedirs(out_dir, exist_ok=True)
print(myfile.name)
fs = FileSystemStorage()
filename = fs.save(os.path.join(out_dir,myfile.name), myfile)
# Default assumes CSV files were directly zipped into archive
run.outputs_path = out_dir
shutil.unpack_archive(filename,out_dir)
# Loop through options for archived output directories rather than base CSVs
# TODO: Add user input on location of output CSVs via API option
for dir in ['outputs','model_outputs']:
if dir in os.listdir(out_dir):
run.outputs_path = os.path.join(out_dir,dir)
run.save()
return redirect("/%s/runs/" % model_uuid)
return redirect("/%s/runs/" % model_uuid)
print("No File Found")
raise Http404
@csrf_protect
@login_required
def upload_locations(request):
"""
Upload a CSV file with new/updated locations.
Parameters:
model_uuid (uuid): required
description (str): optional
myfile (file): required
col_map (dict): optional
Returns:
Example:
POST: /api/upload_locations/
"""
model_uuid = request.POST["model_uuid"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
context = {
'logs':[],
"model": model
}
if (request.method == "POST") and ("myfile" in request.FILES):
myfile = request.FILES["myfile"]
if os.path.splitext(myfile.name)[1].lower() == '.csv':
df = pd.read_csv(myfile)
else:
context['logs'].append('File format not supported. Please use a .csv.')
return render(request, "bulkresults.html", context)
if not set(['pretty_name','longitude','latitude']).issubset(set(df.columns)):
context['logs'].append('Missing required columns. pretty_name, longitude, latitude are required.')
return render(request, "bulkresults.html", context)
df = df.loc[:,df.columns.isin(['id','pretty_name','longitude','latitude','available_area','description'])]
df['model_id'] = model.id
df['name'] = df['pretty_name'].apply(lambda x: ParamsManager.simplify_name(x))
for i,row in df.iterrows():
if pd.isnull(row['pretty_name']):
context['logs'].append(str(i)+'- Missing pretty name. Skipped')
continue
if pd.isnull(row['latitude']) or pd.isnull(row['longitude']):
context['logs'].append(str(i)+'- Missing latitude or longitude. Skipped')
continue
if pd.isnull(row['available_area']):
row['available_area'] = None
if pd.isnull(row['description']):
row['description'] = None
if 'id' not in row.keys() or pd.isnull(row['id']):
location = Location.objects.create(**(row.dropna()))
else:
location = Location.objects.filter(id=row['id']).first()
if not location:
context['logs'].append(str(i)+'- Location '+row['pretty_name']+': No location with id '+str(row['id'])+' found to update. Skipped.')
continue
location.name = row['name']
location.pretty_name = row['pretty_name']
location.longitude = row['longitude']
location.latitude = row['latitude']
location.available_area = row['available_area']
location.description = row['description']
location.save()
return render(request, "bulkresults.html", context)
context['logs'].append("No file found")
return render(request, "bulkresults.html", context)
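
# A minimal CSV accepted by upload_locations (illustrative only; the column set
# mirrors the checks above and the values are made up):
#
#   pretty_name,longitude,latitude,available_area,description
#   Downtown Substation,-104.99,39.74,1000,example site
#   Airport Site,-104.67,39.86,,
#
# An optional "id" column switches a row from creating a new Location to
# updating the existing one with that id.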
@csrf_protect
@login_required
def upload_techs(request):
"""
Upload a CSV file with new/updated technologies.
Parameters:
model_uuid (uuid): required
description (str): optional
myfile (file): required
col_map (dict): optional
Returns:
Example:
POST: /api/upload_techs/
"""
model_uuid = request.POST["model_uuid"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
context = {
'logs':[],
"model": model
}
if (request.method == "POST") and ("myfile" in request.FILES):
myfile = request.FILES["myfile"]
if os.path.splitext(myfile.name)[1].lower() == '.csv':
df = pd.read_csv(myfile)
else:
context['logs'].append('File format not supported. Please use a .csv.')
return render(request, "bulkresults.html", context)
if not set(['pretty_name','abstract_tech']).issubset(set(df.columns)):
context['logs'].append('Missing required columns. pretty_name, abstract_tech are required.')
return render(request, "bulkresults.html", context)
df['name'] = df['pretty_name'].apply(lambda x: ParamsManager.simplify_name(str(x)))
if 'pretty_tag' in df.columns:
df['tag'] = df['pretty_tag'].apply(lambda x: ParamsManager.simplify_name(str(x)))
ureg = initialize_units()
for i,row in df.iterrows():
if pd.isnull(row['abstract_tech']):
context['logs'].append(str(i)+'- Tech '+row['pretty_name']+': Missing abstract_tech. Skipped.')
continue
if row['abstract_tech'] in ['conversion','conversion_plus']:
if 'carrier_in' not in row.keys() or 'carrier_out' not in row.keys() or pd.isnull(row['carrier_in']) or pd.isnull(row['carrier_out']):
context['logs'].append(str(i)+'- Tech '+row['pretty_name']+': Conversion techs require both carrier_in and carrier_out. Skipped.')
continue
else:
                if 'carrier' not in row.keys() or pd.isnull(row['carrier']):
from pathlib import Path
import torch
import sgad
import numpy as np
import copy
import time
from torch.utils.data import DataLoader
from yacs.config import CfgNode as CN
from tqdm import tqdm
from torch import nn
from torchvision.utils import save_image
import torch.nn.functional as F
import pandas
import os
import warnings
import random
from sgad.cgn.models.cgn import Reshape, init_net
from sgad.utils import save_cfg, Optimizers, compute_auc, Subset
from sgad.sgvae.utils import Mean, ConvBlock, Encoder, TextureDecoder, ShapeDecoder, rp_trick
from sgad.sgvae.utils import batched_score, logpx
class VAE(nn.Module):
def __init__(self,
z_dim=32,
h_channels=32,
img_dim=32,
img_channels=3,
init_type='orthogonal',
init_gain=0.1,
init_seed=None,
batch_size=32,
vae_type="texture",
std_approx="exp",
lr=0.0002,
betas=[0.5, 0.999],
device=None,
log_var_x_estimate = "conv_net",
**kwargs
):
"""
VAE constructor
argument values:
log_var_x_estimate - ["conv_net", "global", "else"] - if else is chosen, supply the
model.log_var_net_x with a callable function that returns the log var
std_approx - ["exp", "softplus"] - how std is computed from log_var
vae_type - ["shape", "texture"] - the composition of encoder and decoder is a little different
"""
# supertype init
super(VAE, self).__init__()
# set seed
if init_seed is not None:
torch.random.manual_seed(init_seed)
# params
self.batch_size = batch_size
self.z_dim = z_dim
self.img_dim = img_dim
self.img_channels = img_channels
if not std_approx in ["exp", "softplus"]:
raise ValueError("std_approx can only be `exp` or `softplus`")
self.std_approx = std_approx # exp or softplus
init_sz = img_dim // 4
# encoder + decoder
self.encoder = Encoder(z_dim, img_channels, h_channels, img_dim)
if vae_type == "texture":
self.out_channels = img_channels
self.decoder = TextureDecoder(z_dim, self.out_channels+1, h_channels, init_sz)
elif vae_type == "shape":
self.out_channels = 1
self.decoder = ShapeDecoder(z_dim, self.out_channels+1, h_channels, init_sz)
else:
raise ValueError(f'vae type {vae_type} unknown, try "shape" or "texture"')
# mu, log_var estimators
self.mu_net_z = nn.Linear(z_dim*2, z_dim)
self.log_var_net_z = nn.Linear(z_dim*2, z_dim)
self.mu_net_x = nn.Conv2d(self.out_channels+1, self.out_channels, 3, stride=1, padding=1, bias=False)
# either use a convnet or a trainable scalar for log_var_x
# but also you can support your own function that uses the output of the last layer of the decoder
if log_var_x_estimate == "conv_net":
self.log_var_net_x = nn.Sequential(
nn.Conv2d(self.out_channels+1, 1, 3, stride=1, padding=1, bias=False),
Mean(1,2,3),
Reshape(-1,1,1,1)
)
self.log_var_x_global = None
elif log_var_x_estimate == "global":
self.log_var_x_global = nn.Parameter(torch.Tensor([-1.0]))
self.log_var_net_x = lambda x: self.log_var_x_global
else:
warnings.warn(f"log_var_x_estimate {log_var_x_estimate} not known, you should set .log_var_net_x with a callable function")
# initialize the net
init_net(self, init_type=init_type, init_gain=init_gain)
# Optimizers
self.opts = Optimizers()
self.opts.set('vae', self, lr=lr, betas=betas)
# reset seed
torch.random.seed()
# choose device automatically
self.move_to(device)
# create config
self.config = CN()
self.config.z_dim = z_dim
self.config.h_channels = h_channels
self.config.img_dim = img_dim
self.config.img_channels = img_channels
self.config.batch_size = batch_size
self.config.init_type = init_type
self.config.init_gain = init_gain
self.config.init_seed = init_seed
self.config.vae_type = vae_type
self.config.std_approx = std_approx
self.config.lr = lr
self.config.betas = betas
self.config.log_var_x_estimate = log_var_x_estimate
def fit(self, X, y=None,
X_val=None, y_val=None,
n_epochs=20,
save_iter=1000,
verb=True,
save_results=True,
save_path=None,
workers=12,
max_train_time=np.inf # in seconds
):
"""Fit the model given X (and possibly y).
If y is supported, the classes should be labeled by integers like in range(n_classes).
Returns (losses_all, best_model, best_epoch)
"""
# setup the dataloader
y = torch.zeros(X.shape[0]).long() if y is None else y
tr_loader = DataLoader(Subset(torch.tensor(X).float(), y),
batch_size=self.config.batch_size,
shuffle=True,
num_workers=workers)
# also check the validation data
if X_val is not None and y_val is None:
raise ValueError("X_val given without y_val - please provide it as well.")
auc_val = best_auc_val = -1.0
# loss values
losses_all = {'iter': [], 'epoch': [], 'kld': [], 'logpx': [], 'elbo': [], 'auc_val': []}
# setup save paths
if save_results and save_path == None:
raise ValueError('If you want to save results, provide the save_path argument.')
if save_results:
model_path = Path(save_path)
weights_path = model_path / 'weights'
sample_path = model_path / 'samples'
weights_path.mkdir(parents=True, exist_ok=True)
sample_path.mkdir(parents=True, exist_ok=True)
# dump config
cfg = copy.deepcopy(self.config)
cfg.n_epochs = n_epochs
cfg.save_iter = save_iter
cfg.save_path = save_path
cfg.workers = workers
save_cfg(cfg, model_path / "cfg.yaml")
# samples for reconstruction
x_sample = X[random.sample(range(X.shape[0]), 30),:,:,:]
pbar = tqdm(range(n_epochs))
niter = 0
start_time = time.time()
for epoch in pbar:
for i, data in enumerate(tr_loader):
# Data to device
x = data['ims'].to(self.device)
# encode data, compute kld
mu_z, log_var_z = self.encode(x)
std_z = self.std(log_var_z)
z = rp_trick(mu_z, std_z)
kld = torch.mean(self.kld(mu_z, log_var_z))
# decode, compute logpx
mu_x, log_var_x = self.decode(z)
std_x = self.std(log_var_x)
lpx = torch.mean(logpx(x, mu_x, std_x))
# compute elbo
elbo = torch.mean(kld - lpx)
# do a step
self.opts.zero_grad(['vae'])
elbo.backward()
self.opts.step(['vae'], False) # use zero_grad = false here?
# collect losses
def get_val(t):
return float(t.data.cpu().numpy())
niter += 1
losses_all['iter'].append(niter)
losses_all['epoch'].append(epoch)
losses_all['elbo'].append(get_val(elbo))
losses_all['kld'].append(get_val(kld))
losses_all['logpx'].append(get_val(lpx))
losses_all['auc_val'].append(auc_val)
# output
if verb:
msg = f"[Batch {i}/{len(tr_loader)}]"
msg += ''.join(f"[elbo: {get_val(elbo):.3f}]")
msg += ''.join(f"[kld: {get_val(kld):.3f}]")
msg += ''.join(f"[logpx: {get_val(lpx):.3f}]")
msg += ''.join(f"[auc val: {auc_val:.3f}]")
pbar.set_description(msg)
# Saving
batches_done = epoch * len(tr_loader) + i
if save_results:
if batches_done % save_iter == 0:
print(f"Saving samples and weights to {model_path}")
self.save_sample_images(x_sample, sample_path, batches_done, n_cols=3)
self.save_weights(f"{weights_path}/vae_{batches_done:d}.pth")
                        outdf = pandas.DataFrame.from_dict(losses_all)
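
# Hypothetical smoke test (not part of the original module): builds a small VAE
# and fits it for one epoch on random images; the shapes and hyper-parameters
# are placeholders matching the constructor defaults above.
if __name__ == "__main__":
    x_fake = np.random.rand(64, 3, 32, 32).astype("float32")
    vae = VAE(z_dim=32, h_channels=32, img_dim=32, img_channels=3, batch_size=16)
    vae.fit(x_fake, n_epochs=1, save_results=False, workers=0)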
import os
import pandas as pd
import cv2
import scipy.stats as stat
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
from .matplotlibstyle import *
import datetime
class Datahandler():
'Matches EL images paths to IV data based on imput columns'
def __init__(self,workdir,ELfolderpath=None,IVfile=None):
'initialize and create folder'
self.dataset_id = None
# Create directory for computation on this dataset
self.pathDic = {
'workdir': workdir,
'ELfolderpath': ELfolderpath,
'IVfile': IVfile,
'figures': workdir+"figures\\",
'models': workdir+"models\\",
'traces': workdir+"traces\\",
'outputs': workdir+"outputs\\",
'Matchfile': workdir+"match.csv",
}
for key, value in self.pathDic.items():
if key in ['ELfolderpath','IVfile','Matchfile']: continue
if not os.path.exists(value): os.mkdir(value)
if os.path.exists(self.pathDic['Matchfile']):
self.loadMatchData()
def readEL(self):
'Read images from ELfolderpath and store in dataframe'
if not self.pathDic['ELfolderpath']: raise ValueError('ELfolderpath not defined')
images = []
for subdir,dirs,files in os.walk(self.pathDic['ELfolderpath']):
for file in files:
ext = os.path.splitext(file)[1]
if ext == ".db": continue
name = os.path.splitext(file)[0]
size = os.path.getsize(subdir+"\\"+file)
location = subdir+"\\"+file
line = size,ext,name,location
images.append(line)
self.ELdf = pd.DataFrame(images)
self.ELdf.columns=['size','extension','filename','path']
def readIV(self,sep=","):
'Read IV data from IVfile csv'
if not self.pathDic['IVfile']: raise ValueError('IVfile not defined')
        self.IVdf = pd.read_csv(self.pathDic['IVfile'], sep=sep)
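
# Hypothetical usage (the paths are placeholders): point the handler at a
# working directory plus the EL image folder and IV csv, then index both sources.
if __name__ == "__main__":
    dh = Datahandler(
        workdir="C:\\data\\el_iv\\",
        ELfolderpath="C:\\data\\el_images",
        IVfile="C:\\data\\iv_data.csv",
    )
    dh.readEL()    # walks ELfolderpath and builds self.ELdf
    dh.readIV()    # loads IVfile into self.IVdf
    print(dh.ELdf.head())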
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# #2689, #2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
def test_apply_non_numpy_dtype(self):
# See gh-12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def test_apply_dup_names_multi_agg(self):
# GH 21063
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
result = df.agg(['min'])
tm.assert_frame_equal(result, expected)
class TestInferOutputShape(object):
# the user has supplied an opaque UDF where
# they are transforming the input that requires
# us to infer the output
def test_infer_row_shape(self):
# gh-17437
# if row shape is changing, infer it
df = pd.DataFrame(np.random.rand(10, 2))
result = df.apply(np.fft.fft, axis=0)
assert result.shape == (10, 2)
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
def test_with_dictlike_columns(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
expected = Series([{'s': 3} for t in df.itertuples()])
assert_series_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
assert_series_equal(result, expected)
# compose a series
result = (df['a'] + df['b']).apply(lambda x: {'s': x})
expected = Series([{'s': 3}, {'s': 3}])
assert_series_equal(result, expected)
# gh-18775
df = DataFrame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(['17-10-2010 07:15:30',
'13-05-2011 08:20:35',
'15-01-2013 09:09:09'])
result = df.apply(lambda x: {}, axis=1)
expected = Series([{}, {}, {}])
assert_series_equal(result, expected)
def test_with_dictlike_columns_with_infer(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
expected = DataFrame({'s': [3, 3]})
assert_frame_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
assert_frame_equal(result, expected)
def test_with_listlike_columns(self):
# gh-17348
df = DataFrame({'a': Series(np.random.randn(4)),
'b': ['a', 'list', 'of', 'words'],
'ts': date_range('2016-10-01', periods=4, freq='H')})
result = df[['a', 'b']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'b']].itertuples()])
assert_series_equal(result, expected)
result = df[['a', 'ts']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'ts']].itertuples()])
assert_series_equal(result, expected)
# gh-18919
df = DataFrame({'x': Series([['a', 'b'], ['q']]),
'y': Series([['z'], ['q', 't']])})
df.index = MultiIndex.from_tuples([('i0', 'j0'), ('i1', 'j1')])
result = df.apply(
lambda row: [el for el in row['x'] if el in row['y']],
axis=1)
expected = Series([[], ['q']], index=df.index)
assert_series_equal(result, expected)
def test_infer_output_shape_columns(self):
# gh-18573
df = DataFrame({'number': [1., 2.],
'string': ['foo', 'bar'],
'datetime': [pd.Timestamp('2017-11-29 03:30:00'),
pd.Timestamp('2017-11-29 03:45:00')]})
result = df.apply(lambda row: (row.number, row.string), axis=1)
expected = Series([(t.number, t.string) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_infer_output_shape_listlike_columns(self):
# gh-16353
df = DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
# gh-17970
df = DataFrame({"a": [1, 2, 3]}, index=list('abc'))
result = df.apply(lambda row: np.ones(1), axis=1)
expected = Series([np.ones(1) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
result = df.apply(lambda row: np.ones(2), axis=1)
expected = Series([np.ones(2) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
# gh-17892
df = pd.DataFrame({'a': [pd.Timestamp('2010-02-01'),
pd.Timestamp('2010-02-04'),
pd.Timestamp('2010-02-05'),
pd.Timestamp('2010-02-06')],
'b': [9, 5, 4, 3],
'c': [5, 3, 4, 2],
'd': [1, 2, 3, 4]})
def fun(x):
return (1, 2)
result = df.apply(fun, axis=1)
expected = Series([(1, 2) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_coerce_for_shapes(self):
# we want column names to NOT be propagated
# just because the shape matches the input shape
df = DataFrame(np.random.randn(4, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_names(self):
# if a Series is returned, we should use the resulting index names
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: Series([1, 2, 3],
index=['test', 'other', 'cols']),
axis=1)
expected = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other', 'cols'])
assert_frame_equal(result, expected)
result = df.apply(
lambda x: pd.Series([1, 2], index=['test', 'other']), axis=1)
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other'])
assert_frame_equal(result, expected)
def test_result_type(self):
# result_type should be consistent no matter which
# path we take in the code
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand')
expected = df.copy()
expected.columns = [0, 1, 2]
assert_frame_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
expected = df[['A', 'B']].copy()
expected.columns = [0, 1]
assert_frame_equal(result, expected)
# broadcast result
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3],
index=columns),
axis=1,
result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
# series result
result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1)
expected = df.copy()
assert_frame_equal(result, expected)
# series result with other index
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3], index=columns),
axis=1)
expected = df.copy()
expected.columns = columns
assert_frame_equal(result, expected)
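    # Summary of the result_type semantics exercised above: 'expand' turns
    # list-likes into columns, 'broadcast' keeps the original shape and column
    # labels, and the default (None) infers -- a returned Series expands using
    # its own index as the columns.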
@pytest.mark.parametrize("result_type", ['foo', 1])
def test_result_type_error(self, result_type):
# allowed result_type
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2, 3],
axis=1,
result_type=result_type)
@pytest.mark.parametrize(
"box",
[lambda x: list(x),
lambda x: tuple(x),
lambda x: np.array(x, dtype='int64')],
ids=['list', 'tuple', 'array'])
def test_consistency_for_boxed(self, box):
# passing an array or list should not affect the output shape
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: box([1, 2]), axis=1)
expected = Series([box([1, 2]) for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: box([1, 2]), axis=1, result_type='expand')
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1)
assert_frame_equal(result, expected)
def zip_frames(frames, axis=1):
"""
    take a list of frames, zip them together under the
    assumption that these all have the first frame's index/columns.
Returns
-------
new_frame : DataFrame
"""
if axis == 1:
columns = frames[0].columns
zipped = [f.loc[:, c] for c in columns for f in frames]
return pd.concat(zipped, axis=1)
else:
index = frames[0].index
zipped = [f.loc[i, :] for i in index for f in frames]
return pd.DataFrame(zipped)
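# For example (illustrative): with frames that share columns ['A', 'B', 'C'],
# zip_frames([f_abs, f_sqrt]) interleaves the columns as
# abs(A), sqrt(A), abs(B), sqrt(B), abs(C), sqrt(C) -- the ordering the
# multi-function apply/transform tests below rely on.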
class TestDataFrameAggregate(TestData):
def test_agg_transform(self, axis):
other_axis = 1 if axis in {0, 'index'} else 0
with np.errstate(all='ignore'):
f_abs = np.abs(self.frame)
f_sqrt = np.sqrt(self.frame)
# ufunc
result = self.frame.transform(np.sqrt, axis=axis)
expected = f_sqrt.copy()
assert_frame_equal(result, expected)
result = self.frame.apply(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
result = self.frame.transform(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
# list-like
result = self.frame.apply([np.sqrt], axis=axis)
expected = f_sqrt.copy()
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.sqrt], axis=axis)
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
result = self.frame.apply([np.abs, np.sqrt], axis=axis)
expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['absolute', 'sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['absolute', 'sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.abs, 'sqrt'], axis=axis)
assert_frame_equal(result, expected)
def test_transform_and_agg_err(self, axis):
# cannot both transform and agg
def f():
self.frame.transform(['max', 'min'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.agg(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.transform(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
df = pd.DataFrame({'A': range(5), 'B': 5})
def f():
with np.errstate(all='ignore'):
df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']}, axis=axis)
@pytest.mark.parametrize('method', [
'abs', 'shift', 'pct_change', 'cumsum', 'rank',
])
def test_transform_method_name(self, method):
# https://github.com/pandas-dev/pandas/issues/19760
df = pd.DataFrame({"A": [-1, 2]})
result = df.transform(method)
expected = operator.methodcaller(method)(df)
tm.assert_frame_equal(result, expected)
def test_demo(self):
# demonstration tests
df = pd.DataFrame({'A': range(5), 'B': 5})
result = df.agg(['min', 'max'])
expected = DataFrame({'A': [0, 4], 'B': [5, 5]},
columns=['A', 'B'],
index=['min', 'max'])
tm.assert_frame_equal(result, expected)
result = df.agg({'A': ['min', 'max'], 'B': ['sum', 'max']})
expected = DataFrame({'A': [4.0, 0.0, np.nan],
'B': [5.0, np.nan, 25.0]},
columns=['A', 'B'],
index=['max', 'min', 'sum'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
def test_agg_multiple_mixed_no_warning(self):
# https://github.com/pandas-dev/pandas/issues/20909
mdf = pd.DataFrame({'A': [1, 2, 3],
'B': [1., 2., 3.],
'C': ['foo', 'bar', 'baz'],
'D': pd.date_range('20130101', periods=3)})
expected = pd.DataFrame({"A": [1, 6], 'B': [1.0, 6.0],
"C": ['bar', 'foobarbaz'],
"D": [pd.Timestamp('2013-01-01'), pd.NaT]},
index=['min', 'sum'])
# sorted index
        with tm.assert_produces_warning(None):
            result = mdf.agg(['min', 'sum'])
        tm.assert_frame_equal(result, expected)
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
assert result == expected
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
assert result == expected
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
assert result == expected
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with pytest.raises(TypeError):
dti + dti
with pytest.raises(TypeError):
dti_tz + dti_tz
with pytest.raises(TypeError):
dti_tz + dti
with pytest.raises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
            # create repeated values: the n-th element is repeated n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
assert idx[0] in idx
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
            tm.assert_index_equal(result, expected)
from flask import render_template, request, redirect, url_for, session
from app import app
from model import *
from model.main import *
import json
import pandas as pd
import numpy as np
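# DataStore keeps the fitted forecasting models in process memory so that the
# POST handlers (which only change the prediction horizon) can reuse the model
# fitted by the corresponding GET handler instead of refitting it per request.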
class DataStore():
model=None
model_month=None
sale_model=None
data = DataStore()
@app.route('/', methods=["GET"])
def home():
percent=percentageMethod()
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
with open('percent.json') as f:
file2 = json.load(f)
labels=file2['index']
data=file2['data']
if "username" in session:
return render_template('index.html', last_year=lastYear(), last_month=lastMonth(),dataset=data, label=labels, percent=percent,
month_index=month_index, month_data=month_data)
else:
return render_template('login.html')
# Register new user
@app.route('/register', methods=["GET", "POST"])
def register():
if request.method == "GET":
return render_template("register.html")
elif request.method == "POST":
registerUser()
return redirect(url_for("login"))
# Check if the email already exists on the registration page
@app.route('/checkusername', methods=["POST"])
def check():
return checkusername()
# Everything login (renders the login page; username existence and password verification are handled through jQuery AJAX requests)
@app.route('/login', methods=["GET"])
def login():
if request.method == "GET":
if "username" not in session:
return render_template("login.html")
else:
return redirect(url_for("home"))
@app.route('/checkloginusername', methods=["POST"])
def checkUserlogin():
return checkloginusername()
@app.route('/checkloginpassword', methods=["POST"])
def checkUserpassword():
return checkloginpassword()
#The admin logout
@app.route('/logout', methods=["GET"]) # URL for logout
def logout(): # logout function
session.pop('username', None) # remove user session
return redirect(url_for("home")) # redirect to home page with message
#Forgot Password
@app.route('/forgot-password', methods=["GET"])
def forgotpassword():
return render_template('forgot-password.html')
#404 Page
@app.route('/404', methods=["GET"])
def errorpage():
return render_template("404.html")
#Blank Page
@app.route('/blank', methods=["GET"])
def blank():
return render_template('blank.html')
@app.route('/totalyear', methods=["GET"])
def total_year():
total_year=totalYear()
file1=pd.read_json('total_year.json',orient='index')
year_index=np.array(file1['year'])
year_data=np.array(file1['total'])
return render_template("total_year.html",year_index=year_index, year_data=year_data)
@app.route('/totalmonth', methods=["GET"])
def total_month():
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
num=6
# Fit model
model=fit_model()
data.model_month=model
predict_rs, fitted_data=predict(model,6)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['total'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("total_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=model, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num=num)
def check_stationary():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def fit_model():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
data=data1['total']
stationary=check_stationary()
p=stationary[1]
if (p<0.05):
result1 = fit_model_stationary(data)
else:
result1 = fit_model_non_stationary(data)
return result1
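# Note: stationary() (imported from model.main) is assumed here to return
# something like (statistic, p-value); fit_model() treats p < 0.05 as
# stationary and chooses the fitting routine accordingly.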
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','total']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','total']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','total']]
return pred, fitted_month
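# Illustrative formulas for the metrics returned by test() below: with
# e = y - yhat, MSE = mean(e**2), RMSE = sqrt(MSE), MAE = mean(|e|) and
# MAPE = mean(|e / y|).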
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
    # print('Mean squared error MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/totalmonth', methods=["POST"])
def total_month_num():
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
#Get data
if request.method == "POST":
num = int(request.form.get("num_month"))
predict_rs, fitted_data=predict(data.model_month,num)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['total'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("total_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=data.model_month, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num=num)
def check_stationary():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data2=total_day[['date','total']]
data2.set_index('date', inplace=True)
date = pd.date_range(data2.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','total']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','total']]
return pred, fitted_month
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
    # print('Mean squared error MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/totaldate', methods=["GET"])
def total_date():
total_date=totalDate()
date_index=np.array(total_date['date'])
date_data=np.array(total_date['total'])
num=30
# Fit model
model_date=fit_model_date()
data.model=model_date
predict_rs_date, fitted_data_date=predict_date(model_date,30)
pred_index_date=np.array(predict_rs_date['date'])
pred_data_date=np.array(predict_rs_date['total'])
#Test model
test_rs= test_date(pred_data_date[0], fitted_data_date)
return render_template("total_date.html",date_index=date_index, date_data=date_data, stationary=check_stationary_date(), model_date=model_date, pred_index=pred_index_date, pred_data=pred_data_date, test_rs=test_rs, num=num)
def check_stationary_date():
total_date=totalDate()
data1=total_date[['date','total']]
data1.set_index('date', inplace=True)
result=stationary_trend(data1)
return result
def fit_model_date():
total_date=totalDate()
data1=total_date[['date','total']]
data1.set_index('date', inplace=True)
data=data1['total']
result1 = fit_model_fast(data)
return result1
def predict_date(model_date, num_predict):
if num_predict==0:
num_predict=30
fitted_date, confint_date = model_date.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','total']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict)
fitted_seri_date = pd.Series(fitted_date, index=date)
dff=pd.DataFrame(fitted_seri_date)
dff=dff.reset_index()
dff.columns=['date','total']
dff['date'] = pd.to_datetime(dff['date']).dt.to_period('D')
pred=dff[['date','total']]
return pred, fitted_date
def test_date(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
    # print('Mean squared error MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/totaldate', methods=["POST"])
def total_date_num():
total_date=totalDate()
date_index=np.array(total_date['date'])
date_data=np.array(total_date['total'])
#Get data
if request.method == "POST":
num = int(request.form.get("num_date"))
predict_rs_date, fitted_data_date=predict_date(data.model,num)
pred_index_date=np.array(predict_rs_date['date'])
pred_data_date=np.array(predict_rs_date['total'])
test_rs= test_date(pred_data_date[0], fitted_data_date)
return render_template("total_date.html",date_index=date_index, date_data=date_data, stationary=check_stationary_date(), model_date=data.model, pred_index=pred_index_date, pred_data=pred_data_date, test_rs=test_rs, num=num)
def check_stationary_date():
total_date=totalDate()
data1=total_date[['date','total']]
data1.set_index('date', inplace=True)
result=stationary_trend(data1)
return result
def predict_date(model_date, num_predict):
if num_predict==0:
num_predict=6
fitted_date, confint_date = model_date.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data2=total_day[['date','total']]
data2.set_index('date', inplace=True)
date = pd.date_range(data2.index[-1], periods=num_predict)
fitted_seri_date = pd.Series(fitted_date, index=date)
dff=pd.DataFrame(fitted_seri_date)
dff=dff.reset_index()
dff.columns=['date','total']
dff['date'] = pd.to_datetime(dff['date']).dt.to_period('D')
pred=dff[['date','total']]
return pred, fitted_date
def test_date(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
return mse, rmse, mae, mape
@app.route('/revenueyear', methods=["GET"])
def revenue_year():
sale_year=saleYear()
year_index=np.array(sale_year['year'])
year_data=np.array(sale_year['quantity'])
return render_template("revenue_year.html",year_index=year_index, year_data=year_data)
@app.route('/revenuemonth', methods=["GET"])
def revenue_month():
total_month=saleMonth()
file1=pd.read_json('sale_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['quantity'])
num_sale=6
# Fit model
model=fit_model()
data.model_month=model
predict_rs, fitted_data=predict(model,6)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['quantity'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("revenue_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=model, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num_sale=num_sale)
def check_stationary():
total_month=saleMonth()
data1=total_month[['month_year','quantity']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def fit_model():
total_month=saleMonth()
data1=total_month[['month_year','quantity']]
data1.set_index('month_year', inplace=True)
data=data1['quantity']
stationary=check_stationary()
p=stationary[1]
if (p<0.05):
result1 = fit_model_stationary(data)
else:
result1 = fit_model_non_stationary(data)
return result1
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['quantity', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','quantity']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','quantity']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','quantity']]
return pred, fitted_month
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
    # print('Mean squared error MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/revenuemonth', methods=["POST"])
def revenue_month_num():
total_month=saleMonth()
file1=pd.read_json('sale_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['quantity'])
#Get data
if request.method == "POST":
num_sale= int(request.form.get("sale_month"))
predict_rs, fitted_data=predict(data.model_month,num_sale)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['quantity'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("revenue_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=data.model_month, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num_sale=num_sale)
def check_stationary():
total_month=saleMonth()
data1=total_month[['month_year','quantity']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['quantity', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data2=total_day[['date','quantity']]
data2.set_index('date', inplace=True)
date = pd.date_range(data2.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','quantity']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','quantity']]
return pred, fitted_month
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
    # print('Mean squared error MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/revenuedate', methods=["GET"])
def revenue_date():
total_date=saleDate()
date_index=np.array(total_date['date'])
date_data=np.array(total_date['quantity'])
num=30
# Fit model
model_date=fit_model_date()
data.sale_model=model_date
predict_rs_date, fitted_data_date=predict_date(model_date,30)
pred_index_date=np.array(predict_rs_date['date'])
pred_data_date=np.array(predict_rs_date['quantity'])
#Test model
test_rs= test_date(pred_data_date[0], fitted_data_date)
return render_template("revenue_date.html",date_index=date_index, date_data=date_data, stationary=check_stationary_date(), model_date=model_date, pred_index=pred_index_date, pred_data=pred_data_date, test_rs=test_rs, num=num)
def check_stationary_date():
total_date=saleDate()
data1=total_date[['date','quantity']]
data1.set_index('date', inplace=True)
result=stationary_trend(data1)
return result
def fit_model_date():
total_date=saleDate()
data1=total_date[['date','quantity']]
data1.set_index('date', inplace=True)
data=data1['quantity']
result1 = fit_model_fast(data)
return result1
def predict_date(model_date, num_predict):
if num_predict==0:
num_predict=30
fitted_date, confint_date = model_date.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['quantity', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','quantity']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict)
fitted_seri_date = pd.Series(fitted_date, index=date)
dff=pd.DataFrame(fitted_seri_date)
dff=dff.reset_index()
dff.columns=['date','quantity']
dff['date'] = pd.to_datetime(dff['date']).dt.to_period('D')
pred=dff[['date','quantity']]
return pred, fitted_date
def test_date(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
    # print('Mean squared error MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/revenuedate', methods=["POST"])
def revenue_date_num():
total_date=saleDate()
date_index=np.array(total_date['date'])
date_data=np.array(total_date['quantity'])
#Get data
if request.method == "POST":
num = int(request.form.get("sale_date"))
predict_rs_date, fitted_data_date=predict_date(data.sale_model,num)
pred_index_date=np.array(predict_rs_date['date'])
pred_data_date=np.array(predict_rs_date['quantity'])
test_rs= test_date(pred_data_date[0], fitted_data_date)
return render_template("revenue_date.html",date_index=date_index, date_data=date_data, stationary=check_stationary_date(), model_date=data.sale_model, pred_index=pred_index_date, pred_data=pred_data_date, test_rs=test_rs, num=num)
def check_stationary_date():
total_date=saleDate()
data1=total_date[['date','quantity']]
data1.set_index('date', inplace=True)
result=stationary_trend(data1)
return result
def predict_date(model_date, num_predict):
if num_predict==0:
num_predict=6
fitted_date, confint_date = model_date.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['quantity', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data2=total_day[['date','quantity']]
data2.set_index('date', inplace=True)
date = pd.date_range(data2.index[-1], periods=num_predict)
fitted_seri_date = pd.Series(fitted_date, index=date)
    dff=pd.DataFrame(fitted_seri_date)
    dff=dff.reset_index()
    dff.columns=['date','quantity']
    dff['date'] = pd.to_datetime(dff['date']).dt.to_period('D')
    pred=dff[['date','quantity']]
    return pred, fitted_date
import pandas
import numpy
from itertools import islice
from CommonDef.DefStr import *
from Statistics_TechIndicators.CalcTechIndictors import *
def AddAdjOHLbyAdjC(srcData):
dstData = srcData.copy()
adjClose_offset = srcData[strAdjClose] - srcData[strClose]
dstData[strAdjOpen] = pandas.Series(srcData[strOpen]+adjClose_offset, index=srcData.index)
dstData[strAdjHigh] = pandas.Series(srcData[strHigh]+adjClose_offset, index=srcData.index)
dstData[strAdjLow] = pandas.Series(srcData[strLow]+adjClose_offset, index=srcData.index)
return dstData
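# Note the two adjustment styles: AddAdjOHLbyAdjC above shifts Open/High/Low
# additively by (AdjClose - Close), while TransToAdjOHLCbyAdjC below rescales
# Open/High/Low/Close multiplicatively by AdjClose / Close and then drops the
# AdjClose column.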
def TransToAdjOHLCbyAdjC(srcData):
dstData = srcData.copy()
adjClose_scale = srcData[strAdjClose] / srcData[strClose]
dstData[strOpen] *= adjClose_scale
dstData[strHigh] *= adjClose_scale
dstData[strLow] *= adjClose_scale
dstData[strClose] = srcData[strAdjClose]
dstData = dstData.drop(columns=[strAdjClose])
return dstData
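# AddNeighborFeatures appends, for each of the previous `neighbor_size` rows,
# a lagged copy of every column (suffixed _N0, _N1, ...) built with shift(),
# so each row carries its own values plus those of its recent neighbors.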
def AddNeighborFeatures(srcData, neighbor_size, DropNan = True):
dstData = srcData.copy()
neighborsData = {}
start_index = neighbor_size
for index, row in islice(dstData.iterrows(), start_index, None):
for i in range(neighbor_size):
neighborsData[strNeighbor+str(i)] = dstData.shift(i+1)
for i in range(neighbor_size):
for col_name in list(dstData):
neighborsData[strNeighbor+str(i)].rename(columns={col_name: col_name+'_N'+str(i)}, inplace=True)
for neighbor_key in neighborsData:
dstData = dstData.join(neighborsData[neighbor_key])
if DropNan:
dstData = dstData.dropna(axis=0, how='any')
return dstData
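# Example usage of the indicator helpers below (illustrative; assumes `ohlcv`
# is a DataFrame whose columns are named by the constants in CommonDef.DefStr):
#   enriched = AddSMAIndictor(AddRSIIndictor(ohlcv, window=14), window=10)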
# SMA: simple moving average
def AddSMAIndictor(srcData, window, DropNan = True):
dstData = srcData.copy()
SMA = GetRollingMean(srcData[strClose], window).to_frame()
SMA.rename(columns= {strClose: strSMA+'_W'+str(window)}, inplace=True)
dstData = dstData.join(SMA)
if DropNan:
dstData = dstData.dropna(axis=0, how='any')
return dstData
# EMA: exponential moving average
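# (conventionally EMA_t = a*close_t + (1-a)*EMA_{t-1} with a = 2/(window+1);
# the exact smoothing used is whatever GetEMA in CalcTechIndictors implements)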
def AddEMAIndictor(srcData, window, DropNan = True):
dstData = srcData.copy()
EMA = GetEMA(srcData[strClose], window).to_frame()
EMA.rename(columns= {strClose: strEMA+'_W'+str(window)}, inplace=True)
dstData = dstData.join(EMA)
if DropNan:
dstData = dstData.dropna(axis=0, how='any')
return dstData
# SMMA: smoothed moving average
def AddSMMAIndictor(srcData, window, DropNan = True):
dstData = srcData.copy()
SMMA = GetSMMA(srcData[strClose], window).to_frame()
SMMA.rename(columns= {strClose: strSMMA+'_W'+str(window)}, inplace=True)
dstData = dstData.join(SMMA)
if DropNan:
dstData = dstData.dropna(axis=0, how='any')
return dstData
# DMA: Different of Moving Average (10, 50)
def AddDMAIndictor(srcData, short_window, long_window, DropNan = True):
dstData = srcData.copy()
DMA = GetDMA(srcData[strClose], short_window, long_window).to_frame()
DMA.rename(columns= {strClose: strDMA+'_SW'+str(short_window)+'_LW'+str(long_window)}, inplace=True)
dstData = dstData.join(DMA)
if DropNan:
dstData = dstData.dropna(axis=0, how='any')
return dstData
# MSTD: moving standard deviation
def AddMSTDIndictor(srcData, window, DropNan = True):
dstData = srcData.copy()
MSTD = GetRollingStd(srcData[strClose], window).to_frame()
MSTD.rename(columns= {strClose: strMSTD+'_W'+str(window)}, inplace=True)
dstData = dstData.join(MSTD)
if DropNan:
dstData = dstData.dropna(axis=0, how='any')
return dstData
# MVAR: moving variance
def AddMVARIndictor(srcData, window, DropNan = True):
dstData = srcData.copy()
MVAR = GetRollingVar(srcData[strClose], window).to_frame()
MVAR.rename(columns= {strClose: strMVAR+'_W'+str(window)}, inplace=True)
dstData = dstData.join(MVAR)
if DropNan:
dstData = dstData.dropna(axis=0, how='any')
return dstData
# RSV: raw stochastic value
def AddRSVIndictor(srcData, window, DropNan = True):
dstData = srcData.copy()
RSV = GetRSV(srcData[strClose], srcData[strHigh], srcData[strLow], window).to_frame()
RSV.rename(columns= {strClose: strRSV+'_W'+str(window)}, inplace=True)
dstData = dstData.join(RSV)
if DropNan:
dstData = dstData.dropna(axis=0, how='any')
return dstData
# RSI: relative strength index
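# (conventionally RSI = 100 - 100/(1 + RS) with RS = average gain / average
# loss over the window; the exact definition is whatever GetRSI implements)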
def AddRSIIndictor(srcData, window, DropNan = True):
dstData = srcData.copy()
RSI = GetRSI(srcData[strClose], window).to_frame()
RSI.rename(columns= {strClose: strRSI+'_W'+str(window)}, inplace=True)
dstData = dstData.join(RSI)
if DropNan:
dstData = dstData.dropna(axis=0, how='any')
return dstData
# MACD: moving average convergence divergence
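# (per the usual MACD convention: DIF = fast EMA - slow EMA, DEM = EMA of DIF
# over signal_period, OSC = DIF - DEM; the names match GetMACD's three outputs)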
def AddMACDIndictor(srcData, fast_period, slow_period, signal_period, DropNan = True):
dstData = srcData.copy()
DIF, DEM, OSC = GetMACD(srcData[strClose], fast_period, slow_period, signal_period)
DIF.rename(strMACD_DIF, inplace=True)
DEM.rename(strMACD_DEM, inplace=True)
OSC.rename(strMACD_OSC, inplace=True)
MACD = pandas.concat([DIF, DEM, OSC], axis=1)
dstData = dstData.join(MACD)
if DropNan:
dstData = dstData.dropna(axis=0, how='any')
return dstData
# Williams Overbought/Oversold index
def AddWRIndictor(srcData, window, DropNan = True):
dstData = srcData.copy()
WR = GetWR(srcData[strClose], srcData[strHigh], srcData[strLow], window)
WR.rename(strWR+'_W'+str(window), inplace=True)
dstData = dstData.join(WR)
if DropNan:
dstData = dstData.dropna(axis=0, how='any')
return dstData
# CCI: Commodity Channel Index
def AddCCIIndictor(srcData, window, DropNan = True):
dstData = srcData.copy()
CCI = GetCCI(srcData[strClose], srcData[strHigh], srcData[strLow], window)
CCI.rename(strCCI+'_W'+str(window), inplace=True)
dstData = dstData.join(CCI)
if DropNan:
dstData = dstData.dropna(axis=0, how='any')
return dstData
# TR: true range
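# (conventionally TR = max(high - low, |high - prev_close|, |low - prev_close|);
# the exact definition is whatever GetTR implements)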
def AddTRIndictor(srcData, DropNan = True):
dstData = srcData.copy()
dstData[strTR] = GetTR(srcData[strClose], srcData[strHigh], srcData[strLow])
if DropNan:
dstData = dstData.dropna(axis=0, how='any')
return dstData
# ATR: average true range
def AddATRIndictor(srcData, window, DropNan = True):
dstData = srcData.copy()
dstData[strATR+'_W'+str(window)] = GetATR(srcData[strClose], srcData[strHigh], srcData[strLow], window)
if DropNan:
dstData = dstData.dropna(axis=0, how='any')
return dstData
# DMI: Directional Moving Index
def AddDMIIndictor(srcData, window, DropNan = True):
dstData = srcData.copy()
pDI, nDI, ADX, ADXR = GetDMI(srcData[strClose], srcData[strHigh], srcData[strLow], window)
pDI.rename(strpDI+'_W'+str(window), inplace=True)
nDI.rename(strnDI+'_W'+str(window), inplace=True)
ADX.rename(strADX+'_W'+str(window), inplace=True)
ADXR.rename(strADXR+'_W'+str(window), inplace=True)
    DMI = pandas.concat([pDI, nDI, ADX, ADXR], axis=1)
    dstData = dstData.join(DMI)
    if DropNan:
        dstData = dstData.dropna(axis=0, how='any')
    return dstData
import json
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.exceptions import PipelineScoreError
from evalml.model_understanding.prediction_explanations.explainers import (
abs_error,
cross_entropy,
explain_prediction,
explain_predictions,
explain_predictions_best_worst
)
from evalml.problem_types import ProblemTypes
def compare_two_tables(table_1, table_2):
assert len(table_1) == len(table_2)
for row, row_answer in zip(table_1, table_2):
assert row.strip().split() == row_answer.strip().split()
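# compare_two_tables treats two rows as equal when their whitespace-separated
# tokens match, so differences in column padding/alignment are ignored.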
test_features = [[1], np.ones((15, 1)), pd.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}).iloc[0],
pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}), pd.DataFrame()]
@pytest.mark.parametrize("test_features", test_features)
def test_explain_prediction_value_error(test_features):
with pytest.raises(ValueError, match="features must be stored in a dataframe or datatable with exactly one row."):
explain_prediction(None, input_features=test_features, training_data=None)
explain_prediction_answer = """Feature Name Feature Value Contribution to Prediction
=========================================================
d 40.00 +++++
b 20.00 -----""".splitlines()
explain_prediction_regression_dict_answer = {
"explanations": [{
"feature_names": ["d", "b"],
"feature_values": [40, 20],
"qualitative_explanation": ["+++++", "-----"],
"quantitative_explanation": [None, None],
"class_name": None
}]
}
explain_predictions_regression_df_answer = pd.DataFrame({'feature_names': ['d', 'b'],
'feature_values': [40, 20],
'qualitative_explanation': ['+++++', '-----'],
"quantitative_explanation": [None, None]})
explain_prediction_binary_dict_answer = {
"explanations": [{
"feature_names": ["d", "b"],
"feature_values": [40, 20],
"qualitative_explanation": ["+++++", "-----"],
"quantitative_explanation": [None, None],
"class_name": "class_1"
}]
}
explain_prediction_binary_df_answer = pd.DataFrame({
"feature_names": ["d", "b"],
"feature_values": [40, 20],
"qualitative_explanation": ["+++++", "-----"],
"quantitative_explanation": [None, None],
"class_name": ["class_1", "class_1"]
})
explain_prediction_multiclass_answer = """Class: class_0
Feature Name Feature Value Contribution to Prediction
=========================================================
a 10.00 +++++
c 30.00 ---
Class: class_1
Feature Name Feature Value Contribution to Prediction
=========================================================
a 10.00 +++
b 20.00 ++
Class: class_2
Feature Name Feature Value Contribution to Prediction
=========================================================
c 30.00 ---
d 40.00 ---
""".splitlines()
explain_prediction_multiclass_dict_answer = {
"explanations": [
{"feature_names": ["a", "c"],
"feature_values": [10, 30],
"qualitative_explanation": ["+++++", "---"],
"quantitative_explanation": [None, None],
"class_name": "class_0"},
{"feature_names": ["a", "b"],
"feature_values": [10, 20],
"qualitative_explanation": ["+++", "++"],
"quantitative_explanation": [None, None],
"class_name": "class_1"},
{"feature_names": ["c", "d"],
"feature_values": [30, 40],
"qualitative_explanation": ["---", "---"],
"quantitative_explanation": [None, None],
"class_name": "class_2"},
]
}
explain_prediction_multiclass_df_answer = pd.DataFrame({
"feature_names": ["a", "c", "a", "b", "c", "d"],
"feature_values": [10, 30, 10, 20, 30, 40],
"qualitative_explanation": ["+++++", "---", "+++", "++", "---", "---"],
"quantitative_explanation": [None, None, None, None, None, None],
"class_name": ['class_0', 'class_0', 'class_1', 'class_1', 'class_2', 'class_2']
})
@pytest.mark.parametrize("problem_type, output_format, shap_values, normalized_shap_values, answer",
[(ProblemTypes.REGRESSION,
"text",
{"a": [1], "b": [-2.1], "c": [-0.25], "d": [2.3]},
{"a": [0.5], "b": [-2.1], "c": [-0.25], "d": [2.3]},
explain_prediction_answer),
(ProblemTypes.REGRESSION,
"dict",
{"a": [1], "b": [-2.1], "c": [-0.25], "d": [2.3]},
{"a": [0.5], "b": [-2.1], "c": [-0.25], "d": [2.3]},
explain_prediction_regression_dict_answer
),
(ProblemTypes.REGRESSION,
"dataframe",
{"a": [1], "b": [-2.1], "c": [-0.25], "d": [2.3]},
{"a": [0.5], "b": [-2.1], "c": [-0.25], "d": [2.3]},
explain_predictions_regression_df_answer
),
(ProblemTypes.BINARY,
"text",
[{}, {"a": [0.5], "b": [-0.89], "c": [0.33], "d": [0.89]}],
[{}, {"a": [0.5], "b": [-0.89], "c": [-0.25], "d": [0.89]}],
explain_prediction_answer),
(ProblemTypes.BINARY,
"dict",
[{}, {"a": [0.5], "b": [-0.89], "c": [0.33], "d": [0.89]}],
[{}, {"a": [0.5], "b": [-0.89], "c": [-0.25], "d": [0.89]}],
explain_prediction_binary_dict_answer),
(ProblemTypes.BINARY,
"dataframe",
[{}, {"a": [0.5], "b": [-0.89], "c": [0.33], "d": [0.89]}],
[{}, {"a": [0.5], "b": [-0.89], "c": [-0.25], "d": [0.89]}],
explain_prediction_binary_df_answer),
(ProblemTypes.MULTICLASS,
"text",
[{}, {}, {}],
[{"a": [1.1], "b": [0.09], "c": [-0.53], "d": [-0.06]},
{"a": [0.53], "b": [0.24], "c": [-0.15], "d": [-0.22]},
{"a": [0.03], "b": [0.02], "c": [-0.42], "d": [-0.47]}],
explain_prediction_multiclass_answer),
(ProblemTypes.MULTICLASS,
"dict",
[{}, {}, {}],
[{"a": [1.1], "b": [0.09], "c": [-0.53], "d": [-0.06]},
{"a": [0.53], "b": [0.24], "c": [-0.15], "d": [-0.22]},
{"a": [0.03], "b": [0.02], "c": [-0.42], "d": [-0.47]}],
explain_prediction_multiclass_dict_answer),
(ProblemTypes.MULTICLASS,
"dataframe",
[{}, {}, {}],
[{"a": [1.1], "b": [0.09], "c": [-0.53], "d": [-0.06]},
{"a": [0.53], "b": [0.24], "c": [-0.15], "d": [-0.22]},
{"a": [0.03], "b": [0.02], "c": [-0.42], "d": [-0.47]}],
explain_prediction_multiclass_df_answer)
])
@pytest.mark.parametrize("input_type", ["pd", "ww"])
@patch("evalml.model_understanding.prediction_explanations._user_interface._compute_shap_values")
@patch("evalml.model_understanding.prediction_explanations._user_interface._normalize_shap_values")
def test_explain_prediction(mock_normalize_shap_values,
mock_compute_shap_values,
problem_type, output_format, shap_values, normalized_shap_values, answer,
input_type):
mock_compute_shap_values.return_value = shap_values
mock_normalize_shap_values.return_value = normalized_shap_values
pipeline = MagicMock()
pipeline.problem_type = problem_type
pipeline.classes_ = ["class_0", "class_1", "class_2"]
# By the time we call transform, we are looking at only one row of the input data.
pipeline.compute_estimator_features.return_value = ww.DataTable(pd.DataFrame({"a": [10], "b": [20], "c": [30], "d": [40]}))
features = pd.DataFrame({"a": [1], "b": [2]})
training_data = pd.DataFrame()
if input_type == "ww":
features = ww.DataTable(features)
training_data = ww.DataTable(training_data)
table = explain_prediction(pipeline, features, output_format=output_format, top_k=2, training_data=training_data)
if isinstance(table, str):
compare_two_tables(table.splitlines(), answer)
elif isinstance(table, pd.DataFrame):
pd.testing.assert_frame_equal(table, answer)
else:
assert table == answer
def test_error_metrics():
pd.testing.assert_series_equal(abs_error(pd.Series([1, 2, 3]), pd.Series([4, 1, 0])), pd.Series([3, 1, 3]))
pd.testing.assert_series_equal(cross_entropy(pd.Series([1, 0]),
pd.DataFrame({"a": [0.1, 0.2], "b": [0.9, 0.8]})),
pd.Series([-np.log(0.9), -np.log(0.2)]))
input_features_and_y_true = [([[1]], pd.Series([1]), "^Input features must be a dataframe with more than 10 rows!"),
(pd.DataFrame({"a": [1]}), pd.Series([1]), "^Input features must be a dataframe with more than 10 rows!"),
(pd.DataFrame({"a": range(15)}), pd.Series(range(12)), "^Parameters y_true and input_features must have the same number of data points.")
]
@pytest.mark.parametrize("input_features,y_true,error_message", input_features_and_y_true)
def test_explain_predictions_best_worst_value_errors(input_features, y_true, error_message):
with pytest.raises(ValueError, match=error_message):
explain_predictions_best_worst(None, input_features, y_true)
def test_explain_predictions_raises_pipeline_score_error():
with pytest.raises(PipelineScoreError, match="Division by zero!"):
def raise_zero_division(input_features):
raise ZeroDivisionError("Division by zero!")
pipeline = MagicMock()
pipeline.problem_type = ProblemTypes.BINARY
pipeline.predict_proba.side_effect = raise_zero_division
explain_predictions_best_worst(pipeline, pd.DataFrame({"a": range(15)}), pd.Series(range(15)))
def test_explain_predictions_value_errors():
with pytest.raises(ValueError, match="Parameter input_features must be a non-empty dataframe."):
explain_predictions(None, pd.DataFrame())
def test_output_format_checked():
input_features, y_true = pd.DataFrame(data=[range(15)]), pd.Series(range(15))
with pytest.raises(ValueError, match="Parameter output_format must be either text, dict, or dataframe. Received bar"):
explain_predictions(None, input_features, output_format="bar")
with pytest.raises(ValueError, match="Parameter output_format must be either text, dict, or dataframe. Received xml"):
explain_prediction(None, input_features=input_features, training_data=None, output_format="xml")
input_features, y_true = pd.DataFrame(data=range(15)), pd.Series(range(15))
with pytest.raises(ValueError, match="Parameter output_format must be either text, dict, or dataframe. Received foo"):
explain_predictions_best_worst(None, input_features, y_true=y_true, output_format="foo")
regression_best_worst_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Value: 1
Target Value: 2
Absolute Difference: 1.0
Index ID: {index_0}
table goes here
Worst 1 of 1
Predicted Value: 2
Target Value: 3
Absolute Difference: 4.0
Index ID: {index_1}
table goes here
"""
regression_best_worst_answer_dict = {
"explanations": [
{"rank": {"prefix": "best", "index": 1},
"predicted_values": {"probabilities": None, "predicted_value": 1, "target_value": 2,
"error_name": "Absolute Difference", "error_value": 1.},
"explanations": ["explanation_dictionary_goes_here"]},
{"rank": {"prefix": "worst", "index": 1},
"predicted_values": {"probabilities": None, "predicted_value": 2, "target_value": 3,
"error_name": "Absolute Difference", "error_value": 4.},
"explanations": ["explanation_dictionary_goes_here"]}
]
}
regression_best_worst_answer_df = pd.DataFrame({
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"rank": [1, 1],
"predicted_value": [1, 2],
"target_value": [2, 3],
"error_name": ["Absolute Difference"] * 2,
"error_value": [1., 4.],
"prefix": ["best", "worst"],
})
no_best_worst_answer = """Test Pipeline Name
Parameters go here
1 of 2
table goes here
2 of 2
table goes here
"""
no_best_worst_answer_dict = {
"explanations": [
{"explanations": ["explanation_dictionary_goes_here"]},
{"explanations": ["explanation_dictionary_goes_here"]}
]
}
no_best_worst_answer_df = pd.DataFrame({
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"prediction_number": [0, 1]
})
binary_best_worst_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Probabilities: [benign: 0.05, malignant: 0.95]
Predicted Value: malignant
Target Value: malignant
Cross Entropy: 0.2
Index ID: {index_0}
table goes here
Worst 1 of 1
Predicted Probabilities: [benign: 0.1, malignant: 0.9]
Predicted Value: malignant
Target Value: benign
Cross Entropy: 0.78
Index ID: {index_1}
table goes here
"""
binary_best_worst_answer_dict = {
"explanations": [
{"rank": {"prefix": "best", "index": 1},
"predicted_values": {"probabilities": {"benign": 0.05, "malignant": 0.95},
"predicted_value": "malignant", "target_value": "malignant",
"error_name": "Cross Entropy", "error_value": 0.2},
"explanations": ["explanation_dictionary_goes_here"]},
{"rank": {"prefix": "worst", "index": 1},
"predicted_values": {"probabilities": {"benign": 0.1, "malignant": 0.9},
"predicted_value": "malignant", "target_value": "benign",
"error_name": "Cross Entropy", "error_value": 0.78},
"explanations": ["explanation_dictionary_goes_here"]}
]
}
binary_best_worst_answer_df = pd.DataFrame({
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"rank": [1, 1],
"prefix": ["best", "worst"],
"label_benign_probability": [0.05, 0.1],
"label_malignant_probability": [0.95, 0.9],
"predicted_value": ["malignant", "malignant"],
"target_value": ["malignant", "benign"],
"error_name": ["Cross Entropy"] * 2,
"error_value": [0.2, 0.78]
})
multiclass_table = """Class: setosa
table goes here
Class: versicolor
table goes here
Class: virginica
table goes here"""
multiclass_best_worst_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Probabilities: [setosa: 0.8, versicolor: 0.1, virginica: 0.1]
Predicted Value: setosa
Target Value: setosa
Cross Entropy: 0.15
Index ID: {{index_0}}
{multiclass_table}
Worst 1 of 1
Predicted Probabilities: [setosa: 0.2, versicolor: 0.75, virginica: 0.05]
Predicted Value: versicolor
Target Value: versicolor
Cross Entropy: 0.34
Index ID: {{index_1}}
{multiclass_table}
""".format(multiclass_table=multiclass_table)
multiclass_best_worst_answer_dict = {
"explanations": [
{"rank": {"prefix": "best", "index": 1},
"predicted_values": {"probabilities": {"setosa": 0.8, "versicolor": 0.1, "virginica": 0.1},
"predicted_value": "setosa", "target_value": "setosa",
"error_name": "Cross Entropy", "error_value": 0.15},
"explanations": ["explanation_dictionary_goes_here"]},
{"rank": {"prefix": "worst", "index": 1},
"predicted_values": {"probabilities": {"setosa": 0.2, "versicolor": 0.75, "virginica": 0.05},
"predicted_value": "versicolor", "target_value": "versicolor",
"error_name": "Cross Entropy", "error_value": 0.34},
"explanations": ["explanation_dictionary_goes_here"]}
]
}
multiclass_best_worst_answer_df = pd.DataFrame({
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"rank": [1, 1],
"prefix": ["best", "worst"],
"label_setosa_probability": [0.8, 0.2],
"label_versicolor_probability": [0.1, 0.75],
"label_virginica_probability": [0.1, 0.05],
"predicted_value": ["setosa", "versicolor"],
"target_value": ["setosa", "versicolor"],
"error_name": ["Cross Entropy"] * 2,
"error_value": [0.15, 0.34]
})
multiclass_no_best_worst_answer = """Test Pipeline Name
Parameters go here
1 of 2
{multiclass_table}
2 of 2
{multiclass_table}
""".format(multiclass_table=multiclass_table)
@pytest.mark.parametrize("problem_type,output_format,answer,explain_predictions_answer,custom_index",
[(ProblemTypes.REGRESSION, "text", regression_best_worst_answer, no_best_worst_answer, [0, 1]),
(ProblemTypes.REGRESSION, "dict", regression_best_worst_answer_dict, no_best_worst_answer_dict, [0, 1]),
(ProblemTypes.REGRESSION, "dataframe", regression_best_worst_answer_df, no_best_worst_answer_df, [0, 1]),
(ProblemTypes.REGRESSION, "text", regression_best_worst_answer, no_best_worst_answer, [4, 23]),
(ProblemTypes.REGRESSION, "dict", regression_best_worst_answer_dict, no_best_worst_answer_dict, [4, 10]),
(ProblemTypes.REGRESSION, "dataframe", regression_best_worst_answer_df, no_best_worst_answer_df, [4, 10]),
(ProblemTypes.REGRESSION, "text", regression_best_worst_answer, no_best_worst_answer, ["foo", "bar"]),
(ProblemTypes.REGRESSION, "dict", regression_best_worst_answer_dict, no_best_worst_answer_dict, ["foo", "bar"]),
(ProblemTypes.REGRESSION, "dataframe", regression_best_worst_answer_df, no_best_worst_answer_df, ["foo", "bar"]),
(ProblemTypes.BINARY, "text", binary_best_worst_answer, no_best_worst_answer, [0, 1]),
(ProblemTypes.BINARY, "dict", binary_best_worst_answer_dict, no_best_worst_answer_dict, [0, 1]),
(ProblemTypes.BINARY, "dataframe", binary_best_worst_answer_df, no_best_worst_answer_df, [0, 1]),
(ProblemTypes.BINARY, "text", binary_best_worst_answer, no_best_worst_answer, [7, 11]),
(ProblemTypes.BINARY, "dict", binary_best_worst_answer_dict, no_best_worst_answer_dict, [7, 11]),
(ProblemTypes.BINARY, "dataframe", binary_best_worst_answer_df, no_best_worst_answer_df, [7, 11]),
(ProblemTypes.BINARY, "text", binary_best_worst_answer, no_best_worst_answer, ["first", "second"]),
(ProblemTypes.BINARY, "dict", binary_best_worst_answer_dict, no_best_worst_answer_dict, ["first", "second"]),
(ProblemTypes.BINARY, "dataframe", binary_best_worst_answer_df, no_best_worst_answer_df, ["first", "second"]),
(ProblemTypes.MULTICLASS, "text", multiclass_best_worst_answer, multiclass_no_best_worst_answer, [0, 1]),
(ProblemTypes.MULTICLASS, "dict", multiclass_best_worst_answer_dict, no_best_worst_answer_dict, [0, 1]),
(ProblemTypes.MULTICLASS, "dataframe", multiclass_best_worst_answer_df, no_best_worst_answer_df, [0, 1]),
(ProblemTypes.MULTICLASS, "text", multiclass_best_worst_answer, multiclass_no_best_worst_answer, [19, 103]),
(ProblemTypes.MULTICLASS, "dict", multiclass_best_worst_answer_dict, no_best_worst_answer_dict, [17, 235]),
(ProblemTypes.MULTICLASS, "dataframe", multiclass_best_worst_answer_df, no_best_worst_answer_df, [17, 235]),
(ProblemTypes.MULTICLASS, "text", multiclass_best_worst_answer, multiclass_no_best_worst_answer, ["2020-10", "2020-11"]),
(ProblemTypes.MULTICLASS, "dict", multiclass_best_worst_answer_dict, no_best_worst_answer_dict, ["2020-15", "2020-15"]),
(ProblemTypes.MULTICLASS, "dataframe", multiclass_best_worst_answer_df, no_best_worst_answer_df, ["2020-15", "2020-15"]),
])
@patch("evalml.model_understanding.prediction_explanations.explainers.DEFAULT_METRICS")
@patch("evalml.model_understanding.prediction_explanations._user_interface._make_single_prediction_shap_table")
def test_explain_predictions_best_worst_and_explain_predictions(mock_make_table, mock_default_metrics,
problem_type, output_format, answer,
explain_predictions_answer, custom_index):
if output_format == "text":
mock_make_table.return_value = "table goes here"
elif output_format == "dataframe":
shap_table = pd.DataFrame({
"feature_names": [0],
"feature_values": [0],
"qualitative_explanation": [0],
"quantitative_explanation": [0],
})
# Use side effect so that we always get a new copy of the dataframe
mock_make_table.side_effect = lambda *args, **kwargs: shap_table.copy()
else:
mock_make_table.return_value = {"explanations": ["explanation_dictionary_goes_here"]}
pipeline = MagicMock()
pipeline.parameters = "Parameters go here"
input_features = pd.DataFrame({"a": [3, 4]}, index=custom_index)
pipeline.problem_type = problem_type
pipeline.name = "Test Pipeline Name"
def _add_custom_index(answer, index_best, index_worst, output_format):
if output_format == "text":
answer = answer.format(index_0=index_best, index_1=index_worst)
elif output_format == "dataframe":
col_name = "prefix" if "prefix" in answer.columns else "rank"
n_repeats = answer[col_name].value_counts().tolist()[0]
answer['index_id'] = [index_best] * n_repeats + [index_worst] * n_repeats
else:
answer["explanations"][0]["predicted_values"]["index_id"] = index_best
answer["explanations"][1]["predicted_values"]["index_id"] = index_worst
return answer
if problem_type == ProblemTypes.REGRESSION:
abs_error_mock = MagicMock(__name__="abs_error")
abs_error_mock.return_value = pd.Series([4., 1.], dtype="float64")
mock_default_metrics.__getitem__.return_value = abs_error_mock
        pipeline.predict.return_value = ww.DataColumn(pd.Series([2, 1]))
import pandas as _pd
# from atmPy.tools import thermodynamics
from atmPy.general import timeseries as _timeseries
import numpy as _np
from atmPy.aerosols.physics import sampling_efficiency as _sampeff
from atmPy.tools import pandas_tools as _pandas_tools
_date_time_alts = ['uas_datetime']
_pressure_alt = ['StaticP', 'PRESS']
_temp_alt = ['AT_cont', 'AT']
_RH_alt = ['RH_cont', 'RH']
_temp_payload_alt = ['CONDT']
_cn_concentration_alt = ['CONCN']
def read_csv(fname, temperature_limits=(-20, -0.5)):
"""
Arguments
---------
    temperature_limits: tuple.
        The temperature reading contains false readings which can cause problems later."""
df = _pd.read_csv(fname, sep='\t')
_pandas_tools.ensure_column_exists(df, 'DateTime', _date_time_alts)
_pandas_tools.ensure_column_exists(df, 'Pressure_Pa', _pressure_alt)
_pandas_tools.ensure_column_exists(df, 'Temperature', _temp_alt)
_pandas_tools.ensure_column_exists(df, 'Relative_humidity', _RH_alt)
_pandas_tools.ensure_column_exists(df, 'Temperature_instrument', _temp_payload_alt, raise_error=False)
_pandas_tools.ensure_column_exists(df, 'CN_concentration', _cn_concentration_alt, raise_error=False)
try:
# df.Temperature_payload = df.Temperature_payload.astype(float)
df.Temperature_instrument = _pd.to_numeric(df.Temperature_instrument, errors='coerce')
df.CN_concentration = _pd.to_numeric(df.CN_concentration, errors='coerce')
df.CONCN = _pd.to_numeric(df.CONCN, errors='coerce')
df.COUNT = _pd.to_numeric(df.COUNT, errors='coerce')
except AttributeError:
pass
# return df
    df.index = _pd.Series(_pd.to_datetime(df.DateTime, format='%Y-%m-%d %H:%M:%S'))
from datetime import datetime
import numpy as np
import pytest
from pandas._libs.tslibs import ccalendar
@pytest.mark.parametrize(
"date_tuple,expected",
[
((2001, 3, 1), 60),
((2004, 3, 1), 61),
((1907, 12, 31), 365), # End-of-year, non-leap year.
((2004, 12, 31), 366), # End-of-year, leap year.
],
)
def test_get_day_of_year_numeric(date_tuple, expected):
assert ccalendar.get_day_of_year(*date_tuple) == expected
def test_get_day_of_year_dt():
dt = datetime.fromordinal(1 + np.random.randint(365 * 4000))
result = | ccalendar.get_day_of_year(dt.year, dt.month, dt.day) | pandas._libs.tslibs.ccalendar.get_day_of_year |
import pandas as pd
import numpy as np
# TODO: fix 'skips', add remaining rows once scrape completes
df_list = []
# 87 turned out weird, figure out what happened here
skips = [87, 101, 144, 215, 347, 350, 360,374]
for i in range(600):
if i in skips:
print('skipping {}'.format(i))
pass
else:
df1 = pd.read_csv('coffee_{}_table_0.csv'.format(i))
df2 = pd.read_csv('coffee_{}_table_1.csv'.format(i))
df3 = pd.read_csv('coffee_{}_table_2.csv'.format(i))
df4 = pd.read_csv('coffee_{}_table_3.csv'.format(i))
df5 = pd.read_csv('coffee_{}_table_4.csv'.format(i))
# df1
"""
Unnamed: 0 0 1
0 0 90.58 NaN
1 1 View Q Arabica Certificate NaN
2 2 Print Q Arabica Certificate NaN
3 3 Cupping Protocol and Descriptors NaN
4 4 View Green Analysis Details NaN
5 5 Request a Sample NaN
6 6 Species Arabica
7 7 Owner metad plc
"""
df1.columns = ['one','two','three']
colnames1 = df1['two'].tolist()
        # these names are inconsistent, but the data doesn't look important
colnames1[1] = 'view_certificate_1'
colnames1[2] = 'view_certificate_2'
data1 = df1['three'].tolist()
data1[0] = colnames1[0]
colnames1[0] = 'quality_score'
df1_processed = pd.DataFrame([data1],columns=colnames1)
# df2
"""
Unnamed: 0 0 1 \
0 0 Country of Origin Ethiopia
1 1 Farm Name METAD PLC
2 2 Lot Number NaN
3 3 Mill METAD PLC
4 4 ICO Number 2014/2015
5 5 Company METAD Agricultural Developmet plc
6 6 Altitude 1950-2200
7 7 Region GUJI-HAMBELA/GOYO
8 8 Producer METAD PLC
2 3
0 Number of Bags 300
1 Bag Weight 60 kg
2 In-Country Partner METAD Agricultural Development plc
3 Harvest Year 2014
4 Grading Date April 4th, 2015
5 Owner metad plc
6 Variety NaN
7 Status Completed
8 Processing Method Washed / Wet
"""
df2.columns = ['one','two','three','four','five']
colnames1 = df2['two'].tolist()
colnames2 = df2['four'].tolist()
data1 = df2['three'].tolist()
data2 = df2['five'].tolist()
df2_processed = pd.DataFrame([(data1+data2)],columns=(colnames1+colnames2))
# df3
"""
Unnamed: 0 0 1 2 3
0 0 NaN Sample NaN Sample
1 1 Aroma 8.67 Uniformity 10.00
2 2 Flavor 8.83 Clean Cup 10.00
3 3 Aftertaste 8.67 Sweetness 10.00
4 4 Acidity 8.75 Cupper Points 8.75
5 5 Body 8.50 Total Cup Points Sample 90.58
6 6 Balance 8.42 NaN NaN
"""
df3.columns = ['one','two','three','four','five']
colnames1 = df3['two'].tolist()
colnames2 = df3['four'].tolist()
data1 = df3['three'].tolist()
data2 = df3['five'].tolist()
df3_processed = pd.DataFrame([(data1+data2)],columns=(colnames1+colnames2))
# df4
"""
Unnamed: 0 0 1 2 \
0 0 Moisture 12 % Color
1 1 Category One Defects 0 full defects Category Two Defects
2 2 Quakers 0 NaN
3
0 Green
1 0 full defects
2 NaN
"""
df4.columns = ['one','two','three','four','five']
colnames1 = df4['two'].tolist()
colnames2 = df4['four'].tolist()
data1 = df4['three'].tolist()
data2 = df4['five'].tolist()
df4_processed = pd.DataFrame([(data1+data2)],columns=(colnames1+colnames2))
# df5
"""
Unnamed: 0 0 \
0 0 Expiration
1 1 Certification Body
2 2 Certification Address
3 3 Certification Contact
1
0 April 3rd, 2016
1 METAD Agricultural Development plc
2 BAWA Center, 3rd Floor (Gerji), Addis Ababa, E...
3 <NAME> (Emebet Dinku) - +251-116-292534, ...
"""
df5.columns = ['one','two','three']
colnames1 = df5['two'].tolist()
data1 = df5['three'].tolist()
if i > 1:
            prev_cols = df.columns  # cols before replacing df with next coffee
        df5_processed = pd.DataFrame([data1], columns=colnames1)
        df = pd.concat([df1_processed, df2_processed, df3_processed, df4_processed, df5_processed], axis=1)
df = df.rename(columns = {np.nan : "NA"})
df_list.append(df)
print(i)
these_cols = df.columns
# are the columns matching across coffees?
if i>1:
# figuring out where the column mismatches are
#print(these_cols==prev_cols)
#print(these_cols)
#print(prev_cols)
pass
j = 0
for i in df_list:
print('{} shape: {}'.format(j,i.shape))
j+=1
df_final = | pd.concat(df_list, 0) | pandas.concat |
from main_app_v3 import forecast_cases, forecast_cases_active
import pandas as pd
import numpy as np
import os
import warnings
warnings.filterwarnings("ignore")
from bs4 import BeautifulSoup as bs
from datetime import date
from selenium import webdriver
def update(region_dict, driver):
df = pd.read_html(driver.page_source)[0]
df.columns = ["slno", "Region", "Active_Cases", "Active_New", "Cured/Discharged", "Cured_New", "Death", "Deaths_New"]
df["Date"] = [date.today().strftime("%d-%m-%Y") for i in range(len(df))]
df.drop(columns=["slno", "Active_New", "Cured_New", "Deaths_New"], inplace=True)
df = df.iloc[:-4,:]
df.iloc[-1,0] = "India"
df["Confirmed_Cases"] = [df.iloc[i,1:-1].astype(int).sum() for i in range(len(df))]
cols = list(df.columns)
cols = [i.replace(" ","_") for i in cols]
df.columns = cols
#df["Date"] = [date.today().strftime('%Y-%m-%d') for i in range(len(df))]
#df.index = pd.to_datetime(df.Date,format='%Y-%m-%d')
df["Date"] = [date.today().strftime('%d-%m-%Y') for i in range(len(df))]
df.index = pd.to_datetime(df.Date,format='%d-%m-%Y')
for i,region in enumerate(region_dict):
region_dict[region] = region_dict[region].append(df[df.Region == region], sort=False, ignore_index=True)
try:
region_dict[region].index = pd.to_datetime(region_dict[region].Date,format='%d-%m-%Y')
except:
region_dict[region].index = pd.to_datetime(region_dict[region].Date,format='%Y-%m-%d')
return region_dict
def update_database():
df = pd.read_csv("COVID_Database.csv")
cols = list(df.columns)
cols = [i.replace(" ","_") for i in cols]
df.columns = cols
states_df = df.groupby('Region')
state_df = [pd.DataFrame(states_df.get_group(state)) for state in states_df.groups]
#GOOGLE_CHROME_PATH = '/app/.apt/usr/bin/google_chrome'
CHROMEDRIVER_PATH = '/app/.chromedriver/bin/chromedriver'
#selenium setup
chrome_options = webdriver.ChromeOptions()
#chrome_options.binary_location = GOOGLE_CHROME_PATH
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--no-sandbox")
try:
driver=webdriver.Chrome(CHROMEDRIVER_PATH,chrome_options=chrome_options)
except:
driver=webdriver.Chrome(r"C:\Users\Tejas\Downloads\chromedriver_win32\chromedriver.exe",chrome_options=chrome_options)
#driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"), chrome_options=chrome_options)
driver.get("https://www.mohfw.gov.in/")
region_dict = {}
for region in states_df.groups:
region_dict[region] = pd.DataFrame(states_df.get_group(region))
try:
region_dict[region].index = pd.to_datetime(region_dict[region].Date,format='%Y-%m-%d')
except:
region_dict[region].index = pd.to_datetime(region_dict[region].Date,format='%d-%m-%Y')
region_dict = update(region_dict, driver)
# break
driver.quit()
df = pd.DataFrame()
for region in region_dict.keys():
df = df.append(region_dict[region], sort=False, ignore_index=True)
cols = list(df.columns)
cols = [i.replace(" ","_") for i in cols]
df.columns = cols
df.drop_duplicates(inplace=True)
df.to_csv("COVID_Database.csv", index=False)
return "Database Updated"
def forecasted_database():
df = pd.read_csv("COVID_Database.csv")
df["Forecasted"] = ["" for i in range(len(df))]
regions = sorted(list(df.Region.value_counts().keys()))
df_old = pd.read_csv("Forecast.csv")
forecast_df = pd.DataFrame(columns=['Date', 'Region', 'Confirmed_Cases', 'Active_Cases', 'Cured/Discharged',
'Death', 'Forecasted', 'Forecasted_high', 'Forecasted_low', 'Confirmed_Cases_Daywise', 'Active_Cases_Daywise'])
forecasts = []
forecasts_high = []
forecasts_low = []
forecasts_daywise = []
forecasts_daywise_high = []
forecasts_daywise_low = []
days = []
region_x = []
############
forecasts_active = []
forecasts_active_high = []
forecasts_active_low = []
forecasts_active_daywise = []
forecasts_active_daywise_high = []
forecasts_active_daywise_low = []
n=31
for region in regions:
try:
forecast,std_error,confidence_interval = forecast_cases(region, n=n, return_all=True)
forecast_old = df_old.Forecasted_Cases[df_old.Region==region].iloc[0]
forecast_old = forecast[0]-forecast_old
forecasts_daywise.append(forecast_old)
forecasts_daywise_high.append(forecast_old+(forecast_old*0.05))
forecasts_daywise_low.append(forecast_old-(forecast_old*0.05))
forecast_high = confidence_interval[:,1]
forecast_low = confidence_interval[:,0]
forecasts.extend(forecast)
forecasts_high.extend(forecast_high)
forecasts_low.extend(forecast_low)
forecasts_daywise.extend(pd.Series(forecast).diff(periods=1).iloc[1:])
forecasts_daywise_high.extend(pd.Series(forecast_high).diff(periods=1).iloc[1:])
forecasts_daywise_low.extend(pd.Series(forecast_low).diff(periods=1).iloc[1:])
days.extend(pd.date_range(date.today(), periods=n).strftime('%d-%m-%Y'))
region_x.extend([region for i in range(n)])
######################
forecast_active,std_error_active,confidence_interval_active = forecast_cases_active(region, n=n, return_all=True)
forecast_active_old = df_old.Forecasted_Active_Cases[df_old.Region==region].iloc[0]
forecast_active_old = forecast_active[0]-forecast_active_old
forecasts_active_daywise.append(forecast_active_old)
forecasts_active_daywise_high.append(forecast_active_old+(forecast_active_old*0.05))
forecasts_active_daywise_low.append(forecast_active_old-(forecast_active_old*0.05))
forecast_active_high = confidence_interval_active[:,1]
forecast_active_low = confidence_interval_active[:,0]
forecasts_active.extend(forecast_active)
forecasts_active_high.extend(forecast_active_high)
forecasts_active_low.extend(forecast_active_low)
forecasts_active_daywise.extend(pd.Series(forecast_active).diff(periods=1).iloc[1:])
forecasts_active_daywise_high.extend(pd.Series(forecast_active_high).diff(periods=1).iloc[1:])
            forecasts_active_daywise_low.extend(pd.Series(forecast_active_low).diff(periods=1).iloc[1:])
import pandas as pd
import numpy as np
import json
import pycountry_convert as pc
from ai4netmon.Analysis.aggregate_data import data_collectors as dc
from ai4netmon.Analysis.aggregate_data import graph_methods as gm
FILES_LOCATION = 'https://raw.githubusercontent.com/sermpezis/ai4netmon/main/data/misc/'
PATH_AS_RANK = FILES_LOCATION+'ASrank.csv'
PATH_PERSONAL = FILES_LOCATION+'perso.txt'
PATH_PEERINGDB = FILES_LOCATION+'peeringdb_2_dump_2021_07_01.json'
AS_HEGEMONY_PATH = FILES_LOCATION+'AS_hegemony.csv'
ALL_ATLAS_PROBES = FILES_LOCATION+'RIPE_Atlas_probes.json'
ROUTEVIEWS_PEERS = FILES_LOCATION+'RouteViews_peers.json'
AS_RELATIONSHIPS = FILES_LOCATION+'AS_relationships_20210701.as-rel2.txt'
def cc2cont(country_code):
'''
Receives a country code ISO2 (e.g., 'US') and returns the corresponding continent name (e.g., 'North America').
Exceptions:
- if 'EU' is given as country code (it happened in data), then it is treated as the continent code
- if the country code is not found, then a None value is returned
:param country_code: (str) ISO2 country code
:return: (str) continent name of the given country(-ies)
'''
if country_code in ['EU']:
continent_code = country_code
else:
try:
continent_code = pc.country_alpha2_to_continent_code(country_code)
except KeyError:
return None
continent_name = pc.convert_continent_code_to_continent_name(continent_code)
return continent_name
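# e.g. cc2cont('US') -> 'North America'; cc2cont('EU') -> 'Europe' (treated as a
# continent code); cc2cont('XX') -> None for an unrecognised code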
def get_continent(country_code):
'''
Receives a series of country codes ISO2 (e.g., 'US') and returns the corresponding continent names (e.g., 'North America').
For NaN or None elements, it returns a None value
:param country_code: (pandas Series) ISO2 country codes
:return: (list of str) continent names of the given countries
'''
continent_name = []
for cc in country_code.tolist():
if pd.isna(cc):
continent_name.append(None)
else:
continent_name.append( cc2cont(cc) )
return continent_name
def create_df_from_AS_relationships():
"""
Loads the CAIDA AS-relationships datasets from the source file. Returns a dataframe with index the ASN
and columns features derived from the graph; appends in the column names the prefix "AS_rel_".
The returned features are:
- "degree": a column with the degree (i.e., #neighbors) of each AS
:return: A dataframe with index the ASN
"""
G = gm.create_graph_from_AS_relationships(AS_RELATIONSHIPS)
df = pd.DataFrame(G.degree(), columns=['asn','AS_rel_degree'])
df = df.set_index('asn')
return df
def create_df_from_RouteViews():
"""
Collects the list of RouteViews peers, and returns a dataframe with RouteViews peers ASNs
:return: A dataframe with index the ASN
"""
df = pd.read_json(ROUTEVIEWS_PEERS)
df.columns = ['asn']
df = df.drop_duplicates()
df['is_routeviews_peer'] = 1
df = df.set_index('asn')
return df
def create_df_from_RIPE_RIS():
"""
Collects the list of RIPE RIS peers, and returns a dataframe with the v4 and v6 RIS peers ASNs.
:return: A dataframe with index the ASN
"""
ris_peer_ip2asn, _ = dc.get_ripe_ris_data()
unique_asns = set(ris_peer_ip2asn.values())
unique_asns_v4 = set([asn for ip,asn in ris_peer_ip2asn.items() if ':' not in ip])
unique_asns_v6 = set([asn for ip,asn in ris_peer_ip2asn.items() if ':' in ip])
df = pd.DataFrame(columns=['is_ris_peer_v4', 'is_ris_peer_v6'], index=unique_asns)
df.loc[unique_asns_v4, 'is_ris_peer_v4'] = 1
df.loc[unique_asns_v6, 'is_ris_peer_v6'] = 1
df.index.name = 'asn'
return df
def create_df_from_Atlas_probes():
"""
Loads the list of RIPE Atlas probes, and returns a dataframe with the number of v4 and v6 probes per ASN (only for ASNs that have at least one probe).
:return: A dataframe with index the ASN
"""
data = pd.read_json(ALL_ATLAS_PROBES, lines=True)
data = data[(data['status'] == 'Connected')]
s4 = data['asn_v4'].value_counts()
s6 = data['asn_v6'].value_counts()
df = pd.concat([s4, s6], axis=1)
df.index.name = 'asn'
df = df.rename(columns={'asn_v4': 'nb_atlas_probes_v4', 'asn_v6': 'nb_atlas_probes_v6'})
return df
def create_df_from_AS_rank():
"""
Loads the CAIDA AS-rank dataset from the source file. Returns a dataframe with index the ASN; appends in the column names the prefix "AS_rank_".
:return: A dataframe with index the ASN
"""
data = pd.read_csv(PATH_AS_RANK, sep=",")
new_columns = ['AS_rank_' + str(i) for i in data.columns]
data = data.set_axis(new_columns, axis='columns', inplace=False)
data.loc[(data['AS_rank_longitude'] == 0) & (data['AS_rank_latitude'] == 0), ['AS_rank_longitude',
'AS_rank_latitude']] = None
data['AS_rank_continent'] = get_continent(data['AS_rank_iso'])
data = data.set_index('AS_rank_asn')
return data
def create_df_from_AS_hegemony():
"""
Loads the AS hegemony dataset from the source file. Returns a dataframe with index the ASN, and a single column with the AS hegemony value of the AS
:return: A dataframe with index the ASN
"""
data = pd.read_csv(AS_HEGEMONY_PATH, sep=",")
data = data.rename(columns={'hege': 'AS_hegemony'})
data = data.set_index('asn')
return data
def create_df_from_personal():
"""
Loads the bgp.tools personal AS dataset from the source file. Creates a dataframe with index the ASN of
the ASes that are personal use ASes; the dataframe has only one column with 1 for all rows
:return: A dataframe with index the ASN
"""
data = pd.read_csv(PATH_PERSONAL, header=None)
data.columns = ['asn']
# keep only the digits of the ASNs
data['asn'] = data['asn'].apply(lambda x: int(x[2:]))
data['is_personal_AS'] = 1
data = data.set_index('asn')
return data
def create_df_from_PeeringDB():
"""
    Loads the PeeringDB dataset from the source file. Returns a dataframe with index the ASN; appends in the column names the prefix "peeringDB_". The dataframe contains only the features in the keep_keys list.
:return: A dataframe with index the ASN
"""
df = pd.read_json(PATH_PEERINGDB)
data = []
keep_keys = ['asn', 'info_ratio', 'info_traffic', 'info_scope', 'info_type', 'info_prefixes4',
'info_prefixes6', 'policy_general', 'ix_count', 'fac_count', 'created']
for row in df.net['data']:
net_row = [row.get(key) for key in keep_keys]
data.append(net_row)
df = pd.DataFrame(data, columns=keep_keys)
new_columns = ['peeringDB_' + str(i) for i in df.columns]
df = df.set_axis(new_columns, axis='columns', inplace=False)
df = df.set_index('peeringDB_asn')
return df
def create_df_from(dataset):
"""
Selects a method, based on the given dataset name, and creates the corresponding dataframe.
When adding a new method, take care to have as index the ASN and the column names to be of the format "dataset_name_"+"column_name" (e.g., the column "X" from the dataset "setA", should be "setA_X")
:param dataset: (type = string) name of the dataset to be loaded
:return: A dataframe with indexes the ASNs and columns the features loaded from the given dataset
"""
if dataset == 'AS_rank':
data = create_df_from_AS_rank()
elif dataset == 'personal':
data = create_df_from_personal()
elif dataset == 'PeeringDB':
data = create_df_from_PeeringDB()
elif dataset == 'AS_hegemony':
data = create_df_from_AS_hegemony()
elif dataset == 'Atlas_probes':
data = create_df_from_Atlas_probes()
elif dataset == 'RIPE_RIS':
data = create_df_from_RIPE_RIS()
elif dataset == 'RouteViews':
data = create_df_from_RouteViews()
elif dataset == 'AS_relationships':
data = create_df_from_AS_relationships()
else:
raise Exception('Not defined dataset')
return data
def create_dataframe_from_multiple_datasets(list_of_datasets):
"""
    Creates a dataframe for each given dataset, and concatenates all the dataframes into a common dataframe. The final/returned dataframe has the ASN as the index, and as columns all the columns from all datasets. Missing values are filled with NaN.
:param list_of_datasets: a list of str, where each string corresponds to a dataset to be loaded
:return: A dataframe with indexes the ASNs and columns the features loaded from each given dataset
"""
data = | pd.DataFrame() | pandas.DataFrame |
from __future__ import annotations
from textwrap import dedent
from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
from pandas.util._decorators import doc
from sklearn.pipeline import FeatureUnion as SKFeatureUnion
from sklearn.preprocessing import MinMaxScaler as SKMinMaxScaler
from sklearn.preprocessing import OneHotEncoder as SKOneHotEncoder
from dtoolkit.accessor.dataframe import cols # noqa
from dtoolkit.accessor.series import cols # noqa
from dtoolkit.transformer._util import transform_array_to_frame
from dtoolkit.transformer._util import transform_series_to_frame
from dtoolkit.transformer.base import Transformer
if TYPE_CHECKING:
from scipy.sparse import csr_matrix
from dtoolkit._typing import SeriesOrFrame
from dtoolkit._typing import TwoDimArray
class FeatureUnion(SKFeatureUnion, Transformer):
"""
Concatenates results of multiple transformer objects.
See Also
--------
make_union
Convenience function for simplified feature union construction.
Notes
-----
Different to :obj:`sklearn.pipeline.FeatureUnion`.
This would let :obj:`~pandas.DataFrame` in and
:obj:`~pandas.DataFrame` out.
Examples
--------
>>> from dtoolkit.transformer import FeatureUnion
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> union = FeatureUnion([("pca", PCA(n_components=1)),
... ("svd", TruncatedSVD(n_components=2))])
>>> X = [[0., 1., 3], [2., 2., 5]]
>>> union.fit_transform(X)
array([[ 1.5 , 3.0..., 0.8...],
[-1.5 , 5.7..., -0.4...]])
"""
def _hstack(self, Xs):
if all(isinstance(i, (pd.Series, pd.DataFrame)) for i in Xs):
Xs = (i.reset_index(drop=True) for i in Xs)
return pd.concat(Xs, axis=1)
return super()._hstack(Xs)
def make_union(
*transformers: list[Transformer],
n_jobs: int | None = None,
verbose: bool = False,
) -> FeatureUnion:
"""
Construct a FeatureUnion from the given transformers.
See Also
--------
FeatureUnion
Class for concatenating the results of multiple transformer objects.
Notes
-----
Different to :obj:`sklearn.pipeline.make_union`.
This would let :obj:`~pandas.DataFrame` in and
:obj:`~pandas.DataFrame` out.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> from dtoolkit.transformer import make_union
>>> make_union(PCA(), TruncatedSVD())
FeatureUnion(transformer_list=[('pca', PCA()),
('truncatedsvd', TruncatedSVD())])
"""
from sklearn.pipeline import _name_estimators
return FeatureUnion(
_name_estimators(transformers),
n_jobs=n_jobs,
verbose=verbose,
)
class MinMaxScaler(SKMinMaxScaler):
"""
Transform features by scaling each feature to a given range.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where :exc:`min, max = feature_range`.
Examples
--------
>>> from dtoolkit.transformer import MinMaxScaler
>>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
>>> scaler = MinMaxScaler()
>>> scaler.fit(data)
MinMaxScaler()
>>> scaler.data_max_
array([ 1., 18.])
>>> scaler.transform(data)
array([[0. , 0. ],
[0.25, 0.25],
[0.5 , 0.5 ],
[1. , 1. ]])
>>> scaler.transform([[2, 2]])
array([[1.5, 0. ]])
Notes
-----
Different to :obj:`sklearn.preprocessing.MinMaxScaler`.
This would let :obj:`~pandas.DataFrame` in and
:obj:`~pandas.DataFrame` out.
"""
def transform(self, X: TwoDimArray) -> TwoDimArray:
"""
Scale features of X according to feature_range.
Parameters
----------
X : DataFrame or array-like of shape `(n_samples, n_features)`
Input data that will be transformed.
Returns
-------
DataFrame or ndarray of shape `(n_samples, n_features)`
Transformed data.
Notes
-----
This would let :obj:`~pandas.DataFrame` in and
:obj:`~pandas.DataFrame` out.
"""
X_new = super().transform(X)
return transform_array_to_frame(X_new, X)
def inverse_transform(self, X: SeriesOrFrame | np.ndarray) -> TwoDimArray:
"""
Undo the scaling of X according to feature_range.
Parameters
----------
X : Series, DataFrame or array-like of shape `(n_samples, n_features)`
Input data that will be transformed. It cannot be sparse.
Returns
-------
DataFrame or ndarray of shape (n_samples, n_features)
Transformed data.
Notes
-----
This would let :obj:`~pandas.DataFrame` in and
:obj:`~pandas.DataFrame` out.
"""
X = transform_series_to_frame(X)
X_new = super().inverse_transform(X)
return transform_array_to_frame(X_new, X)
class OneHotEncoder(SKOneHotEncoder):
"""
Encode categorical features as a one-hot numeric array.
Notes
-----
Different to :obj:`sklearn.preprocessing.OneHotEncoder`.
The result would return a :obj:`~pandas.DataFrame` which uses categories
as columns.
Examples
--------
Given a dataset with two features, we let the encoder find the unique
values per feature and transform the data to a binary one-hot encoding.
>>> from dtoolkit.transformer import OneHotEncoder
One can discard categories not seen during `fit`:
>>> enc = OneHotEncoder(sparse=True, handle_unknown='ignore')
>>> X = [['Male', 1], ['Female', 3], ['Female', 2]]
>>> enc.fit(X)
OneHotEncoder(handle_unknown='ignore', sparse=True)
>>> enc.categories_
[array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
>>> enc.transform([['Female', 1], ['Male', 4]]).toarray()
array([[1., 0., 1., 0., 0.],
[0., 1., 0., 0., 0.]])
>>> enc.inverse_transform([[0, 1, 1, 0, 0], [0, 0, 0, 1, 0]])
array([['Male', 1],
[None, 2]], dtype=object)
>>> enc.get_feature_names(['gender', 'group'])
array(['gender_Female', 'gender_Male', 'group_1', 'group_2', 'group_3'],
dtype=object)
One can always drop the first column for each feature:
>>> drop_enc = OneHotEncoder(sparse=True, drop='first').fit(X)
>>> drop_enc.categories_
[array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
>>> drop_enc.transform([['Female', 1], ['Male', 2]]).toarray()
array([[0., 0., 0.],
[1., 1., 0.]])
Or drop a column for feature only having 2 categories:
>>> drop_binary_enc = OneHotEncoder(sparse=True, drop='if_binary').fit(X)
>>> drop_binary_enc.transform([['Female', 1], ['Male', 2]]).toarray()
array([[0., 1., 0., 0.],
[1., 0., 1., 0.]])
:obj:`~pandas.DataFrame` in, :obj:`~pandas.DataFrame` out with categories
as columns.
>>> import pandas as pd
>>> df = pd.DataFrame(X, columns=['gender', 'number'])
>>> df
gender number
0 Male 1
1 Female 3
2 Female 2
>>> enc = OneHotEncoder(categories_with_parent=True)
>>> enc.fit_transform(df)
gender_Female gender_Male number_1 number_2 number_3
0 0.0 1.0 1.0 0.0 0.0
1 1.0 0.0 0.0 0.0 1.0
2 1.0 0.0 0.0 1.0 0.0
"""
@ | doc(SKOneHotEncoder.__init__) | pandas.util._decorators.doc |
import os
import requests
from bs4 import BeautifulSoup
import pandas as pd
from functools import partial, reduce
import time
import multiprocessing
from collections import defaultdict
from gatheringMethods import *
from time import localtime, strftime
import jinja2
import ftplib
import random
import config_local as cfg
from config import *
from pathlib import Path
webs = {
'sobieraj': {'url': 'http://www.sobieraj-nieruchomosci.pl/', 'url_suffix': '', 'func': sobieraj_parse,
'pagination': False},
'florczyk': {'url': 'http://florczyk.nieruchomosci.pl/category/mieszkania/', 'url_suffix': '',
'func': florczyk_parse,
'pagination': False},
'abakus': {'url': "http://abakus.konin.pl/mieszkania", 'url_suffix': "", 'func': abakus_parse, 'pagination': False},
'invicus': {'url': "http://invicus.pl/pl/ogloszenia-w-serwisie/4/mieszkania/", 'url_suffix': "",
'func': invicus_parse, 'pagination': True},
'lider': {'url': "http://www.liderkonin.pl/100-mieszkania", 'url_suffix': "", 'func': lider_parse,
'pagination': False},
'tok': {'url': "https://www.toknieruchomosci.com.pl/mieszkania", 'url_suffix': "", 'func': tok_parse,
'pagination': False},
'aba': {
'url': "https://www.abanieruchomosci.pl/szukaj-oferty.html?estate_type=Mieszkania&ad_type=Sprzeda%C5%BC&locality=Konin&searching=yes&page_index=0",
'url_suffix': "", 'func': aba_parse,
'pagination': False},
'zaroda': {
'url': "http://www.zaroda-nieruchomosci.pl/oferta/szukaj?search%5Blocalization%5D=konin&search%5Btransaction%5D=1&search%5Btypes%5D=1",
'url_suffix': "", 'func': zaroda_parse,
'pagination': False},
'trado': {
'url': "http://tradonieruchomosci.pl/wyniki-wyszukiwania/?property_location=any&property_type=mieszkania&title=konin&property_feature=na-sprzedaz&search_nonce=479c85afba",
'url_suffix': "", 'func': trado_parse, 'pagination': False},
'lm': {'url': "https://www.lm.pl/ogloszenia/lista/85/", 'url_suffix': "/32454206", 'func': lm_parse,
'pagination': True},
}
def main():
starting_time = time.time()
try:
data = pd.DataFrame()
for web in webs:
data = pd.concat([data, scrap(web)], ignore_index=True, sort=False)
if len(data):
save_new_data(data)
render_html()
except Exception as e:
print(e)
logging.error(e)
logging.info("Running time %ds using %d threads" % ((time.time() - starting_time), THREADS_COUNT))
# execute all the functions for one website and return data in dataframe
def scrap(web):
pages_urls = prepare_pages_urls(web)
init_results = iterate_urls(pages_urls, web, 1)
# go to more detailed results of just new content
new_content = extract_new_content(init_results)
if new_content.empty:
return pd.DataFrame()
ads_urls = new_content['Url'].tolist()
detailed_ads = iterate_urls(ads_urls, web, 2)
results = new_content.merge(detailed_ads, on="Url")
return results
# get last page from website (if there is any pagination) else return 1
def get_last_page(web):
if not webs[web]['pagination']:
return 1
url = webs[web]['url'] + "1" + webs[web]['url_suffix']
try:
r = requests.get(url, headers=HEADERS, timeout=10)
if r.status_code != 200:
raise r.raise_for_status()
soup = BeautifulSoup(r.content, features="lxml")
if web == 'lm':
last_page = soup.find(class_='multicont_news_stronicowanie').find_all('a')[-2].get_text()
elif web == 'invicus':
last_page = soup.find(class_='perPage clear').find(class_='links').find_all('a')[-1].get_text()
else:
last_page = 1
return int(last_page)
except Exception as e:
logging.info(e)
return 1
# prepare list of urls for each page (if there is any pagination)
def prepare_pages_urls(web):
url = webs[web]['url']
url_suffix = webs[web]['url_suffix']
last_page = get_last_page(web)
if last_page == 1:
return [url + url_suffix]
else:
pages = range(1, last_page + 1)
return list(map(lambda x: url + str(x) + url_suffix, pages))
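# e.g. for 'lm' with a last page of 3 this returns:
# ['https://www.lm.pl/ogloszenia/lista/85/1/32454206',
#  'https://www.lm.pl/ogloszenia/lista/85/2/32454206',
#  'https://www.lm.pl/ogloszenia/lista/85/3/32454206']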
# using multiprocessing, scrape all the urls from the list (loop until all are done)
def iterate_urls(urls, web, step):
request_timeout = INIT_REQUEST_TIMEOUT
results = []
# breaks when there are no failed requests or timeout rise to maxReqTime
while len(urls) != 0 and request_timeout <= MAX_REQUEST_TIMEOUT:
logging.info(web)
if len(urls) > 8:
p = multiprocessing.Pool(THREADS_COUNT)
results += p.map(partial(get_page_soup, timeout=request_timeout, web=web, step=step), urls,
chunksize=CHUNK_SIZE) # play with chunksize for better performance
p.terminate()
p.join()
else:
results += map(lambda x: get_page_soup(x, request_timeout, web, step), urls)
results = [r for r in results if type(r) != str]
# try again with failed results
urls = [r for r in results if type(r) == str]
request_timeout += 1
return merge_to_dataframe(results)
# listOfListsOfDicts -> Dataframe
def merge_to_dataframe(lists):
lists = [x for x in lists if x is not None]
if len(lists) > 0:
return pd.DataFrame(reduce(lambda x, y: x + y, lists))
else:
return pd.DataFrame(
{'Url': [], 'Nazwa': [], 'Telefon': [], 'Cena': [], 'Zdjecie': [], 'Powierzchnia': 0.0, 'Piętro': [],
'Tresc': [], 'Zrodlo': []})
# scrape data from a single page (the gather function for the given web is looked up in the webs config)
def get_page_soup(page_url, timeout, web, step):
gather_method = webs[web]['func']
logging.info(page_url)
# in case requests fails
try:
r = requests.get(page_url, headers=HEADERS, timeout=timeout)
if r.status_code != 200:
raise r.raise_for_status()
soup = BeautifulSoup(r.content, features="lxml")
return gather_method(web, soup, step, page_url)
except Exception as e:
logging.info(e)
logging.info('failed...')
return page_url
def extract_new_content(new_data):
old_data = | pd.read_pickle(DATABASE_FILE) | pandas.read_pickle |
import logging
import os
# import pathlib
import random
import sys
import time
from itertools import chain
from collections.abc import Iterable
# from deepsense import neptune
import numpy as np
import pandas as pd
import torch
from PIL import Image
import yaml
from imgaug import augmenters as iaa
import imgaug as ia
def do_length_encode(x):
bs = np.where(x.T.flatten())[0]
rle = []
prev = -2
for b in bs:
if b > prev + 1:
rle.extend((b + 1, 0))
rle[-1] += 1
prev = b
# https://www.kaggle.com/c/data-science-bowl-2018/discussion/48561#
# if len(rle)!=0 and rle[-1]+rle[-2] == x.size:
# rle[-2] = rle[-2] -1
rle = " ".join([str(r) for r in rle])
return rle
from math import isnan
def do_length_decode(rle, H, W, fill_value=255):
mask = np.zeros((H, W), np.uint8)
    if type(rle).__name__ == "float":  # a missing/NaN rle arrives as a float -> empty mask
return mask
mask = mask.reshape(-1)
rle = np.array([int(s) for s in rle.split(" ")]).reshape(-1, 2)
for r in rle:
start = r[0] - 1
end = start + r[1]
mask[start:end] = fill_value
    mask = mask.reshape(W, H).T  # reshape to (W, H), then transpose back to (H, W)
return mask
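# Small round-trip sanity check for the two helpers above (illustrative only;
# this demo function is not called anywhere in the original module):
def _rle_roundtrip_demo():
    demo = np.zeros((101, 101), np.uint8)
    demo[10:20, 5] = 1                                        # short vertical segment
    rle = do_length_encode(demo)                              # -> "516 10"
    restored = do_length_decode(rle, 101, 101, fill_value=1)
    assert (restored == demo).all()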
def decode_csv(csv_name):
import pandas as pd
data = pd.read_csv(csv_name)
id = data["id"]
rle_mask = data["rle_mask"]
dict = {}
for id, rle in zip(id, rle_mask):
tmp = do_length_decode(rle, 101, 101, fill_value=1)
dict[id] = tmp
return dict
def save_id_fea(predict_dict, save_dir):
for id in predict_dict:
output_mat = predict_dict[id].astype(np.float32)
np.save(os.path.join(save_dir, id), output_mat)
def state_dict_remove_moudle(moudle_state_dict, model):
state_dict = model.state_dict()
keys = list(moudle_state_dict.keys())
for key in keys:
print(key + " loaded")
new_key = key.replace(r"module.", r"")
print(new_key)
state_dict[new_key] = moudle_state_dict[key]
return state_dict
from matplotlib import pyplot as plt
def write_and_plot(name, aver_num, logits, max_y=1.0, color="blue"):
def moving_average(a, n=aver_num):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1 :] / n
# ===========================================================
moving_plot = moving_average(np.array(logits))
x = range(moving_plot.shape[0])
# plt.close()
plt.plot(x, moving_plot, color=color)
plt.ylim(0, max_y)
plt.savefig(name)
def decompose(labeled):
nr_true = labeled.max()
masks = []
for i in range(1, nr_true + 1):
msk = labeled.copy()
msk[msk != i] = 0.0
msk[msk == i] = 255.0
masks.append(msk)
if not masks:
return [labeled]
else:
return masks
def encode_rle(predictions):
return [run_length_encoding(mask) for mask in predictions]
def create_submission(predictions):
output = []
for image_id, mask in predictions:
# print(image_id)
rle_encoded = " ".join(str(rle) for rle in run_length_encoding(mask))
output.append([image_id, rle_encoded])
submission = | pd.DataFrame(output, columns=["id", "rle_mask"]) | pandas.DataFrame |
"""General data-related utilities."""
import functools
import operator
import pandas as pd
def cartesian(ranges, names=None):
"""Generates a data frame that is a cartesian product of ranges."""
if names is None:
names = range(len(ranges))
if not ranges:
return pd.DataFrame()
if len(ranges) == 1:
return pd.DataFrame({names[0]: ranges[0]})
remaining_size = functools.reduce(
operator.mul, [len(r) for r in ranges[1:]], 1)
return pd.concat([
pd.concat([ | pd.DataFrame({names[0]: [n] * remaining_size}) | pandas.DataFrame |
import os
import pandas as pd
from unittest import TestCase, main
from metapool.sample_sheet import KLSampleSheet, sample_sheet_to_dataframe
from metapool.prep import (preparations_for_run, remove_qiita_id,
get_run_prefix, is_nonempty_gz_file,
get_machine_code, get_model_and_center,
parse_illumina_run_id,
_check_invalid_names, agp_transform, parse_prep,
generate_qiita_prep_file)
class TestPrep(TestCase):
def setUp(self):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
self.good_run = os.path.join(data_dir, 'runs',
'191103_D32611_0365_G00DHB5YXX')
self.good_run_new_version = os.path.join(
data_dir, 'runs', '191104_D32611_0365_G00DHB5YXZ')
self.OKish_run_new_version = os.path.join(
data_dir, 'runs', '191104_D32611_0365_OK15HB5YXZ')
self.ss = os.path.join(self.good_run, 'sample-sheet.csv')
self.prep = os.path.join(data_dir, 'prep.tsv')
def _check_run_191103_D32611_0365_G00DHB5YXX(self, obs):
"Convenience method to check the output of a whole run"
exp = {('191103_D32611_0365_G00DHB5YXX', 'Baz', '1'),
('191103_D32611_0365_G00DHB5YXX', 'Baz', '3'),
('191103_D32611_0365_G00DHB5YXX', 'FooBar_666', '3')}
self.assertEqual(set(obs.keys()), exp)
columns = ['sample_name', 'experiment_design_description',
'library_construction_protocol', 'platform', 'run_center',
'run_date', 'run_prefix', 'sequencing_meth', 'center_name',
'center_project_name', 'instrument_model', 'runid',
'sample_plate', 'sample_well', 'i7_index_id', 'index',
'i5_index_id', 'index2', 'lane', 'sample_project',
'well_description']
data = [['importantsample1', 'EXPERIMENT_DESC',
'LIBRARY_PROTOCOL', 'Illumina', 'UCSDMI', '2019-11-03',
'sample1_S11_L003', 'sequencing by synthesis', 'CENTER_NAME',
'Baz', 'Illumina HiSeq 2500',
'191103_D32611_0365_G00DHB5YXX', 'FooBar_666_p1', 'A3',
'iTru7_107_09', 'GCCTTGTT', 'iTru5_01_A', 'AACACCAC', '3',
'Baz', 'FooBar_666_p1.sample1.A3'],
['importantsample44', 'EXPERIMENT_DESC',
'LIBRARY_PROTOCOL', 'Illumina', 'UCSDMI', '2019-11-03',
'sample44_S14_L003', 'sequencing by synthesis', 'CENTER_NAME',
'Baz', 'Illumina HiSeq 2500',
'191103_D32611_0365_G00DHB5YXX', 'Baz_p3', 'B99',
'iTru7_107_14', 'GTCCTAAG', 'iTru5_01_A', 'CATCTGCT', '3',
'Baz', 'Baz_p3.sample44.B99']]
exp = pd.DataFrame(data=data, columns=columns)
obs_df = obs[('191103_D32611_0365_G00DHB5YXX', 'Baz', '3')]
# make sure the columns are in the same order before comparing
obs_df = obs_df[exp.columns].copy()
pd.testing.assert_frame_equal(obs_df, exp)
data = [['importantsample1', 'EXPERIMENT_DESC',
'LIBRARY_PROTOCOL', 'Illumina', 'UCSDMI', '2019-11-03',
'sample1_S11_L001', 'sequencing by synthesis', 'CENTER_NAME',
'Baz', 'Illumina HiSeq 2500',
'191103_D32611_0365_G00DHB5YXX', 'FooBar_666_p1', 'A1',
'iTru7_107_07', 'CCGACTAT', 'iTru5_01_A', 'ACCGACAA', '1',
'Baz', 'FooBar_666_p1.sample1.A1'],
['importantsample2', 'EXPERIMENT_DESC',
'LIBRARY_PROTOCOL', 'Illumina', 'UCSDMI', '2019-11-03',
'sample2_S10_L001', 'sequencing by synthesis', 'CENTER_NAME',
'Baz', 'Illumina HiSeq 2500',
'191103_D32611_0365_G00DHB5YXX', 'FooBar_666_p1', 'A2',
'iTru7_107_08', 'CCGACTAT', 'iTru5_01_A', 'CTTCGCAA', '1',
'Baz', 'FooBar_666_p1.sample2.A2']]
exp = pd.DataFrame(columns=columns, data=data)
obs_df = obs[('191103_D32611_0365_G00DHB5YXX', 'Baz', '1')]
# make sure the columns are in the same order before comparing
obs_df = obs_df[exp.columns].copy()
pd.testing.assert_frame_equal(obs_df, exp)
data = [['importantsample31', 'EXPERIMENT_DESC',
'LIBRARY_PROTOCOL', 'Illumina', 'UCSDMI', '2019-11-03',
'sample31_S13_L003', 'sequencing by synthesis',
'CENTER_NAME', 'FooBar', 'Illumina HiSeq 2500',
'191103_D32611_0365_G00DHB5YXX', 'FooBar_666_p1', 'A5',
'iTru7_107_11', 'CAATGTGG', 'iTru5_01_A', 'GGTACGAA', '3',
'FooBar_666', 'FooBar_666_p1.sample31.A5'],
['importantsample32', 'EXPERIMENT_DESC',
'LIBRARY_PROTOCOL', 'Illumina', 'UCSDMI', '2019-11-03',
'sample32_S19_L003', 'sequencing by synthesis', 'CENTER_NAME',
'FooBar', 'Illumina HiSeq 2500',
'191103_D32611_0365_G00DHB5YXX', 'FooBar_666_p1', 'B6',
'iTru7_107_12', 'AAGGCTGA', 'iTru5_01_A', 'CGATCGAT', '3',
'FooBar_666', 'FooBar_666_p1.sample32.B6'],
['importantsample34', 'EXPERIMENT_DESC',
'LIBRARY_PROTOCOL', 'Illumina', 'UCSDMI', '2019-11-03',
'sample34_S33_L003', 'sequencing by synthesis', 'CENTER_NAME',
'FooBar', 'Illumina HiSeq 2500',
'191103_D32611_0365_G00DHB5YXX', 'FooBar_666_p1', 'B8',
'iTru7_107_13', 'TTACCGAG', 'iTru5_01_A', 'AAGACACC', '3',
'FooBar_666', 'FooBar_666_p1.sample34.B8']]
exp = | pd.DataFrame(columns=columns, data=data) | pandas.DataFrame |
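# --- Hedged illustration (not part of the metapool test above): the expected-vs-observed
# DataFrame comparison pattern that _check_run_191103_D32611_0365_G00DHB5YXX relies on.
# Column names and values are reused from the test data purely for illustration.
import pandas as pd

columns = ["sample_name", "lane", "sample_project"]
exp = pd.DataFrame(
    data=[["importantsample1", "3", "Baz"],
          ["importantsample44", "3", "Baz"]],
    columns=columns,
)
# Simulate an "observed" frame holding the same content in a different column order.
obs = pd.DataFrame({
    "lane": ["3", "3"],
    "sample_project": ["Baz", "Baz"],
    "sample_name": ["importantsample1", "importantsample44"],
})
# Reorder the observed columns to match before comparing, as the test does.
obs = obs[exp.columns].copy()
pd.testing.assert_frame_equal(obs, exp)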
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import prot.viz
import prot.estimate
colors = prot.viz.plotting_style()
constants = prot.estimate.load_constants()
dataset_colors = prot.viz.dataset_colors()
from scipy.optimize import curve_fit
def func(x, a, c, d):
return a*np.exp(-c*x)+d
# # Load the compiled data
data = pd.read_csv('../../data/compiled_absolute_measurements.csv')
L_R = 7459.0 # length of all subunits in ribosomes, in amino acids
ribosome_genes = ['rpsA', 'rpsB', 'rpsC', 'rpsD', 'rpsE',
'rpsF', 'rpsG', 'rpsH', 'rpsI', 'rpsJ', 'rpsK',
'rpsL', 'rpsM', 'rpsN', 'rpsO', 'rpsP', 'rpsQ',
'rpsR', 'rpsS', 'rpsT', 'rpsU', 'sra', 'rplA', 'rplB',
'rplC', 'rplD', 'rplE', 'rplF', 'rplJ',
'rplL', 'rplI', 'rplK', 'rplM', 'rplN', 'rplO', 'rplP', 'rplQ',
'rplR', 'rplS','rplT', 'rplU', 'rplV', 'rplW', 'rplX', 'rplY',
'rpmA', 'rpmB', 'rpmC', 'rpmD', 'rpmE', 'rpmF', 'rpmG', 'rpmH',
'rpmI', 'rpmJ', 'ykgM', 'ykgO']
# %%
######################
# plot configuration #
######################
fig = plt.figure(figsize=(6, 5))  # constrained_layout=True
# widths = [6, 2.5, 2.5, 5]
widths = [5, 2, 2]
heights = [2, 4, 6]
spec = fig.add_gridspec(ncols=3, nrows=3, width_ratios=widths,
height_ratios=heights)
# subplot [0,0] blank
# plot of growth rate vs. ribosomal fraction
ax2 = fig.add_subplot(spec[1, 1])
# plot of growth rate vs. active ribosomal fraction
ax3 = fig.add_subplot(spec[2, 0])
# plot of number of ribosomes vs. growth rate - rRNA perspective
ax4 = fig.add_subplot(spec[2, 1:])
# %%
############################
# Plot 2 - growth rate vs. ribosomal fraction
############################
#
# # Add in data from Dai et al.
# load in the Dai et al. 2016 data
dai_nut_df = | pd.read_csv('../../data/dai2016_raw_data/dai2016_summary.csv') | pandas.read_csv |
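# --- Hedged sketch (not from the script above): how the exponential `func` defined
# earlier is typically fed to scipy.optimize.curve_fit. The data here are synthetic;
# the real script fits the growth-rate measurements it loads from the CSV files.
import numpy as np
from scipy.optimize import curve_fit

def exp_decay(x, a, c, d):
    return a * np.exp(-c * x) + d

rng = np.random.default_rng(0)
x = np.linspace(0, 2, 50)
y = exp_decay(x, 2.0, 1.5, 0.3) + rng.normal(0, 0.02, x.size)

# p0 gives the optimiser a sensible starting point for (a, c, d).
popt, pcov = curve_fit(exp_decay, x, y, p0=(1.0, 1.0, 0.0))
a_fit, c_fit, d_fit = popt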
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 11:29:34 2020
@author: Pavan
"""
import pandas as pd
pd.set_option('mode.chained_assignment', None)
import numpy as np
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
import scipy.stats as stats
import itertools
from datetime import datetime, date
import os
import yfinance as yf
# from functools import partial
from american_option_pricing import american_option
import density_utilities as du
import prediction_ensemble_py as pe
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
"""
#######################################################################################
Import Data
#######################################################################################
"""
data = pd.read_excel('spy.xlsx', index_col=None)
current_date = date(2020,9,29)
expiry_date = date(2020,10,2)
days_to_expiry = np.busday_count( current_date, expiry_date)-1
# min_p_profit = 35
# hor_leg_factor = 0.05
forecast_dens = False
save_results = True
save_plots = True
Strategies = []
Strategies = ["Butterfly","Double Broken Wing Butterfly","Iron Condor"]
# Strategies = ["Iron Condor"]
"""
#######################################################################################
Get Risk Free Date
#######################################################################################
"""
print("\n Gathering Risk Free Rate")
rf_eod_data = yf.download("^IRX", start="2020-07-01", end= current_date.strftime("%Y-%m-%d"))
for col in rf_eod_data.columns:
rf_eod_data[col] = pd.to_numeric(rf_eod_data[col],errors='coerce')
rf_eod_data=rf_eod_data.fillna(method='ffill')
rf_eod_data['interest']=((1+(rf_eod_data['Adj Close']/100))**(1/252))-1
rf_eod_data['annualized_interest']=252*(((1+(rf_eod_data['Adj Close']/100))**(1/252))-1)
rf_value =rf_eod_data['annualized_interest'].iloc[-1]
print("\nCurrent Risk Free Rate is :",'{:.3f}%'.format(rf_value*100))
"""
#######################################################################################
Data Cleaning
#######################################################################################
"""
def wrang_1(df, col_names):
for col in col_names:
df[col] = df[col].str.rstrip('%')
df[col] = | pd.to_numeric(df[col],errors='coerce') | pandas.to_numeric |
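# --- Minimal illustration of what wrang_1 above does, on synthetic data rather than
# the real option-chain spreadsheet: strip trailing '%' signs, then coerce the columns
# to numeric so unparseable cells become NaN.
import pandas as pd

chain = pd.DataFrame({"IV": ["12.5%", "30%", "n/a"], "Delta": ["0.45", "-0.10", ""]})
for col in ["IV", "Delta"]:
    chain[col] = chain[col].str.rstrip('%')
    chain[col] = pd.to_numeric(chain[col], errors='coerce')
# "n/a" and "" become NaN; the remaining strings become floats.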
"""NLP utils & functions for Glass description processing and salient term extraction."""
from itertools import combinations, chain
from typing import Any, Dict, Iterable, List, Optional
import re
import string
import nltk
import pandas as pd
import toolz.curried as t
from gensim import models
from Levenshtein import distance
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sg_covid_impact.utils.list_utils import flatten_freq
nltk.download("stopwords", quiet=True)
nltk.download("punkt", quiet=True)
_STOP_WORDS = set(
stopwords.words("english") + list(string.punctuation) + ["\\n"] + ["quot"]
)
# WHAT IS _REGEX_STR?
# The patterns are tried in order; each failed match falls through to the next.
# 0: URLs
# 1: words joined by two hyphens (e.g. "state-of-the")
# 2: words joined by a single hyphen
# 3: escaped-newline debris (as written: a literal backslash, a "+", then one or more "n"s)
# 4: Twitter-style @handles
# 5: anything enclosed in angle brackets (HTML-like tags)
# 6: words containing an apostrophe (contractions)
# 7: words (including underscores)
# 8-9: any single non-whitespace character (listed twice; the second entry is redundant)
_REGEX_STR = [
r"http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|" r"[!*\(\),](?:%[0-9a-f][0-9a-f]))+",
r"(?:\w+-\w+){2}", # TODO: is this useful?
r"(?:\w+-\w+)", # TODO: is this useful?
r"(?:\\\+n+)", # TODO: is this useful?
r"(?:@[\w_]+)",
r"<[^>]+>",
r"(?:\w+'\w)",
r"(?:[\w_]+)",
r"(?:\S)", # TODO: is this useful?
r"(?:\S)",
]
# Compile the tokenizer regex: case-insensitive, and VERBOSE so that whitespace inside the pattern is ignored.
tokens_re = re.compile(r"(" + "|".join(_REGEX_STR) + ")", re.VERBOSE | re.IGNORECASE)
def tokenize_document(text, remove_stops=False):
"""Preprocess a whole raw document.
Args:
text (str): Raw string of text.
remove_stops (bool): Flag to remove english stopwords
Return:
List of preprocessed and tokenized documents
"""
return [
clean_and_tokenize(sentence, remove_stops)
for sentence in nltk.sent_tokenize(text)
]
def clean_and_tokenize(text, remove_stops):
"""Preprocess a raw string/sentence of text.
Args:
text (str): Raw string of text.
remove_stops (bool): Flag to remove english stopwords
Return:
tokens (list, str): Preprocessed tokens.
"""
tokens = tokens_re.findall(text)
_tokens = [t.lower() for t in tokens]
filtered_tokens = [
token.replace("-", "_")
for token in _tokens
# Conditions to be kept:
# - Longer than 2 characters if `remove_stops`
# - Not be a stop words if `remove_stops`
# - No digits in token
# - At least one ascii lowercase character
if not (remove_stops and len(token) <= 2)
and (not remove_stops or token not in _STOP_WORDS)
and not any(x in token for x in string.digits)
and any(x in token for x in string.ascii_lowercase)
]
return filtered_tokens
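# Illustrative call (hedged: the exact tokens depend on the regex list and the NLTK
# stopword list loaded above, but the output should look like this):
#
#   clean_and_tokenize("Machine-learning start-ups need GPUs!", remove_stops=True)
#   -> ['machine_learning', 'start_ups', 'need', 'gpus']
#
# Hyphens are rewritten to underscores, punctuation-only and digit-containing tokens
# are dropped, and stopwords/short tokens are removed when remove_stops is True.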
def tokenize(text: str, tokens_re: re.Pattern) -> Iterable[str]:
"""Preprocess a raw string/sentence of text. """
return t.pipe(
text,
tokens_re.findall,
lambda tokens: chain.from_iterable(tokens) if tokens_re.groups > 1 else tokens,
t.filter(None),
)
def make_ngram(tokenised_corpus, n_gram=2, threshold=10):
"""Extract bigrams from tokenised corpus
Args:
tokenised_corpus (list): List of tokenised corpus
n_gram (int): maximum length of n-grams. Defaults to 2 (bigrams)
threshold (int): min number of n-gram occurrences before inclusion
Returns:
ngrammed_corpus (list)
"""
tokenised = tokenised_corpus.copy()
t = 1
# Loops while the ngram length less / equal than our target
while t < n_gram:
phrases = models.Phrases(tokenised, threshold=threshold)
bigram = models.phrases.Phraser(phrases)
tokenised = bigram[tokenised]
t += 1
return list(tokenised)
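# Illustrative use of make_ngram (hedged: whether a pair is actually merged depends on
# gensim's phrase scoring, its default min_count, and the threshold you pass, so tiny
# toy corpora often need lower values before any bigrams appear):
#
#   docs = [["machine", "learning", "is", "fun"],
#           ["machine", "learning", "needs", "data"]]
#   make_ngram(docs, n_gram=2, threshold=1)
#
# Pairs that co-occur often enough, such as "machine learning", are rewritten as a
# single "machine_learning" token in the returned documents.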
def make_ngrams_v2(
documents: List[List[str]], n: int = 2, phrase_kws: Optional[Dict[str, Any]] = None
) -> List[List[str]]:
"""Create ngrams using Gensim's phrases.
Args:
documents: Tokenized documents.
n: The `n` in n-gram.
phrase_kws: Passed to `gensim.models.Phrases`.
Return:
N-grams
#UTILS
"""
assert isinstance(n, int)
if n < 2:
raise ValueError("Pass n >= 2 to generate n-grams")
def_phrase_kws = {
"scoring": "npmi",
"threshold": 0.25,
"min_count": 2,
"delimiter": b"_",
}
if phrase_kws is None:
phrase_kws = def_phrase_kws
else:
def_phrase_kws.update(phrase_kws)
phrase_kws = def_phrase_kws
    tokenised = documents
    t = 1
    while t < n:
        phrases = models.Phrases(tokenised, **phrase_kws)
        bigram = models.phrases.Phraser(phrases)
        del phrases
        # Feed the previous pass back in, so that n > 2 accumulates higher-order
        # n-grams instead of re-deriving bigrams from the original documents.
        tokenised = bigram[tokenised]
        t += 1
    return list(tokenised)
def salient_words_per_category(token_df, corpus_freqs, thres, top_words=100):
"""Create a list of salient terms in a sub-corpus (normalised by corpus
frequency).
Args:
        token_df (list or series): list where every element is a tokenised doc
corpus_freqs (df): frequencies of terms in the whole corpus
thres (int): number of occurrences of a term in the subcorpus
top_words (int): number of salient words to output
    Returns:
A df where every element is a term with its salience
"""
# Create subcorpus frequencies
subcorpus_freqs = flatten_freq(token_df)
# Merge with corpus freqs
merged = pd.concat([ | pd.DataFrame(subcorpus_freqs) | pandas.DataFrame |
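# --- Hedged, self-contained sketch of the salience pattern used above: count term
# frequencies in a sub-corpus, line them up against whole-corpus frequencies with
# pd.concat, and rank terms by the ratio. `Counter` stands in for the project's
# `flatten_freq` helper, and the toy corpora are invented for illustration.
import pandas as pd
from collections import Counter

corpus = [["cafe", "closed", "lockdown"], ["shop", "open", "online"],
          ["cafe", "menu", "online"], ["gym", "closed", "lockdown"]]
subcorpus = [["cafe", "closed", "lockdown"], ["cafe", "menu", "online"]]

corpus_freqs = pd.Series(Counter(tok for doc in corpus for tok in doc), name="corpus")
sub_freqs = pd.Series(Counter(tok for doc in subcorpus for tok in doc), name="sub")

merged = pd.concat([sub_freqs, corpus_freqs], axis=1).fillna(0)
merged["salience"] = merged["sub"] / merged["corpus"]
top = merged[merged["sub"] >= 1].sort_values("salience", ascending=False)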
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 3 18:39:39 2019
"""
import re
import pandas as pd # pandas for data handling
import pkg_resources
from acccmip5.utilities.util import _fetch_url, _choose_server
class CMIP5DB:
_Turl = "https://rawgit.com/WCRP-CMIP/CMIP6_CVs/master/src/CMIP6_source_id.html"
_ETurl = "https://rawgit.com/WCRP-CMIP/CMIP6_CVs/master/src/CMIP6_experiment_id.html"
_Curl = "https://esgf-node.llnl.gov/search/cmip5/"
def __init__(self, **options):
self._total = 0
self._avail = 0
self._holder = []
self._fdata = []
@classmethod
def _set_curl(cls, url):
cls._curl = url
return cls._curl
def available_models(self):
try:
with _fetch_url(self._set_curl(self._Curl)) as self._fdata:
self._avail = len(re.findall('id="checkbox_model_',self._fdata))-1
print("\nCurrently ", self._avail," models has outputs!\n")
for zz in range(self._avail):
self._holder.append(self._fdata.split('checkbox_model_')[zz+2].split('" name="')[0])
print("Available models: \n\n")
return self._holder
except:
self._Curl=_choose_server()
with _fetch_url(self._set_curl(self._Curl)) as self._fdata:
self._avail = len(re.findall('id="checkbox_model_',self._fdata))-1
print("\nCurrently ", self._avail," models has outputs!\n")
for zz in range(self._avail):
self._holder.append(self._fdata.split('checkbox_model_')[zz+2].split('" name="')[0])
print("Available models: \n\n")
return self._holder
def all_models(self):
with _fetch_url(self._set_curl(self._Turl)) as self._fdata:
self._total = len(re.findall('<tr><td>',self._fdata))
print("\nCMIP5 has ", self._total," models in total!\n")
for zz in range(self._total):
self._holder.append(self._fdata.split('<tr><td>')[zz+1].split('</td>\\n')[0])
print("List of all CMIP5 models: \n\n")
return self._holder
def available_experiments(self):
try:
with _fetch_url(self._set_curl(self._Curl)) as self._fdata:
self._avail = len(re.findall('id="checkbox_experiment_',self._fdata))
print("\nCurrently ", self._avail," experiments has outputs!\n")
for zz in range(self._avail):
if 'family' not in self._fdata.split('checkbox_experiment_')[zz+2].split('" name="')[0]:
self._holder.append(self._fdata.split('checkbox_experiment_')[zz+2].split('" name="')[0])
print("Available experiments: \n\n")
return self._holder
except:
self._Curl=_choose_server()
with _fetch_url(self._set_curl(self._Curl)) as self._fdata:
self._avail = len(re.findall('id="checkbox_experiment_id_',self._fdata))
print("\nCurrently ", self._avail," experiments has outputs!\n")
for zz in range(self._avail):
if 'family' not in self._fdata.split('checkbox_experiment_')[zz+2].split('" name="')[0]:
self._holder.append(self._fdata.split('checkbox_experiment_id_')[zz+2].split('" name="')[0])
print("Available experiments: \n\n")
return self._holder
def all_experiments(self):
with _fetch_url(self._set_curl(self._ETurl)) as self._fdata:
self._total = len(re.findall('<tr><td>',self._fdata))
print("\nCMIP5 has ", self._total," experiments in total!\n")
for zz in range(self._total):
self._holder.append(self._fdata.split('<tr><td>')[zz+1].split('</td>\\n')[0])
print("List of all CMIP5 experiments: \n\n")
return self._holder
def CMIP5_variables(self):
try:
with _fetch_url(self._set_curl(self._Curl)) as self._fdata:
self._avail = len(re.findall('id="checkbox_variable_',self._fdata))
print("\nCurrently ", self._avail," variables has outputs!\n")
for zz in range(self._avail):
if 'long_name' not in self._fdata.split('checkbox_variable_')[zz+2].split('" name="')[0]:
self._holder.append(self._fdata.split('checkbox_variable_')[zz+2].split('" name="')[0])
print("Available variables: \n\n")
return self._holder
except:
self._Curl=_choose_server()
with _fetch_url(self._set_curl(self._Curl)) as self._fdata:
self._avail = len(re.findall('id="checkbox_variable_',self._fdata))
print("\nCurrently ", self._avail," variables has outputs!\n")
for zz in range(self._avail):
if 'long_name' not in self._fdata.split('checkbox_variable_')[zz+2].split('" name="')[0]:
self._holder.append(self._fdata.split('checkbox_variable_')[zz+2].split('" name="')[0])
print("Available variables: \n\n")
return self._holder
def available_frequencies(self):
try:
with _fetch_url(self._set_curl(self._Curl)) as self._fdata:
self._avail = len(re.findall('id="checkbox_time_frequency_',self._fdata))
print("\nCurrently ", self._avail," frequencies has outputs!\n")
for zz in range(self._avail):
self._holder.append(self._fdata.split('checkbox_time_frequency_')[zz+2].split('" name="')[0])
print("Available frequencies: \n\n")
return self._holder
except:
self._Curl=_choose_server()
with _fetch_url(self._set_curl(self._Curl)) as self._fdata:
self._avail = len(re.findall('id="checkbox_time_frequency_',self._fdata))
print("\nCurrently ", self._avail," frequencies has outputs!\n")
for zz in range(self._avail):
self._holder.append(self._fdata.split('checkbox_time_frequency_')[zz+2].split('" name="')[0])
print("Available frequencies: \n\n")
return self._holder
def available_realmns(self):
try:
with _fetch_url(self._set_curl(self._Curl)) as self._fdata:
self._avail = len(re.findall('id="checkbox_realm_',self._fdata))
print("\nCurrently ", self._avail," realms has outputs!\n")
for zz in range(self._avail):
self._holder.append(self._fdata.split('checkbox_realm_')[zz+2].split('" name="')[0])
print("Available realms: \n\n")
return self._holder
except:
self._Curl=_choose_server()
with _fetch_url(self._set_curl(self._Curl)) as self._fdata:
self._avail = len(re.findall('id="checkbox_realm_',self._fdata))
print("\nCurrently ", self._avail," realms has outputs!\n")
for zz in range(self._avail):
self._holder.append(self._fdata.split('checkbox_realm_')[zz+2].split('" name="')[0])
print("Available frequencies: \n\n")
return self._holder
def var_stdName(self):
try:
with _fetch_url(self._set_curl(self._Curl)) as self._fdata:
self._avail = len(re.findall('id="checkbox_cf_standard_name_',self._fdata))
print("\nCurrently ", self._avail," variables has outputs!\n")
for zz in range(self._avail):
self._holder.append(self._fdata.split('checkbox_cf_standard_name_')[zz+2].split('" name="')[0])
print("Available variables: \n\n")
return self._holder
except:
self._Curl=_choose_server()
with _fetch_url(self._set_curl(self._Curl)) as self._fdata:
self._avail = len(re.findall('id="checkbox_cf_standard_name_',self._fdata))
print("\nCurrently ", self._avail," variables has outputs!\n")
for zz in range(self._avail):
self._holder.append(self._fdata.split('checkbox_cf_standard_name_')[zz+2].split('" name="')[0])
print("Available frequencies: \n\n")
return self._holder
@staticmethod
def _get_definition(exp):
resource_package = __name__
resource_path = '/'.join(('data', 'CMIP6_exps.xlsx'))
tmp = pkg_resources.resource_stream(resource_package, resource_path)
did=pd.read_excel(tmp)
exp_name=pd.DataFrame(did,columns=['canonical_name'])
exp_def=pd.DataFrame(did,columns=['description'])
for zz in range(len(exp_name.values)):
if (exp_name.values[zz]==exp):
definition=(exp_def.values[zz][0]).split('\n')[0].strip()
return definition
@staticmethod
def _get_longName(var):
resource_package = __name__
resource_path = '/'.join(('data', 'var_list.xlsx'))
tmp = pkg_resources.resource_stream(resource_package, resource_path)
did=pd.read_excel(tmp)
v=pd.DataFrame(did,columns=['variable'])
v_long= | pd.DataFrame(did,columns=['Long_name']) | pandas.DataFrame |
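# --- Minimal sketch (synthetic table, not the packaged CMIP6_exps.xlsx / var_list.xlsx)
# of the lookup pattern used by _get_definition/_get_longName above: load a table, then
# map a short name to its description, keeping only the first line of the text.
import pandas as pd

lookup = pd.DataFrame({
    "canonical_name": ["historical", "ssp585"],
    "description": ["All-forcing simulation of the recent past\nextra notes",
                    "Future high-end forcing pathway"],
})

def get_definition(exp, table=lookup):
    hits = table.loc[table["canonical_name"] == exp, "description"]
    if hits.empty:
        return None
    return hits.iloc[0].split("\n")[0].strip()

print(get_definition("historical"))  # -> "All-forcing simulation of the recent past"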
import pandas as pd
import argparse
def extract_simple_repeats(row, current_repeat_table):
current_repeat_table_entries = current_repeat_table.loc[(current_repeat_table[2] <= int(row["POS"])) & (current_repeat_table[3] >= int(row["POS"]))]
occurences = len(current_repeat_table_entries)
if occurences == 1:
repeat_length = str(current_repeat_table_entries[5][0])
repeat_region_start = str(current_repeat_table_entries[2][0])
repeat_region_end = str(current_repeat_table_entries[3][0])
repeat_region = ">".join(["chr" + str(row["CHROM"]), repeat_region_start, repeat_region_end])
return [repeat_region, repeat_length]
elif occurences > 1:
repeat_length_entries = [str(value) for value in current_repeat_table_entries[5].tolist()]
repeat_region_start = [str(value) for value in current_repeat_table_entries[2].tolist()]
repeat_region_end = [str(value) for value in current_repeat_table_entries[3].tolist()]
repeat_region_entries = [">".join(["chr" + str(row["CHROM"]), repeat_region_start[i], repeat_region_end[i]]) for i in range(len(repeat_region_start))]
repeat_length = "&".join(repeat_length_entries)
repeat_region = "&".join(repeat_region_entries)
return [repeat_region, repeat_length]
else:
return ["", ""]
def extract_data(data, current_repeat_table):
data[["SimpleTandemRepeatRegion", "SimpleTandemRepeatLength"]] = data.apply(lambda x: pd.Series(extract_simple_repeats(x, current_repeat_table)), axis=1)
return data
def group_and_process_data(repeat_data, data):
repeats = pd.read_csv(repeat_data, sep="\t", low_memory=False, header=None)
repeats.set_index([1], inplace=True)
data_grouped = [group for key, group in data.groupby("CHROM")]
for group in data_grouped:
if str(group["CHROM"].iloc[0]) == "MT":
continue
else:
current_repeat_table = repeats.loc["chr" + str(group["CHROM"].iloc[0])]
group = extract_data(group, current_repeat_table)
data_combined = | pd.concat(data_grouped) | pandas.concat |
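# --- Self-contained sketch (toy variants, not the real repeat annotations) of the
# split-process-recombine pattern above: group variants by chromosome, transform each
# group, then stitch the pieces back together with pd.concat.
import pandas as pd

variants = pd.DataFrame({
    "CHROM": ["1", "1", "2", "MT"],
    "POS": [1500, 7200, 300, 50],
})

processed = []
for chrom, group in variants.groupby("CHROM"):
    group = group.copy()
    # Stand-in for the per-chromosome annotation step (e.g. repeat-overlap lookup).
    group["annotated"] = group["POS"] > 1000
    processed.append(group)

combined = pd.concat(processed).sort_index()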