prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
---|---|---|
import pandas as pd
class Write:
def Write(self, df, target_path):
print(f'\n===\nWriting data to target: {target_path} ...')
# Write the collated and formatted data to a new file
# Create separate DataFrames for each sheet in the Migration Template
df_member = pd.DataFrame.from_dict(df['Member'])
df_membership = pd.DataFrame.from_dict(df['Membership'])
df_membership_category = pd.DataFrame.from_dict(df['Membership Category'])
df_teams = pd.DataFrame.from_dict(df['Teams'])
df_training_fee = pd.DataFrame.from_dict(df['Training fee'])
df_training_locations = pd.DataFrame.from_dict(df['Training locations'])
df_department = pd.DataFrame.from_dict(df['Department info'])
df_club = pd.DataFrame.from_dict(df['Club info'])
df_committees = pd.DataFrame.from_dict(df['Committees'])
df_grens = pd.DataFrame.from_dict(df['Grens'])
df_style = pd.DataFrame.from_dict(df['Style'])
df_op_duration = pd.DataFrame.from_dict(df['Op - Duration type'])
df_op_invoice = pd.DataFrame.from_dict(df['Op - Invoice Duration Type'])
df_op_memcat = pd.DataFrame.from_dict(df['Op - Membership Category'])
df_op_yn = | pd.DataFrame.from_dict(df['Op - Yes_No']) | pandas.DataFrame.from_dict |
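# Hedged sketch (not from the original Write class): assuming the migration
# template is a single Excel workbook, the per-sheet DataFrames built above
# could be written out with pandas' ExcelWriter, one sheet per frame.
import pandas as pd

def write_sheets(sheet_frames, target_path):
    """Write a dict of {sheet_name: DataFrame} to a single .xlsx workbook."""
    with pd.ExcelWriter(target_path) as writer:
        for sheet_name, frame in sheet_frames.items():
            frame.to_excel(writer, sheet_name=sheet_name, index=False)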
from unittest import TestCase
from unittest.mock import Mock, patch
import copulas
import numpy as np
import pandas as pd
import pytest
from copulas import univariate
from rdt.transformers.null import NullTransformer
from rdt.transformers.numerical import ClusterBasedNormalizer, FloatFormatter, GaussianNormalizer
class TestFloatFormatter(TestCase):
def test___init__super_attrs(self):
"""super() arguments are properly passed and set as attributes."""
nt = FloatFormatter(
missing_value_replacement='mode',
model_missing_values=False
)
assert nt.missing_value_replacement == 'mode'
assert nt.model_missing_values is False
def test_get_output_sdtypes(self):
"""Test the ``get_output_sdtypes`` method when a null column is created.
When a null column is created, this method should apply the ``_add_prefix``
method to the following dictionary of output sdtypes:
output_sdtypes = {
'value': 'float',
'is_null': 'float'
}
Setup:
- initialize a ``FloatFormatter`` transformer which:
- sets ``self.null_transformer`` to a ``NullTransformer`` where
``self.model_missing_values`` is True.
- sets ``self.column_prefix`` to a string.
Output:
- the ``output_sdtypes`` dictionary, but with the ``self.column_prefix``
added to the beginning of the keys.
"""
# Setup
transformer = FloatFormatter()
transformer.null_transformer = NullTransformer(missing_value_replacement='fill')
transformer.null_transformer._model_missing_values = True
transformer.column_prefix = 'a#b'
# Run
output = transformer.get_output_sdtypes()
# Assert
expected = {
'a#b.value': 'float',
'a#b.is_null': 'float'
}
assert output == expected
def test_is_composition_identity_null_transformer_true(self):
"""Test the ``is_composition_identity`` method with a ``null_transformer``.
When the attribute ``null_transformer`` is not None and a null column is not created,
this method should simply return False.
Setup:
- initialize a ``FloatFormatter`` transformer which sets
``self.null_transformer`` to a ``NullTransformer`` where
``self.model_missing_values`` is False.
Output:
- False
"""
# Setup
transformer = FloatFormatter()
transformer.null_transformer = NullTransformer(missing_value_replacement='fill')
# Run
output = transformer.is_composition_identity()
# Assert
assert output is False
def test_is_composition_identity_null_transformer_false(self):
"""Test the ``is_composition_identity`` method without a ``null_transformer``.
When the attribute ``null_transformer`` is None, this method should return
the value stored in the ``COMPOSITION_IS_IDENTITY`` attribute.
Setup:
- initialize a ``FloatFormatter`` transformer which sets
``self.null_transformer`` to None.
Output:
- the value stored in ``self.COMPOSITION_IS_IDENTITY``.
"""
# Setup
transformer = FloatFormatter()
transformer.null_transformer = None
# Run
output = transformer.is_composition_identity()
# Assert
assert output is True
def test__learn_rounding_digits_more_than_15_decimals(self):
"""Test the _learn_rounding_digits method with more than 15 decimals.
If the data has more than 15 decimals, None should be returned.
Input:
- An array that contains floats with more than 15 decimals.
Output:
- None
"""
data = np.random.random(size=10).round(20)
output = FloatFormatter._learn_rounding_digits(data)
assert output is None
def test__learn_rounding_digits_less_than_15_decimals(self):
"""Test the _learn_rounding_digits method with less than 15 decimals.
If the data has less than 15 decimals, the maximum number of decimals
should be returned.
Input:
- An array that contains floats with a maximum of 3 decimals and a
NaN.
Output:
- 3
"""
data = np.array([10, 0., 0.1, 0.12, 0.123, np.nan])
output = FloatFormatter._learn_rounding_digits(data)
assert output == 3
def test__learn_rounding_digits_negative_decimals_float(self):
"""Test the _learn_rounding_digits method with floats multiples of powers of 10.
If the data has all multiples of 10, 100, or any other higher power of 10,
the output is the negative number of decimals representing the corresponding
power of 10.
Input:
- An array that contains floats that are multiples of powers of 10, 100 and 1000
and a NaN.
Output:
- -1
"""
data = np.array([1230., 12300., 123000., np.nan])
output = FloatFormatter._learn_rounding_digits(data)
assert output == -1
def test__learn_rounding_digits_negative_decimals_integer(self):
"""Test the _learn_rounding_digits method with integers multiples of powers of 10.
If the data has all multiples of 10, 100, or any other higher power of 10,
the output is the negative number of decimals representing the corresponding
power of 10.
Input:
- An array that contains integers that are multiples of powers of 10, 100 and 1000
and a NaN.
Output:
- -1
"""
data = np.array([1230, 12300, 123000, np.nan])
output = FloatFormatter._learn_rounding_digits(data)
assert output == -1
def test__learn_rounding_digits_all_missing_value_replacements(self):
"""Test the _learn_rounding_digits method with data that is all NaNs.
If the data is all NaNs, expect that the output is None.
Input:
- An array of NaN.
Output:
- None
"""
data = np.array([np.nan, np.nan, np.nan, np.nan])
output = FloatFormatter._learn_rounding_digits(data)
assert output is None
def test__fit(self):
"""Test the ``_fit`` method.
Validate that the ``_dtype`` and ``.null_transformer.missing_value_replacement`` attributes
are set correctly.
Setup:
- initialize a ``FloatFormatter`` with the ``missing_value_replacement``
parameter set to ``'missing_value_replacement'``.
Input:
- a pandas series containing a None.
Side effect:
- it sets the ``null_transformer.missing_value_replacement``.
- it sets the ``_dtype``.
"""
# Setup
data = pd.Series([1.5, None, 2.5])
transformer = FloatFormatter(
missing_value_replacement='missing_value_replacement'
)
# Run
transformer._fit(data)
# Asserts
expected = 'missing_value_replacement'
assert transformer.null_transformer._missing_value_replacement == expected
assert transformer._dtype == float
def test__fit_learn_rounding_scheme_false(self):
"""Test ``_fit`` with ``learn_rounding_scheme`` set to ``False``.
If the ``learn_rounding_scheme`` is set to ``False``, the ``_fit`` method
should not set its ``_rounding_digits`` instance variable.
Input:
- An array with floats rounded to one decimal and a None value
Side Effect:
- ``_rounding_digits`` should be ``None``
"""
# Setup
data = pd.Series([1.5, None, 2.5])
# Run
transformer = FloatFormatter(
missing_value_replacement='missing_value_replacement',
learn_rounding_scheme=False
)
transformer._fit(data)
# Asserts
assert transformer._rounding_digits is None
def test__fit_learn_rounding_scheme_true(self):
"""Test ``_fit`` with ``learn_rounding_scheme`` set to ``True``.
If ``learn_rounding_scheme`` is set to ``True``, the ``_fit`` method
should set its ``_rounding_digits`` instance variable to what is learned
in the data.
Input:
- A Series with floats up to 4 decimals and a None value
Side Effect:
- ``_rounding_digits`` is set to 4
"""
# Setup
data = pd.Series([1, 2.1, 3.12, 4.123, 5.1234, 6.123, 7.12, 8.1, 9, None])
# Run
transformer = FloatFormatter(
missing_value_replacement='mean',
learn_rounding_scheme=True
)
transformer._fit(data)
# Asserts
assert transformer._rounding_digits == 4
def test__fit_learn_rounding_scheme_true_large_numbers(self):
"""Test ``_fit`` with ``learn_rounding_scheme`` set to ``True`` on large numbers.
If the ``learn_rounding_scheme`` parameter is set to ``True`` and the data is
very large, ``_fit`` should learn ``_rounding_digits`` to be the highest number of 0s
to round to that keeps the data the same.
Input:
- Series of data with numbers between 10^10 and 10^20
Side Effect:
- ``_rounding_digits`` is set to the minimum exponent seen in the data
"""
# Setup
exponents = [np.random.randint(10, 20) for i in range(10)]
big_numbers = [10**exponents[i] for i in range(10)]
data = pd.Series(big_numbers)
# Run
transformer = FloatFormatter(
missing_value_replacement='mean',
learn_rounding_scheme=True
)
transformer._fit(data)
# Asserts
assert transformer._rounding_digits == -min(exponents)
def test__fit_learn_rounding_scheme_true_max_decimals(self):
"""Test ``_fit`` with ``learn_rounding_scheme`` set to ``True``.
If the ``learn_rounding_scheme`` parameter is set to ``True``, ``_fit`` should learn
the ``_rounding_digits`` to be the max number of decimal places seen in the data.
The max amount of decimals that floats can be accurately compared with is 15.
If the input data has values with more than 14 decimals, we will not be able to
accurately learn the number of decimal places required, so we do not round.
Input:
- Series with a value that has 15 decimals
Side Effect:
- ``_rounding_digits`` is set to ``None``
"""
# Setup
data = pd.Series([0.000000000000001])
# Run
transformer = FloatFormatter(
missing_value_replacement='mean',
learn_rounding_scheme=True
)
transformer._fit(data)
# Asserts
assert transformer._rounding_digits is None
def test__fit_learn_rounding_scheme_true_inf(self):
"""Test ``_fit`` with ``learn_rounding_scheme`` set to ``True``.
If the ``learn_rounding_scheme`` parameter is set to ``True``, and the data
contains values that are infinite, ``_fit`` should learn the ``_rounding_digits``
to be the min number of decimal places seen in the data with the infinite values
filtered out.
Input:
- Series with ``np.inf`` as a value
Side Effect:
- ``_rounding_digits`` is set to max seen in rest of data
"""
# Setup
data = pd.Series([15000, 4000, 60000, np.inf])
# Run
transformer = FloatFormatter(
missing_value_replacement='mean',
learn_rounding_scheme=True
)
transformer._fit(data)
# Asserts
assert transformer._rounding_digits == -3
def test__fit_learn_rounding_scheme_true_max_zero(self):
"""Test ``_fit`` with ``learn_rounding_scheme`` set to ``True``.
If the ``learn_rounding_scheme`` parameter is set to ``True``, and the max
in the data is 0, ``_fit`` should learn the ``_rounding_digits`` to be 0.
Input:
- Series with 0 as max value
Side Effect:
- ``_rounding_digits`` is set to 0
"""
# Setup
data = pd.Series([0, 0, 0])
# Run
transformer = FloatFormatter(
missing_value_replacement='mean',
learn_rounding_scheme=True
)
transformer._fit(data)
# Asserts
assert transformer._rounding_digits == 0
def test__fit_learn_rounding_scheme_true_max_negative(self):
"""Test ``_fit`` with ``learn_rounding_scheme`` set to ``True``.
If the ``learn_rounding_scheme`` parameter is set to ``True``, and the max
in the data is negative, the ``_fit`` method should learn ``_rounding_digits``
to be the minimum number of digits seen in those negative values.
Input:
- Series with negative max value
Side Effect:
- ``_rounding_digits`` is set to min number of digits in array
"""
# Setup
data = pd.Series([-500, -220, -10])
# Run
transformer = FloatFormatter(
missing_value_replacement='mean',
learn_rounding_scheme=True
)
transformer._fit(data)
# Asserts
assert transformer._rounding_digits == -1
def test__fit_enforce_min_max_values_false(self):
"""Test ``_fit`` with ``enforce_min_max_values`` set to ``False``.
If the ``enforce_min_max_values`` parameter is set to ``False``,
the ``_fit`` method should not set its ``min`` or ``max``
instance variables.
Input:
- Series of floats and null values
Side Effect:
- ``_min_value`` and ``_max_value`` stay ``None``
"""
# Setup
data = pd.Series([1.5, None, 2.5])
# Run
transformer = FloatFormatter(
missing_value_replacement='mean',
enforce_min_max_values=False
)
transformer._fit(data)
# Asserts
assert transformer._min_value is None
assert transformer._max_value is None
def test__fit_enforce_min_max_values_true(self):
"""Test ``_fit`` with ``enforce_min_max_values`` set to ``True``.
If the ``enforce_min_max_values`` parameter is set to ``True``,
the ``_fit`` method should learn the min and max values from the _fitted data.
Input:
- Series of floats and null values
Side Effect:
- ``_min_value`` and ``_max_value`` are learned
"""
# Setup
data = pd.Series([-100, -5000, 0, None, 100, 4000])
# Run
transformer = FloatFormatter(
missing_value_replacement='mean',
enforce_min_max_values=True
)
transformer._fit(data)
# Asserts
assert transformer._min_value == -5000
assert transformer._max_value == 4000
def test__transform(self):
"""Test the ``_transform`` method.
Validate that this method calls the ``self.null_transformer.transform`` method once.
Setup:
- create an instance of a ``FloatFormatter`` and set ``self.null_transformer``
to a ``NullTransformer``.
Input:
- a pandas series.
Output:
- the transformed numpy array.
"""
# Setup
data = pd.Series([1, 2, 3])
transformer = FloatFormatter()
transformer.null_transformer = Mock()
# Run
transformer._transform(data)
# Assert
assert transformer.null_transformer.transform.call_count == 1
def test__reverse_transform_learn_rounding_scheme_false(self):
"""Test ``_reverse_transform`` when ``learn_rounding_scheme`` is ``False``.
The data should not be rounded at all.
Input:
- Random array of floats between 0 and 1
Output:
- Input array
"""
# Setup
data = np.random.random(10)
# Run
transformer = FloatFormatter(missing_value_replacement=None)
transformer.learn_rounding_scheme = False
transformer._rounding_digits = None
result = transformer._reverse_transform(data)
# Assert
np.testing.assert_array_equal(result, data)
def test__reverse_transform_rounding_none_dtype_int(self):
"""Test ``_reverse_transform`` with ``_dtype`` as ``np.int64`` and no rounding.
The data should be rounded to 0 decimals and returned as integer values if the ``_dtype``
is ``np.int64`` even if ``_rounding_digits`` is ``None``.
Input:
- Array of multiple float values with decimals.
Output:
- Input array rounded and converted to integers.
"""
# Setup
data = np.array([0., 1.2, 3.45, 6.789])
# Run
transformer = FloatFormatter(missing_value_replacement=None)
transformer._rounding_digits = None
transformer._dtype = np.int64
result = transformer._reverse_transform(data)
# Assert
expected = np.array([0, 1, 3, 7])
np.testing.assert_array_equal(result, expected)
def test__reverse_transform_rounding_none_with_nulls(self):
"""Test ``_reverse_transform`` when ``_rounding_digits`` is ``None`` and there are nulls.
The data should not be rounded at all.
Input:
- 2d array of multiple float values with decimals and a second column flagging at least one null.
Output:
- First column of the input array as entered, replacing the indicated value with a
missing_value_replacement.
"""
# Setup
data = [
[0., 0.],
[1.2, 0.],
[3.45, 1.],
[6.789, 0.],
]
data = pd.DataFrame(data, columns=['a', 'b'])
# Run
transformer = FloatFormatter(missing_value_replacement='mean')
null_transformer = Mock()
null_transformer.reverse_transform.return_value = np.array([0., 1.2, np.nan, 6.789])
transformer.null_transformer = null_transformer
transformer.learn_rounding_scheme = False
transformer._rounding_digits = None
transformer._dtype = float
result = transformer._reverse_transform(data)
# Assert
expected = np.array([0., 1.2, np.nan, 6.789])
np.testing.assert_array_equal(result, expected)
def test__reverse_transform_rounding_none_with_nulls_dtype_int(self):
"""Test ``_reverse_transform`` rounding when dtype is int and there are nulls.
The data should be rounded to 0 decimals and returned as float values with
nulls in the right place.
Input:
- 2d array of multiple float values with decimals and a second column flagging at least one null.
Output:
- First column of the input array rounded, replacing the indicated value with a
``NaN``, and kept as float values.
"""
# Setup
data = np.array([
[0., 0.],
[1.2, 0.],
[3.45, 1.],
[6.789, 0.],
])
# Run
transformer = FloatFormatter(missing_value_replacement='mean')
null_transformer = Mock()
null_transformer.reverse_transform.return_value = np.array([0., 1.2, np.nan, 6.789])
transformer.null_transformer = null_transformer
transformer.learn_rounding_scheme = False
transformer._rounding_digits = None
transformer._dtype = int
result = transformer._reverse_transform(data)
# Assert
expected = np.array([0., 1., np.nan, 7.])
np.testing.assert_array_equal(result, expected)
def test__reverse_transform_rounding_small_numbers(self):
"""Test ``_reverse_transform`` when ``_rounding_digits`` is positive.
The data should round to the maximum number of decimal places
set in the ``_rounding_digits`` value.
Input:
- Array with decimals
Output:
- Same array rounded to the provided number of decimal places
"""
# Setup
data = np.array([1.1111, 2.2222, 3.3333, 4.44444, 5.555555])
# Run
transformer = FloatFormatter(missing_value_replacement=None)
transformer.learn_rounding_scheme = True
transformer._rounding_digits = 2
result = transformer._reverse_transform(data)
# Assert
expected_data = np.array([1.11, 2.22, 3.33, 4.44, 5.56])
np.testing.assert_array_equal(result, expected_data)
def test__reverse_transform_rounding_big_numbers_type_int(self):
"""Test ``_reverse_transform`` when ``_rounding_digits`` is negative.
The data should round to the number set in the ``_rounding_digits``
attribute and remain ints.
Input:
- Array with floats above 100
Output:
- Same array rounded to the provided number of 0s
- Array should be of type int
"""
# Setup
data = np.array([2000.0, 120.0, 3100.0, 40100.0])
# Run
transformer = FloatFormatter(missing_value_replacement=None)
transformer._dtype = int
transformer.learn_rounding_scheme = True
transformer._rounding_digits = -3
result = transformer._reverse_transform(data)
# Assert
expected_data = np.array([2000, 0, 3000, 40000])
np.testing.assert_array_equal(result, expected_data)
assert result.dtype == int
def test__reverse_transform_rounding_negative_type_float(self):
"""Test ``_reverse_transform`` when ``_rounding_digits`` is negative.
The data should round to the number set in the ``_rounding_digits``
attribute and remain floats.
Input:
- Array with larger numbers
Output:
- Same array rounded to the provided number of 0s
- Array should be of type float
"""
# Setup
data = np.array([2000.0, 120.0, 3100.0, 40100.0])
# Run
transformer = FloatFormatter(missing_value_replacement=None)
transformer.learn_rounding_scheme = True
transformer._rounding_digits = -3
result = transformer._reverse_transform(data)
# Assert
expected_data = np.array([2000.0, 0.0, 3000.0, 40000.0])
np.testing.assert_array_equal(result, expected_data)
assert result.dtype == float
def test__reverse_transform_rounding_zero_decimal_places(self):
"""Test ``_reverse_transform`` when ``_rounding_digits`` is 0.
The data should round to the number set in the ``_rounding_digits``
attribute.
Input:
- Array with larger numbers
Output:
- Same array rounded to the 0s place
"""
# Setup
data = np.array([2000.554, 120.2, 3101, 4010])
# Run
transformer = FloatFormatter(missing_value_replacement=None)
transformer.learn_rounding_scheme = True
transformer._rounding_digits = 0
result = transformer._reverse_transform(data)
# Assert
expected_data = np.array([2001, 120, 3101, 4010])
np.testing.assert_array_equal(result, expected_data)
def test__reverse_transform_enforce_min_max_values(self):
"""Test ``_reverse_transform`` with ``enforce_min_max_values`` set to ``True``.
The ``_reverse_transform`` method should clip any values above
the ``max_value`` and any values below the ``min_value``.
Input:
- Array with values above the max and below the min
Output:
- Array with out of bound values clipped to min and max
"""
# Setup
data = np.array([-np.inf, -5000, -301, -250, 0, 125, 401, np.inf])
# Run
transformer = FloatFormatter(missing_value_replacement=None)
transformer.enforce_min_max_values = True
transformer._max_value = 400
transformer._min_value = -300
result = transformer._reverse_transform(data)
# Asserts
np.testing.assert_array_equal(result, np.array([-300, -300, -300, -250, 0, 125, 400, 400]))
def test__reverse_transform_enforce_min_max_values_with_nulls(self):
"""Test ``_reverse_transform`` with nulls and ``enforce_min_max_values`` set to ``True``.
The ``_reverse_transform`` method should clip any values above
the ``max_value`` and any values below the ``min_value``. Null values
should be replaced with ``np.nan``.
Input:
- 2d array where second column has some values over 0.5 representing null values
Output:
- Array with out of bounds values clipped and null values injected
"""
# Setup
data = np.array([
[-np.inf, 0],
[-5000, 0.1],
[-301, 0.8],
[-250, 0.4],
[0, 0],
[125, 1],
[401, 0.2],
[np.inf, 0.5]
])
expected_data = np.array([-300, -300, np.nan, -250, 0, np.nan, 400, 400])
# Run
transformer = FloatFormatter(missing_value_replacement='mean')
transformer._max_value = 400
transformer._min_value = -300
transformer.enforce_min_max_values = True
transformer.null_transformer = Mock()
transformer.null_transformer.reverse_transform.return_value = expected_data
result = transformer._reverse_transform(data)
# Asserts
null_transformer_calls = transformer.null_transformer.reverse_transform.mock_calls
np.testing.assert_array_equal(null_transformer_calls[0][1][0], data)
np.testing.assert_array_equal(result, expected_data)
class TestGaussianNormalizer:
def test___init__super_attrs(self):
"""super() arguments are properly passed and set as attributes."""
ct = GaussianNormalizer(
model_missing_values=False,
learn_rounding_scheme=False,
enforce_min_max_values=False
)
assert ct.missing_value_replacement == 'mean'
assert ct.model_missing_values is False
assert ct.learn_rounding_scheme is False
assert ct.enforce_min_max_values is False
def test___init__str_distr(self):
"""If distribution is a str, it is resolved using the _DISTRIBUTIONS dict."""
ct = GaussianNormalizer(distribution='gamma')
assert ct._distribution is copulas.univariate.GammaUnivariate
def test___init__non_distr(self):
"""If distribution is not an str, it is store as given."""
univariate = copulas.univariate.Univariate()
ct = GaussianNormalizer(distribution=univariate)
assert ct._distribution is univariate
def test__get_distributions_copulas_not_installed(self):
"""Test the ``_get_distributions`` method when copulas is not installed.
Validate that this method raises the appropriate error message when copulas is
not installed.
Raise:
- ImportError('\n\nIt seems like `copulas` is not installed.\n'
'Please install it using:\n\n pip install rdt[copulas]')
"""
__py_import__ = __import__
def custom_import(name, *args):
if name == 'copulas':
raise ImportError('Simulate copulas not being importable.')
return __py_import__(name, *args)
with patch('builtins.__import__', side_effect=custom_import):
with pytest.raises(ImportError, match=r'pip install rdt\[copulas\]'):
GaussianNormalizer._get_distributions()
def test__get_distributions(self):
"""Test the ``_get_distributions`` method.
Validate that this method returns the correct dictionary of distributions.
Setup:
- instantiate a ``GaussianNormalizer``.
"""
# Setup
transformer = GaussianNormalizer()
# Run
distributions = transformer._get_distributions()
# Assert
expected = {
'gaussian': univariate.GaussianUnivariate,
'gamma': univariate.GammaUnivariate,
'beta': univariate.BetaUnivariate,
'student_t': univariate.StudentTUnivariate,
'gaussian_kde': univariate.GaussianKDE,
'truncated_gaussian': univariate.TruncatedGaussian,
}
assert distributions == expected
def test__get_univariate_instance(self):
"""Test the ``_get_univariate`` method when the distribution is univariate.
Validate that a deepcopy of the distribution stored in ``self._distribution`` is returned.
Setup:
- create an instance of a ``GaussianNormalizer`` with ``distribution`` set
to ``univariate.Univariate``.
Output:
- a copy of the value stored in ``self._distribution``.
"""
# Setup
distribution = copulas.univariate.BetaUnivariate()
ct = GaussianNormalizer(distribution=distribution)
# Run
univariate = ct._get_univariate()
# Assert
assert univariate is not distribution
assert isinstance(univariate, copulas.univariate.Univariate)
assert dir(univariate) == dir(distribution)
def test__get_univariate_tuple(self):
"""Test the ``_get_univariate`` method when the distribution is a tuple.
When the distribution is passed as a tuple, it should return an instance
with the passed arguments.
Setup:
- create an instance of a ``GaussianNormalizer`` and set
``distribution`` to a tuple.
Output:
- an instance of ``copulas.univariate.Univariate`` with the passed arguments.
"""
# Setup
distribution = (
copulas.univariate.Univariate,
{'candidates': 'a_candidates_list'}
)
ct = GaussianNormalizer(distribution=distribution)
# Run
univariate = ct._get_univariate()
# Assert
assert isinstance(univariate, copulas.univariate.Univariate)
assert univariate.candidates == 'a_candidates_list'
def test__get_univariate_class(self):
"""Test the ``_get_univariate`` method when the distribution is a class.
When ``distribution`` is passed as a class, it should return an instance
without passing arguments.
Setup:
- create an instance of a ``GaussianNormalizer`` and set ``distribution``
to ``univariate.Univariate``.
Output:
- an instance of ``copulas.univariate.Univariate`` without any arguments.
"""
# Setup
distribution = copulas.univariate.BetaUnivariate
ct = GaussianNormalizer(distribution=distribution)
# Run
univariate = ct._get_univariate()
# Assert
assert isinstance(univariate, copulas.univariate.Univariate)
def test__get_univariate_error(self):
"""Test the ``_get_univariate`` method when ``distribution`` is invalid.
Validate that it raises an error if an invalid distribution is stored in
``distribution``.
Setup:
- create an instance of a ``GaussianNormalizer`` and set ``self._distribution``
improperly.
Raise:
- TypeError(f'Invalid distribution: {distribution}')
"""
# Setup
distribution = 123
ct = GaussianNormalizer(distribution=distribution)
# Run / Assert
with pytest.raises(TypeError):
ct._get_univariate()
def test__fit(self):
"""Test the ``_fit`` method.
Validate that ``_fit`` calls ``_get_univariate``.
Setup:
- create an instance of the ``GaussianNormalizer``.
- mock the ``_get_univariate`` method.
Input:
- a pandas series of float values.
Side effect:
- call the ``_get_univariate`` method.
"""
# Setup
data = pd.Series([0.0, np.nan, 1.0])
ct = GaussianNormalizer()
ct._get_univariate = Mock()
# Run
ct._fit(data)
# Assert
ct._get_univariate.return_value.fit.assert_called_once()
call_value = ct._get_univariate.return_value.fit.call_args_list[0]
np.testing.assert_array_equal(
call_value[0][0],
np.array([0.0, 0.5, 1.0])
)
def test__fit_model_missing_values(self):
"""Test the ``_fit`` method.
Validate that ``_fit`` calls ``_get_univariate``.
Setup:
- create an instance of the ``GaussianNormalizer`` with ``model_missing_values``
set to True.
- mock the ``_get_univariate`` method.
Input:
- a pandas series of float values.
Side effect:
- call the ``_get_univariate`` method.
"""
# Setup
data = | pd.Series([0.0, np.nan, 1.0]) | pandas.Series |
import pandas as pd
# Part I: Data Index
# data=pd.Series([5,4,-2,3,7])
data=pd.Series([5,4,-2,3,7], index=["a","b","c","d","e"])
print(data)
# Part II: Observe Data
print("Data Type", data.dtype)
print("Data Number", data.size)
print("Data Index", data.index)
# Part III: Get Data
print(data[2], data[0])
print(data["a"], data["e"])
# Part IV: Number Operation
print("Max", data.max())
print("Sum", data.sum())
print("Std", data.std())
print("Median", data.std())
print("Largest 3", data.nlargest(3))
# Part V: String Operation
strData= | pd.Series(["你好","Python","Pandas"]) | pandas.Series |
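# Hedged continuation of Part V (not from the original tutorial): the .str
# accessor applies string methods element-wise to a Series of strings.
import pandas as pd
strData = pd.Series(["Hello", "Python", "Pandas"])
print("Lower", strData.str.lower())
print("Length", strData.str.len())
print("Contains 'P'", strData.str.contains("P"))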
"""Silly data generator (Faker (https://github.com/joke2k/faker) and others
are much better, but we just need something simple"""
import string
# Third Party
import pandas as pd
import numpy as np
def df_random(num_numeric=3, num_categorical=3, num_rows=100):
"""Generate a dataframe with random data. This is a general method
to easily generate a random dataframe, for more control of the
random 'distributions' use the column methods (df_numeric_column, df_categorical_column)
For other distributions you can use numpy methods directly (see example at bottom of this file)
Args:
num_numeric (int): The number of numeric columns (default = 3)
num_categorical (int): The number of categorical columns (default = 3)
num_rows (int): The number of rows to generate (default = 100)
"""
# Construct DataFrame
df = pd.DataFrame()
column_names = string.ascii_lowercase
# Create numeric columns
for name in column_names[:num_numeric]:
df[name] = df_numeric_column(num_rows=num_rows)
# Create categorical columns
for name in column_names[num_numeric:num_numeric+num_categorical]:
df[name] = df_categorical_column(['foo', 'bar', 'baz'], num_rows=num_rows)
# Return the dataframe
return df
def df_numeric_column(min_value=0, max_value=1, num_rows=100):
"""Generate a numeric column with random data
Args:
min_value (float): Minimum value (default = 0)
max_value (float): Maximum value (default = 1)
num_rows (int): The number of rows to generate (default = 100)
"""
# Generate numeric column
return pd.Series(np.random.uniform(min_value, max_value, num_rows))
def df_categorical_column(category_values, num_rows=100, probabilities=None):
"""Generate a categorical column with random data
Args:
category_values (list): A list of category values (e.g. ['red', 'blue', 'green'])
num_rows (int): The number of rows to generate (default = 100)
probabilities (list): A list of probabilities of each value (e.g. [0.6, 0.2, 0.2]) (default=None an equal probability)
"""
splitter = np.random.choice(range(len(category_values)), num_rows, p=probabilities)
return pd.Series( | pd.Categorical.from_codes(splitter, categories=category_values) | pandas.Categorical.from_codes |
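# Illustrative usage (a sketch; the "example at bottom of this file" referenced
# in the df_random docstring is not shown above, so this is an assumption, not
# the original): build a random DataFrame and a skewed categorical column.
if __name__ == '__main__':
    df = df_random(num_numeric=2, num_categorical=2, num_rows=10)
    print(df.head())
    colors = df_categorical_column(['red', 'blue', 'green'], num_rows=10,
                                   probabilities=[0.6, 0.2, 0.2])
    print(colors.value_counts())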
#reproducibility
from numpy.random import seed
seed(1+347823)
import tensorflow as tf
tf.random.set_seed(1+63493)
import numpy as np
import os
import pandas as pd
import datetime
from matplotlib import pyplot
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
import shap
gpus = tf.config.experimental.list_physical_devices('GPU')
def load_GW_and_HYRAS_Data(Well_ID):
#define where to find the data
pathGW = "./GWData"
pathHYRAS = "./HYRAS"
pathconnect = "/"
#load and merge the data
GWData = pd.read_csv(pathGW+pathconnect+Well_ID+'_GW-Data.csv',
parse_dates=['Date'],index_col=0, dayfirst = True,
decimal = '.', sep=',')
HYRASData = pd.read_csv(pathHYRAS+pathconnect+Well_ID+'_weeklyData_HYRAS.csv',
parse_dates=['Date'],index_col=0, dayfirst = True,
decimal = '.', sep=',')
data = pd.merge(GWData, HYRASData, how='inner', left_index = True, right_index = True)
return data
def split_data(data, GLOBAL_SETTINGS):
dataset = data[(data.index < GLOBAL_SETTINGS["test_start"])] #split off the test data
TrainingData = dataset
TestData = data[(data.index >= GLOBAL_SETTINGS["test_start"]) & (data.index <= GLOBAL_SETTINGS["test_end"])] #test data for the specified test period
TestData_ext = pd.concat([dataset.iloc[-GLOBAL_SETTINGS["seq_length"]:], TestData], axis=0) # extend Testdata to be able to fill sequence later
return TrainingData, TestData, TestData_ext
def to_supervised(data, GLOBAL_SETTINGS):
X, Y = list(), list()
# step over the entire history one time step at a time
for i in range(len(data)):
# find the end of this pattern
end_idx = i + GLOBAL_SETTINGS["seq_length"]
# check if we are beyond the dataset
if end_idx >= len(data):
break
# gather input and output parts of the pattern
seq_x, seq_y = data[i:end_idx, 1:], data[end_idx, 0]
X.append(seq_x)
Y.append(seq_y)
return np.array(X), np.array(Y)
# =============================================================================
#### start
# =============================================================================
with tf.device("/gpu:0"):
time1 = datetime.datetime.now()
basedir = './'
os.chdir(basedir)
well_list = pd.read_csv("./list.txt")
# =============================================================================
#### loop
# =============================================================================
for pp in range(well_list.shape[0]):
Well_ID = well_list.ID[pp]
print(str(pp)+": "+Well_ID)
bestkonfig = pd.read_csv('./log_summary_CNN_'+Well_ID+'.txt',delimiter='=',skiprows=(10),nrows=(7),header = None)
bestkonfig.columns = ['hp','value']
filters_int = int(bestkonfig.value[0])
densesize_int = int(bestkonfig.value[1])
seqlength_int = int(bestkonfig.value[2])
batchsize_int = int(bestkonfig.value[3])
pathGW = "./GWData/"
GWData = | pd.read_csv(pathGW+Well_ID+'_GW-Data.csv',parse_dates=['Date'],index_col=0, dayfirst = True,decimal = '.', sep=',') | pandas.read_csv |
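# Hedged sketch (standalone; the well id, seq_length and test dates below are
# assumptions, not values from the original script): chain the helper functions
# above to produce scaled (X, Y) training sequences for one well.
Well_ID_example = "BB_12345"  # hypothetical well id from list.txt
GLOBAL_SETTINGS = {"seq_length": 52,
                   "test_start": pd.to_datetime("2012-01-01"),
                   "test_end": pd.to_datetime("2015-12-31")}
example_data = load_GW_and_HYRAS_Data(Well_ID_example)
TrainingData, TestData, TestData_ext = split_data(example_data, GLOBAL_SETTINGS)
scaler = MinMaxScaler(feature_range=(0, 1))
TrainingData_n = scaler.fit_transform(TrainingData)
X_train, Y_train = to_supervised(TrainingData_n, GLOBAL_SETTINGS)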
"""MODFLOW support utilities"""
import os
from datetime import datetime
import shutil
import warnings
import numpy as np
import pandas as pd
import re
pd.options.display.max_colwidth = 100
from pyemu.pst.pst_utils import SFMT,IFMT,FFMT,pst_config,\
parse_tpl_file,try_process_output_file
from pyemu.utils.os_utils import run
from pyemu.utils.helpers import _write_df_tpl
from ..pyemu_warnings import PyemuWarning
PP_FMT = {"name": SFMT, "x": FFMT, "y": FFMT, "zone": IFMT, "tpl": SFMT,
"parval1": FFMT}
PP_NAMES = ["name","x","y","zone","parval1"]
def modflow_pval_to_template_file(pval_file,tpl_file=None):
"""write a template file for a modflow parameter value file.
Args:
pval_file (`str`): the path and name of the existing modflow pval file
tpl_file (`str`, optional): template file to write. If None, use
`pval_file` +".tpl". Default is None
Note:
Uses names in the first column in the pval file as par names.
Returns:
**pandas.DataFrame**: a dataFrame with control file parameter information
"""
if tpl_file is None:
tpl_file = pval_file + ".tpl"
pval_df = pd.read_csv(pval_file,delim_whitespace=True,
header=None,skiprows=2,
names=["parnme","parval1"])
pval_df.index = pval_df.parnme
pval_df.loc[:,"tpl"] = pval_df.parnme.apply(lambda x: " ~ {0:15s} ~".format(x))
with open(tpl_file,'w') as f:
f.write("ptf ~\n#pval template file from pyemu\n")
f.write("{0:10d} #NP\n".format(pval_df.shape[0]))
f.write(pval_df.loc[:,["parnme","tpl"]].to_string(col_space=0,
formatters=[SFMT,SFMT],
index=False,
header=False,
justify="left"))
return pval_df
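# Example usage (a sketch; "model.pval" is a hypothetical file name):
#   par_df = modflow_pval_to_template_file("model.pval")
#   # writes "model.pval.tpl" alongside the pval file and returns parameter info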
def modflow_hob_to_instruction_file(hob_file, ins_file=None):
"""write an instruction file for a modflow head observation file
Args:
hob_file (`str`): the path and name of the existing modflow hob file
ins_file (`str`, optional): the name of the instruction file to write.
If `None`, `hob_file` +".ins" is used. Default is `None`.
Returns:
**pandas.DataFrame**: a dataFrame with control file observation information
"""
hob_df = pd.read_csv(hob_file,delim_whitespace=True,skiprows=1,
header=None,names=["simval","obsval","obsnme"])
hob_df.loc[:,"obsnme"] = hob_df.obsnme.apply(str.lower)
hob_df.loc[:,"ins_line"] = hob_df.obsnme.apply(lambda x:"l1 !{0:s}!".format(x))
hob_df.loc[0,"ins_line"] = hob_df.loc[0,"ins_line"].replace('l1','l2')
if ins_file is None:
ins_file = hob_file + ".ins"
f_ins = open(ins_file, 'w')
f_ins.write("pif ~\n")
f_ins.write(hob_df.loc[:,["ins_line"]].to_string(col_space=0,
columns=["ins_line"],
header=False,
index=False,
formatters=[SFMT]) + '\n')
hob_df.loc[:,"weight"] = 1.0
hob_df.loc[:,"obgnme"] = "obgnme"
f_ins.close()
return hob_df
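# Example usage (a sketch; "model.hob.out" is a hypothetical file name):
#   obs_df = modflow_hob_to_instruction_file("model.hob.out")
#   # writes "model.hob.out.ins" and returns a dataframe of observation info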
def modflow_hydmod_to_instruction_file(hydmod_file, ins_file=None):
"""write an instruction file for a modflow hydmod file
Args:
hydmod_file (`str`): the path and name of the existing modflow hob file
ins_file (`str`, optional): the name of the instruction file to write.
If `None`, `hydmod_file` +".ins" is used. Default is `None`.
Returns:
**pandas.DataFrame**: a dataFrame with control file observation information
Note:
calls `pyemu.gw_utils.modflow_read_hydmod_file()`
"""
hydmod_df, hydmod_outfile = modflow_read_hydmod_file(hydmod_file)
hydmod_df.loc[:,"ins_line"] = hydmod_df.obsnme.apply(lambda x:"l1 w !{0:s}!".format(x))
if ins_file is None:
ins_file = hydmod_outfile + ".ins"
with open(ins_file, 'w') as f_ins:
f_ins.write("pif ~\nl1\n")
f_ins.write(hydmod_df.loc[:,["ins_line"]].to_string(col_space=0,
columns=["ins_line"],
header=False,
index=False,
formatters=[SFMT]) + '\n')
hydmod_df.loc[:,"weight"] = 1.0
hydmod_df.loc[:,"obgnme"] = "obgnme"
df = try_process_output_file(hydmod_outfile+".ins")
if df is not None:
df.loc[:,"obsnme"] = df.index.values
df.loc[:,"obgnme"] = df.obsnme.apply(lambda x: x[:-9])
df.to_csv("_setup_"+os.path.split(hydmod_outfile)[-1]+'.csv',index=False)
return df
return hydmod_df
def modflow_read_hydmod_file(hydmod_file, hydmod_outfile=None):
""" read a binary hydmod file and return a dataframe of the results
Args:
hydmod_file (`str`): The path and name of the existing modflow hydmod binary file
hydmod_outfile (`str`, optional): output file to write. If `None`, use `hydmod_file` +".dat".
Default is `None`.
Returns:
**pandas.DataFrame**: a dataFrame with hymod_file values
"""
try:
import flopy.utils as fu
except Exception as e:
print('flopy is not installed - cannot read {0}\n{1}'.format(hydmod_file, e))
return
obs = fu.HydmodObs(hydmod_file)
hyd_df = obs.get_dataframe()
hyd_df.columns = [i[2:] if i.lower() != 'totim' else i for i in hyd_df.columns]
#hyd_df.loc[:,"datetime"] = hyd_df.index
hyd_df['totim'] = hyd_df.index.map(lambda x: x.strftime("%Y%m%d"))
hyd_df.rename(columns={'totim': 'datestamp'}, inplace=True)
# reshape into a single column
hyd_df = pd.melt(hyd_df, id_vars='datestamp')
hyd_df.rename(columns={'value': 'obsval'}, inplace=True)
hyd_df['obsnme'] = [i.lower() + '_' + j.lower() for i, j in zip(hyd_df.variable, hyd_df.datestamp)]
vc = hyd_df.obsnme.value_counts().sort_values()
vc = list(vc.loc[vc>1].index.values)
if len(vc) > 0:
hyd_df.to_csv("hyd_df.duplciates.csv")
obs.get_dataframe().to_csv("hyd_org.duplicates.csv")
raise Exception("duplicates in obsnme:{0}".format(vc))
#assert hyd_df.obsnme.value_counts().max() == 1,"duplicates in obsnme"
if not hydmod_outfile:
hydmod_outfile = hydmod_file + '.dat'
hyd_df.to_csv(hydmod_outfile, columns=['obsnme','obsval'], sep=' ',index=False)
#hyd_df = hyd_df[['obsnme','obsval']]
return hyd_df[['obsnme','obsval']], hydmod_outfile
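# Example usage (a sketch; "model.hyd.bin" is a hypothetical file name):
#   hyd_df, out_file = modflow_read_hydmod_file("model.hyd.bin")
#   # hyd_df holds obsnme/obsval pairs; out_file is the ASCII file that was written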
def setup_mtlist_budget_obs(list_filename,gw_filename="mtlist_gw.dat",sw_filename="mtlist_sw.dat",
start_datetime="1-1-1970",gw_prefix='gw',sw_prefix="sw",
save_setup_file=False):
""" setup observations of gw (and optionally sw) mass budgets from mt3dusgs list file.
Args:
list_filename (`str`): path and name of existing modflow list file
gw_filename (`str`, optional): output filename that will contain the gw budget
observations. Default is "mtlist_gw.dat"
sw_filename (`str`, optional): output filename that will contain the sw budget
observations. Default is "mtlist_sw.dat"
start_datetime (`str`, optional): an str that can be parsed into a `pandas.TimeStamp`.
used to give budget observations meaningful names. Default is "1-1-1970".
gw_prefix (`str`, optional): a prefix to add to the GW budget observations.
Useful if processing more than one list file as part of the forward run process.
Default is 'gw'.
sw_prefix (`str`, optional): a prefix to add to the SW budget observations. Useful
if processing more than one list file as part of the forward run process.
Default is 'sw'.
save_setup_file (`bool`, optional): a flag to save "_setup_"+ `list_filename` +".csv" file
that contains useful control file information. Default is `False`.
Returns:
tuple containing
- **str**: the command to add to the forward run script
- **str**: the names of the instruction files that were created
- **pandas.DataFrame**: a dataframe with information for constructing a control file
Note:
writes an instruction file and also a _setup_.csv to use when constructing a pest
control file
the instruction files are named `out_filename` +".ins"
It is recommended to use the default value for `gw_filename` or `sw_filename`.
This is the companion function of `gw_utils.apply_mtlist_budget_obs()`.
"""
gw,sw = apply_mtlist_budget_obs(list_filename, gw_filename, sw_filename, start_datetime)
gw_ins = gw_filename + ".ins"
_write_mtlist_ins(gw_ins, gw, gw_prefix)
ins_files = [gw_ins]
df_gw = try_process_output_file(gw_ins,gw_filename)
if df_gw is None:
raise Exception("error processing groundwater instruction file")
if sw is not None:
sw_ins = sw_filename + ".ins"
_write_mtlist_ins(sw_ins, sw, sw_prefix)
ins_files.append(sw_ins)
df_sw = try_process_output_file(sw_ins,sw_filename)
if df_sw is None:
raise Exception("error processing surface water instruction file")
df_gw = df_gw.append(df_sw)
df_gw.loc[:, "obsnme"] = df_gw.index.values
if save_setup_file:
df_gw.to_csv("_setup_" + os.path.split(list_filename)[-1] + '.csv', index=False)
frun_line = "pyemu.gw_utils.apply_mtlist_budget_obs('{0}')".format(list_filename)
return frun_line,ins_files,df_gw
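# Example usage (a sketch; "mt3d.list" is a hypothetical file name):
#   frun_line, ins_files, obs_df = setup_mtlist_budget_obs("mt3d.list")
#   # add frun_line to the forward run script so apply_mtlist_budget_obs()
#   # reprocesses the list file at runtime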
def _write_mtlist_ins(ins_filename,df,prefix):
""" write an instruction file for a MT3D-USGS list file
"""
try:
dt_str = df.index.map(lambda x: x.strftime("%Y%m%d"))
except:
dt_str = df.index.map(lambda x: "{0:08.1f}".format(x).strip())
with open(ins_filename,'w') as f:
f.write('pif ~\nl1\n')
for dt in dt_str:
f.write("l1 ")
for col in df.columns.str.translate(
{ord(s): None for s in ['(', ')', '/', '=']}):
if prefix == '':
obsnme = "{0}_{1}".format(col, dt)
else:
obsnme = "{0}_{1}_{2}".format(prefix, col, dt)
f.write(" w !{0}!".format(obsnme))
f.write("\n")
def apply_mtlist_budget_obs(list_filename,gw_filename="mtlist_gw.dat",
sw_filename="mtlist_sw.dat",
start_datetime="1-1-1970"):
""" process an MT3D-USGS list file to extract mass budget entries.
Args:
list_filename (`str`): the path and name of an existing MT3D-USGS list file
gw_filename (`str`, optional): the name of the output file with gw mass
budget information. Default is "mtlist_gw.dat"
sw_filename (`str`): the name of the output file with sw mass budget information.
Default is "mtlist_sw.dat"
start_datetime (`str`): a str that can be cast to a pandas.TimeStamp. Used to give
observations a meaningful name
Returns:
2-element tuple containing
- **pandas.DataFrame**: the gw mass budget dataframe
- **pandas.DataFrame**: (optional) the sw mass budget dataframe.
If the SFT process is not active, this returned value is `None`.
Note:
this is the companion function of `gw_utils.setup_mtlist_budget_obs()`.
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
mt = flopy.utils.MtListBudget(list_filename)
gw, sw = mt.parse(start_datetime=start_datetime, diff=True)
gw = gw.drop([col for col in gw.columns
for drop_col in ["kper", "kstp", "tkstp"]
if (col.lower().startswith(drop_col))], axis=1)
gw.to_csv(gw_filename, sep=' ', index_label="datetime", date_format="%Y%m%d")
if sw is not None:
sw = sw.drop([col for col in sw.columns
for drop_col in ["kper", "kstp", "tkstp"]
if (col.lower().startswith(drop_col))], axis=1)
sw.to_csv(sw_filename, sep=' ', index_label="datetime", date_format="%Y%m%d")
return gw, sw
def setup_mflist_budget_obs(list_filename,flx_filename="flux.dat",
vol_filename="vol.dat",start_datetime="1-1'1970",prefix='',
save_setup_file=False):
""" setup observations of budget volume and flux from modflow list file.
Args:
list_filename (`str`): path and name of the existing modflow list file
flx_filename (`str`, optional): output filename that will contain the budget flux
observations. Default is "flux.dat"
vol_filename (`str`, optional): output filename that will contain the budget volume
observations. Default is "vol.dat"
start_datetime (`str`, optional): a string that can be parsed into a pandas.TimeStamp.
This is used to give budget observations meaningful names. Default is "1-1-1970".
prefix (`str`, optional): a prefix to add to the water budget observations. Useful if
processing more than one list file as part of the forward run process. Default is ''.
save_setup_file (`bool`): a flag to save "_setup_"+ `list_filename` +".csv" file that contains useful
control file information
Returns:
**pandas.DataFrame**: a dataframe with information for constructing a control file.
Note:
This method writes instruction files and also a _setup_.csv to use when constructing a pest
control file. The instruction files are named <flux_file>.ins and <vol_file>.ins, respectively
It is recommended to use the default values for flux_file and vol_file.
This is the companion function of `gw_utils.apply_mflist_budget_obs()`.
"""
flx,vol = apply_mflist_budget_obs(list_filename,flx_filename,vol_filename,
start_datetime)
_write_mflist_ins(flx_filename+".ins",flx,prefix+"flx")
_write_mflist_ins(vol_filename+".ins",vol, prefix+"vol")
df = try_process_output_file(flx_filename+".ins")
if df is None:
raise Exception("error processing flux instruction file")
df2 = try_process_output_file(vol_filename+".ins")
if df2 is None:
raise Exception("error processing volume instruction file")
df = df.append(df2)
df.loc[:,"obsnme"] = df.index.values
if save_setup_file:
df.to_csv("_setup_" + os.path.split(list_filename)[-1] + '.csv', index=False)
return df
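# Example usage (a sketch; "model.list" is a hypothetical file name):
#   obs_df = setup_mflist_budget_obs("model.list", start_datetime="1-1-1970")
#   # writes flux.dat.ins and vol.dat.ins and returns control-file observation info;
#   # apply_mflist_budget_obs("model.list") must then be called in the forward run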
def apply_mflist_budget_obs(list_filename,flx_filename="flux.dat",
vol_filename="vol.dat",
start_datetime="1-1-1970"):
""" process a MODFLOW list file to extract flux and volume water budget entries.
Args:
list_filename (`str`): path and name of the existing modflow list file
flx_filename (`str`, optional): output filename that will contain the budget flux
observations. Default is "flux.dat"
vol_filename (`str`, optional): output filename that will contain the budget volume
observations. Default is "vol.dat"
start_datetime (`str`, optional): a string that can be parsed into a pandas.TimeStamp.
This is used to give budget observations meaningful names. Default is "1-1-1970".
prefix (`str`, optional): a prefix to add to the water budget observations. Useful if
processing more than one list file as part of the forward run process. Default is ''.
save_setup_file (`bool`): a flag to save _setup_<list_filename>.csv file that contains useful
control file information
Note:
this is the companion function of `gw_utils.setup_mflist_budget_obs()`.
Returns:
tuple containing
- **pandas.DataFrame**: a dataframe with flux budget information
- **pandas.DataFrame**: a dataframe with cumulative budget information
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
mlf = flopy.utils.MfListBudget(list_filename)
flx,vol = mlf.get_dataframes(start_datetime=start_datetime,diff=True)
flx.to_csv(flx_filename,sep=' ',index_label="datetime",date_format="%Y%m%d")
vol.to_csv(vol_filename,sep=' ',index_label="datetime",date_format="%Y%m%d")
return flx,vol
def _write_mflist_ins(ins_filename,df,prefix):
""" write an instruction file for a MODFLOW list file
"""
dt_str = df.index.map(lambda x: x.strftime("%Y%m%d"))
with open(ins_filename,'w') as f:
f.write('pif ~\nl1\n')
for dt in dt_str:
f.write("l1 ")
for col in df.columns:
obsnme = "{0}_{1}_{2}".format(prefix,col,dt)
f.write(" w !{0}!".format(obsnme))
f.write("\n")
def setup_hds_timeseries(bin_file, kij_dict, prefix=None, include_path=False,
model=None, postprocess_inact=None, text=None,
fill=None,precision="single"):
"""a function to setup a forward process to extract time-series style values
from a binary modflow binary file (or equivalent format - hds, ucn, sub, cbb, etc).
Args:
bin_file (`str`): path and name of existing modflow binary file - headsave, cell budget and MT3D UCN supported.
kij_dict (`dict`): dictionary of site_name: [k,i,j] pairs. For example: `{"wel1":[0,1,1]}`.
prefix (`str`, optional): string to prepend to site_name when forming observation names. Default is None
include_path (`bool`, optional): flag to setup the binary file processing in directory where the hds_file
is located (if different from where python is running). This is useful for setting up
the process in separate directory for where python is running.
model (`flopy.mbase`, optional): a `flopy.basemodel` instance. If passed, the observation names will
have the datetime of the observation appended to them (using the flopy `start_datetime` attribute.
If None, the observation names will have the zero-based stress period appended to them. Default is None.
postprocess_inact (`float`, optional): Inactive value in heads/ucn file e.g. mt.btn.cinit. If `None`, no
inactive value processing happens. Default is `None`.
text (`str`): the text record entry in the binary file (e.g. "constant_head").
Used to indicate that the binary file is a MODFLOW cell-by-cell budget file.
If None, headsave or MT3D unformatted concentration file
is assumed. Default is None
fill (`float`): fill value for NaNs in the extracted timeseries dataframe. If
`None`, no filling is done, which may yield model run failures as the resulting
processed timeseries CSV file (produced at runtime) may have missing values and
can't be processed with the cooresponding instruction file. Default is `None`.
precision (`str`): the precision of the binary file. Can be "single" or "double".
Default is "single".
Returns:
tuple containing
- **str**: the forward run command to execute the binary file process during model runs.
- **pandas.DataFrame**: a dataframe of observation information for use in the pest control file
Note:
This function writes hds_timeseries.config that must be in the same
dir where `apply_hds_timeseries()` is called during the forward run
Assumes model time units are days
this is the companion function of `gw_utils.apply_hds_timeseries()`.
"""
try:
import flopy
except Exception as e:
print("error importing flopy, returning {0}".format(str(e)))
return
assert os.path.exists(bin_file), "binary file not found"
iscbc = False
if text is not None:
text = text.upper()
try:
# hack: if model is passed and its None, it trips up CellBudgetFile...
if model is not None:
bf = flopy.utils.CellBudgetFile(bin_file,precision=precision,model=model)
iscbc=True
else:
bf = flopy.utils.CellBudgetFile(bin_file, precision=precision)
iscbc=True
except Exception as e:
try:
if model is not None:
bf = flopy.utils.HeadFile(bin_file, precision=precision, model=model,text=text)
else:
bf = flopy.utils.HeadFile(bin_file, precision=precision,text=text)
except Exception as e1:
raise Exception("error instantiating binary file as either CellBudgetFile:{0} or as HeadFile with text arg: {1}".format(str(e),str(e1)))
if iscbc:
tl = [t.decode().strip() for t in bf.textlist]
if text not in tl:
raise Exception("'text' {0} not found in CellBudgetFile.textlist:{1}".\
format(text,tl))
elif bin_file.lower().endswith(".ucn"):
try:
bf = flopy.utils.UcnFile(bin_file)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
else:
try:
bf = flopy.utils.HeadFile(bin_file)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
if text is None:
text = "none"
nlay,nrow,ncol = bf.nlay,bf.nrow,bf.ncol
#if include_path:
# pth = os.path.join(*[p for p in os.path.split(hds_file)[:-1]])
# config_file = os.path.join(pth,"{0}_timeseries.config".format(hds_file))
#else:
config_file = "{0}_timeseries.config".format(bin_file)
print("writing config file to {0}".format(config_file))
if fill is None:
fill = "none"
f_config = open(config_file,'w')
if model is not None:
if model.dis.itmuni != 4:
warnings.warn("setup_hds_timeseries only supports 'days' time units...",PyemuWarning)
f_config.write("{0},{1},d,{2},{3},{4},{5}\n".
format(os.path.split(bin_file)[-1],
model.start_datetime,text,fill,precision,iscbc))
start = pd.to_datetime(model.start_datetime)
else:
f_config.write("{0},none,none,{1},{2},{3},{4}\n".format(os.path.split(bin_file)[-1],
text, fill,precision,iscbc))
f_config.write("site,k,i,j\n")
dfs = []
for site,(k,i,j) in kij_dict.items():
assert k >= 0 and k < nlay, k
assert i >= 0 and i < nrow, i
assert j >= 0 and j < ncol, j
site = site.lower().replace(" ",'')
if iscbc:
ts = bf.get_ts((k, i, j),text=text)
#print(ts)
df = pd.DataFrame(data=ts, columns=["totim", site])
else:
df = pd.DataFrame(data=bf.get_ts((k,i,j)),columns=["totim",site])
if model is not None:
dts = start + pd.to_timedelta(df.totim,unit='d')
df.loc[:,"totim"] = dts
#print(df)
f_config.write("{0},{1},{2},{3}\n".format(site,k,i,j))
df.index = df.pop("totim")
dfs.append(df)
f_config.close()
df = pd.concat(dfs,axis=1).T
df.to_csv(bin_file + "_timeseries.processed", sep=' ')
if model is not None:
t_str = df.columns.map(lambda x: x.strftime("%Y%m%d"))
else:
t_str = df.columns.map(lambda x: "{0:08.2f}".format(x))
ins_file = bin_file + "_timeseries.processed.ins"
print("writing instruction file to {0}".format(ins_file))
with open(ins_file,'w') as f:
f.write('pif ~\n')
f.write("l1 \n")
for site in df.index:
#for t in t_str:
f.write("l1 w ")
#for site in df.columns:
for t in t_str:
if prefix is not None:
obsnme = "{0}_{1}_{2}".format(prefix,site,t)
else:
obsnme = "{0}_{1}".format(site, t)
f.write(" !{0}!".format(obsnme))
f.write('\n')
if postprocess_inact is not None:
_setup_postprocess_hds_timeseries(bin_file, df, config_file, prefix=prefix, model=model)
bd = '.'
if include_path:
bd = os.getcwd()
pth = os.path.join(*[p for p in os.path.split(bin_file)[:-1]])
os.chdir(pth)
config_file = os.path.split(config_file)[-1]
try:
df = apply_hds_timeseries(config_file, postprocess_inact=postprocess_inact)
except Exception as e:
os.chdir(bd)
raise Exception("error in apply_hds_timeseries(): {0}".format(str(e)))
os.chdir(bd)
df = try_process_output_file(ins_file)
if df is None:
raise Exception("error processing {0} instruction file".format(ins_file))
df.loc[:,"weight"] = 0.0
if prefix is not None:
df.loc[:,"obgnme"] = df.index.map(lambda x: '_'.join(x.split('_')[:2]))
else:
df.loc[:, "obgnme"] = df.index.map(lambda x: x.split('_')[0])
frun_line = "pyemu.gw_utils.apply_hds_timeseries('{0}',{1})\n".format(config_file, postprocess_inact)
return frun_line,df
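# Example usage (a sketch; the binary file name and well locations are hypothetical):
#   kij_dict = {"wel1": [0, 10, 12], "wel2": [2, 44, 7]}
#   frun_line, obs_df = setup_hds_timeseries("model.hds", kij_dict, prefix="hds")
#   # add frun_line to the forward run script; it calls apply_hds_timeseries()
#   # against the *_timeseries.config file written here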
def apply_hds_timeseries(config_file=None, postprocess_inact=None):
"""process a modflow binary file using a previously written
configuration file
Args:
config_file (`str`, optional): configuration file written by `pyemu.gw_utils.setup_hds_timeseries`.
If `None`, looks for `hds_timeseries.config`
postprocess_inact (`float`, optional): Inactive value in heads/ucn file e.g. mt.btn.cinit. If `None`, no
inactive value processing happens. Default is `None`.
Note:
this is the companion function of `gw_utils.setup_hds_timeseries()`.
"""
import flopy
if config_file is None:
config_file = "hds_timeseries.config"
assert os.path.exists(config_file), config_file
with open(config_file,'r') as f:
line = f.readline()
bf_file,start_datetime,time_units, text, fill, precision,_iscbc = line.strip().split(',')
site_df = pd.read_csv(f)
text = text.upper()
if _iscbc.lower().strip() == "false":
iscbc = False
elif _iscbc.lower().strip() == "true":
iscbc = True
else:
raise Exception("apply_hds_timeseries() error: unrecognized 'iscbc' string in config file: {0}".format(_iscbc))
assert os.path.exists(bf_file), "head save file not found"
if iscbc:
try:
bf = flopy.utils.CellBudgetFile(bf_file,precision=precision)
except Exception as e:
raise Exception("error instantiating CellBudgetFile:{0}".format(str(e)))
elif bf_file.lower().endswith(".ucn"):
try:
bf = flopy.utils.UcnFile(bf_file,precision=precision)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
else:
try:
if text != "NONE":
bf = flopy.utils.HeadFile(bf_file,text=text, precision=precision)
else:
bf = flopy.utils.HeadFile(bf_file, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
nlay, nrow, ncol = bf.nlay, bf.nrow, bf.ncol
dfs = []
for site,k,i,j in zip(site_df.site,site_df.k,site_df.i,site_df.j):
assert k >= 0 and k < nlay
assert i >= 0 and i < nrow
assert j >= 0 and j < ncol
if iscbc:
df = pd.DataFrame(data=bf.get_ts((k, i, j), text=text), columns=["totim", site])
else:
df = pd.DataFrame(data=bf.get_ts((k,i,j)),columns=["totim",site])
df.index = df.pop("totim")
dfs.append(df)
df = pd.concat(dfs,axis=1).T
if df.shape != df.dropna().shape:
warnings.warn("NANs in processed timeseries file",PyemuWarning)
if fill.upper() != "NONE":
fill = float(fill)
df.fillna(fill,inplace=True)
#print(df)
df.to_csv(bf_file+"_timeseries.processed",sep=' ')
if postprocess_inact is not None:
_apply_postprocess_hds_timeseries(config_file, postprocess_inact)
return df
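# Usage sketch (not part of the original module; the config file name and inactive value are
# hypothetical): during a forward run, the call written by `setup_hds_timeseries()` is
# typically executed from the model run directory, e.g.
#     import pyemu
#     df = pyemu.gw_utils.apply_hds_timeseries("hds_timeseries.config", postprocess_inact=1e30)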
def _setup_postprocess_hds_timeseries(hds_file, df, config_file, prefix=None, model=None):
"""Dirty function to setup post processing concentrations in inactive/dry cells"""
warnings.warn(
"Setting up post processing of hds or ucn timeseries obs. "
"Prepending 'pp' to obs name may cause length to exceed 20 chars", PyemuWarning)
if model is not None:
t_str = df.columns.map(lambda x: x.strftime("%Y%m%d"))
else:
t_str = df.columns.map(lambda x: "{0:08.2f}".format(x))
if prefix is not None:
prefix = "pp{0}".format(prefix)
else:
prefix = "pp"
ins_file = hds_file+"_timeseries.post_processed.ins"
print("writing instruction file to {0}".format(ins_file))
with open(ins_file,'w') as f:
f.write('pif ~\n')
f.write("l1 \n")
for site in df.index:
f.write("l1 w ")
#for site in df.columns:
for t in t_str:
obsnme = "{0}{1}_{2}".format(prefix, site, t)
f.write(" !{0}!".format(obsnme))
f.write('\n')
frun_line = "pyemu.gw_utils._apply_postprocess_hds_timeseries('{0}')\n".format(config_file)
return frun_line
def _apply_postprocess_hds_timeseries(config_file=None, cinact=1e30):
"""private function to post processing binary files"""
import flopy
if config_file is None:
config_file = "hds_timeseries.config"
assert os.path.exists(config_file), config_file
with open(config_file,'r') as f:
line = f.readline()
hds_file,start_datetime,time_units,text,fill,precision,_iscbc = line.strip().split(',')
site_df = pd.read_csv(f)
#print(site_df)
text = text.upper()
assert os.path.exists(hds_file), "head save file not found"
if hds_file.lower().endswith(".ucn"):
try:
hds = flopy.utils.UcnFile(hds_file,precision=precision)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
else:
try:
if text != "NONE":
hds = flopy.utils.HeadFile(hds_file,text=text,precision=precision)
else:
hds = flopy.utils.HeadFile(hds_file,precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
nlay, nrow, ncol = hds.nlay, hds.nrow, hds.ncol
dfs = []
for site, k, i, j in zip(site_df.site, site_df.k, site_df.i, site_df.j):
assert k >= 0 and k < nlay
assert i >= 0 and i < nrow
assert j >= 0 and j < ncol
if text.upper() != "NONE":
df = pd.DataFrame(data=hds.get_ts((k, i, j)), columns=["totim", site])
else:
df = pd.DataFrame(data=hds.get_ts((k, i, j)), columns=["totim", site])
df.index = df.pop("totim")
inact_obs = df[site].apply(lambda x: np.isclose(x, cinact))
if inact_obs.sum() > 0:
assert k+1 < nlay, "Inactive observation in lowest layer"
df_lower = pd.DataFrame(data=hds.get_ts((k+1, i, j)), columns=["totim", site])
df_lower.index = df_lower.pop("totim")
df.loc[inact_obs] = df_lower.loc[inact_obs]
print("{0} observation(s) post-processed for site {1} at kij ({2},{3},{4})".
format(inact_obs.sum(), site, k, i, j))
dfs.append(df)
df = pd.concat(dfs, axis=1).T
#print(df)
df.to_csv(hds_file+"_timeseries.post_processed", sep=' ')
return df
def setup_hds_obs(hds_file,kperk_pairs=None,skip=None,prefix="hds",text="head", precision="single",
include_path=False):
"""a function to setup using all values from a layer-stress period
pair for observations.
Args:
hds_file (`str`): path and name of an existing MODFLOW head-save file.
If the hds_file endswith 'ucn', then the file is treated as a UcnFile type.
kperk_pairs ([(int,int)]): a list of len two tuples which are pairs of kper
(zero-based stress period index) and k (zero-based layer index) to
setup observations for. If None, then all layers and stress period records
found in the file will be used. Caution: a shit-ton of observations may be produced!
skip (variable, optional): a value or function used to determine which values
to skip when setting up observations. If np.scalar(skip)
is True, then values equal to skip will not be used.
If skip can also be a np.ndarry with dimensions equal to the model.
Observations are set up only for cells with Non-zero values in the array.
If not np.ndarray or np.scalar(skip), then skip will be treated as a lambda function that
returns np.NaN if the value should be skipped.
prefix (`str`): the prefix to use for the observation names. default is "hds".
text (`str`): the text tag the flopy HeadFile instance. Default is "head"
precison (`str`): the precision string for the flopy HeadFile instance. Default is "single"
include_path (`bool`, optional): flag to setup the binary file processing in directory where the hds_file
is located (if different from where python is running). This is useful for setting up
the process in separate directory for where python is running.
Returns:
tuple containing
- **str**: the forward run script line needed to execute the headsave file observation
operation
- **pandas.DataFrame**: a dataframe of pest control file information
Note:
        Writes an instruction file and a _setup_ csv used to construct a control file.
This is the companion function to `gw_utils.apply_hds_obs()`.
"""
try:
import flopy
except Exception as e:
print("error importing flopy, returning {0}".format(str(e)))
return
assert os.path.exists(hds_file),"head save file not found"
if hds_file.lower().endswith(".ucn"):
try:
hds = flopy.utils.UcnFile(hds_file)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
else:
try:
hds = flopy.utils.HeadFile(hds_file,text=text,precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
if kperk_pairs is None:
kperk_pairs = []
for kstp,kper in hds.kstpkper:
kperk_pairs.extend([(kper-1,k) for k in range(hds.nlay)])
if len(kperk_pairs) == 2:
try:
if len(kperk_pairs[0]) == 2:
pass
except:
kperk_pairs = [kperk_pairs]
#if start_datetime is not None:
# start_datetime = pd.to_datetime(start_datetime)
# dts = start_datetime + pd.to_timedelta(hds.times,unit='d')
data = {}
kpers = [kper-1 for kstp,kper in hds.kstpkper]
for kperk_pair in kperk_pairs:
kper,k = kperk_pair
assert kper in kpers, "kper not in hds:{0}".format(kper)
assert k in range(hds.nlay), "k not in hds:{0}".format(k)
kstp = last_kstp_from_kper(hds,kper)
d = hds.get_data(kstpkper=(kstp,kper))[k,:,:]
data["{0}_{1}".format(kper,k)] = d.flatten()
#data[(kper,k)] = d.flatten()
idx,iidx,jidx = [],[],[]
for _ in range(len(data)):
for i in range(hds.nrow):
iidx.extend([i for _ in range(hds.ncol)])
jidx.extend([j for j in range(hds.ncol)])
idx.extend(["i{0:04d}_j{1:04d}".format(i,j) for j in range(hds.ncol)])
idx = idx[:hds.nrow*hds.ncol]
df = pd.DataFrame(data,index=idx)
data_cols = list(df.columns)
data_cols.sort()
#df.loc[:,"iidx"] = iidx
#df.loc[:,"jidx"] = jidx
if skip is not None:
for col in data_cols:
if np.isscalar(skip):
df.loc[df.loc[:,col]==skip,col] = np.NaN
elif isinstance(skip, np.ndarray):
assert skip.ndim >= 2, "skip passed as {}D array, At least 2D (<= 4D) array required".format(skip.ndim)
assert skip.shape[-2:] == (hds.nrow, hds.ncol), \
"Array dimensions of arg. skip needs to match model dimensions ({0},{1}). ({2},{3}) passed".\
format(hds.nrow, hds.ncol, skip.shape[-2], skip.shape[-1])
if skip.ndim == 2:
print("2D array passed for skip, assuming constant for all layers and kper")
skip = np.tile(skip, (len(kpers), hds.nlay, 1, 1))
if skip.ndim == 3:
print("3D array passed for skip, assuming constant for all kper")
skip = np.tile(skip, (len(kpers), 1, 1, 1))
kper, k = [int(c) for c in col.split('_')]
df.loc[df.index.map(
lambda x: skip[kper, k, int(x.split('_')[0].strip('i')), int(x.split('_')[1].strip('j'))] == 0),
col] = np.NaN
else:
df.loc[:,col] = df.loc[:,col].apply(skip)
# melt to long form
df = df.melt(var_name="kperk",value_name="obsval")
# set row and col identifies
df.loc[:,"iidx"] = iidx
df.loc[:,"jidx"] = jidx
#drop nans from skip
df = df.dropna()
#set some additional identifiers
df.loc[:,"kper"] = df.kperk.apply(lambda x: int(x.split('_')[0]))
df.loc[:,"kidx"] = df.pop("kperk").apply(lambda x: int(x.split('_')[1]))
# form obs names
#def get_kper_str(kper):
# if start_datetime is not None:
# return dts[int(kper)].strftime("%Y%m%d")
# else:
# return "kper{0:04.0f}".format(kper)
fmt = prefix + "_{0:02.0f}_{1:03.0f}_{2:03.0f}_{3:03.0f}"
# df.loc[:,"obsnme"] = df.apply(lambda x: fmt.format(x.kidx,x.iidx,x.jidx,
# get_kper_str(x.kper)),axis=1)
df.loc[:,"obsnme"] = df.apply(lambda x: fmt.format(x.kidx,x.iidx,x.jidx,
x.kper),axis=1)
df.loc[:,"ins_str"] = df.obsnme.apply(lambda x: "l1 w !{0}!".format(x))
df.loc[:,"obgnme"] = prefix
#write the instruction file
with open(hds_file+".dat.ins","w") as f:
f.write("pif ~\nl1\n")
df.ins_str.to_string(f,index=False,header=False)
#write the corresponding output file
df.loc[:,["obsnme","obsval"]].to_csv(hds_file+".dat",sep=' ',index=False)
hds_path = os.path.dirname(hds_file)
setup_file = os.path.join(hds_path,"_setup_{0}.csv".format(os.path.split(hds_file)[-1]))
df.to_csv(setup_file)
if not include_path:
hds_file = os.path.split(hds_file)[-1]
fwd_run_line = "pyemu.gw_utils.apply_hds_obs('{0}',precision='{1}',text='{2}')\n".format(hds_file,precision,text)
df.index = df.obsnme
return fwd_run_line, df
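# Usage sketch (illustrative only; the head-save file name, kperk_pairs and skip value are
# hypothetical): a typical setup call returns the forward-run line and the control-file info:
#     frun_line, obs_df = setup_hds_obs("model.hds", kperk_pairs=[(0, 0)], skip=-999.0, prefix="hds")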
def last_kstp_from_kper(hds,kper):
""" function to find the last time step (kstp) for a
give stress period (kper) in a modflow head save file.
Args:
hds (`flopy.utils.HeadFile`): head save file
kper (`int`): the zero-index stress period number
Returns:
**int**: the zero-based last time step during stress period
kper in the head save file
"""
#find the last kstp with this kper
kstp = -1
for kkstp,kkper in hds.kstpkper:
if kkper == kper+1 and kkstp > kstp:
kstp = kkstp
if kstp == -1:
raise Exception("kstp not found for kper {0}".format(kper))
kstp -= 1
return kstp
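# Example (a minimal sketch; `hds` is assumed to be an open flopy.utils.HeadFile):
#     kstp = last_kstp_from_kper(hds, kper=0)  # zero-based last time step of the first stress period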
def apply_hds_obs(hds_file, inact_abs_val=1.0e+20, precision="single",text="head"):
""" process a modflow head save file. A companion function to
`gw_utils.setup_hds_obs()` that is called during the forward run process
Args:
hds_file (`str`): a modflow head save filename. if hds_file ends with 'ucn',
then the file is treated as a UcnFile type.
        inact_abs_val (`float`, optional): the value that marks the minimum and maximum
            active value. Values in the headsave file greater than `inact_abs_val` are reset
            to `inact_abs_val`; values less than -`inact_abs_val` are reset to -`inact_abs_val`.
Returns:
**pandas.DataFrame**: a dataframe with extracted simulated values.
Note:
This is the companion function to `gw_utils.setup_hds_obs()`.
"""
try:
import flopy
except Exception as e:
raise Exception("apply_hds_obs(): error importing flopy: {0}".\
format(str(e)))
from .. import pst_utils
assert os.path.exists(hds_file)
out_file = hds_file+".dat"
ins_file = out_file + ".ins"
assert os.path.exists(ins_file)
df = pd.DataFrame({"obsnme":pst_utils.parse_ins_file(ins_file)})
df.index = df.obsnme
    # populate metadata
items = ["k","i","j","kper"]
for i,item in enumerate(items):
df.loc[:,item] = df.obsnme.apply(lambda x: int(x.split('_')[i+1]))
if hds_file.lower().endswith('ucn'):
hds = flopy.utils.UcnFile(hds_file)
else:
hds = flopy.utils.HeadFile(hds_file, precision=precision,text=text)
kpers = df.kper.unique()
df.loc[:,"obsval"] = np.NaN
for kper in kpers:
kstp = last_kstp_from_kper(hds,kper)
data = hds.get_data(kstpkper=(kstp,kper))
#jwhite 15jan2018 fix for really large values that are getting some
#trash added to them...
data[np.isnan(data)] = 0.0
data[data>np.abs(inact_abs_val)] = np.abs(inact_abs_val)
data[data<-np.abs(inact_abs_val)] = -np.abs(inact_abs_val)
df_kper = df.loc[df.kper==kper,:]
df.loc[df_kper.index,"obsval"] = data[df_kper.k,df_kper.i,df_kper.j]
assert df.dropna().shape[0] == df.shape[0]
df.loc[:,["obsnme","obsval"]].to_csv(out_file,index=False,sep=" ")
return df
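# Usage sketch (hypothetical file name), mirroring the forward-run line written by
# `setup_hds_obs()`:
#     df = apply_hds_obs("model.hds", precision="single", text="head")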
def setup_sft_obs(sft_file,ins_file=None,start_datetime=None,times=None,ncomp=1):
"""writes a post-processor and instruction file for a mt3d-usgs sft output file
Args:
sft_file (`str`): path and name of an existing sft output file (ASCII)
ins_file (`str`, optional): the name of the instruction file to create.
If None, the name is `sft_file`+".ins". Default is `None`.
start_datetime (`str`): a pandas.to_datetime() compatible str. If not None,
then the resulting observation names have the datetime
suffix. If None, the suffix is the output totim. Default
is `None`.
times ([`float`]): a list of times to make observations for. If None, all times
found in the file are used. Default is None.
ncomp (`int`): number of components in transport model. Default is 1.
Note:
this is the companion function to `gw_utils.apply_sft_obs()`.
Returns:
**pandas.DataFrame**: a dataframe with observation names and values for the sft simulated
concentrations.
"""
df = pd.read_csv(sft_file,skiprows=1,delim_whitespace=True)
df.columns = [c.lower().replace("-","_") for c in df.columns]
if times is None:
times = df.time.unique()
missing = []
utimes = df.time.unique()
for t in times:
if t not in utimes:
missing.append(str(t))
if len(missing) > 0:
print(df.time)
raise Exception("the following times are missing:{0}".format(','.join(missing)))
with open("sft_obs.config",'w') as f:
f.write(sft_file+'\n')
[f.write("{0:15.6E}\n".format(t)) for t in times]
df = apply_sft_obs()
utimes = df.time.unique()
for t in times:
assert t in utimes,"time {0} missing in processed dataframe".format(t)
idx = df.time.apply(lambda x: x in times)
if start_datetime is not None:
start_datetime = pd.to_datetime(start_datetime)
df.loc[:,"time_str"] = pd.to_timedelta(df.time,unit='d') + start_datetime
df.loc[:,"time_str"] = df.time_str.apply(lambda x: datetime.strftime(x,"%Y%m%d"))
else:
df.loc[:,"time_str"] = df.time.apply(lambda x: "{0:08.2f}".format(x))
df.loc[:,"ins_str"] = "l1\n"
# check for multiple components
df_times = df.loc[idx,:]
df.loc[:,"icomp"] = 1
icomp_idx = list(df.columns).index("icomp")
for t in times:
df_time = df.loc[df.time==t,:]
vc = df_time.sfr_node.value_counts()
ncomp = vc.max()
assert np.all(vc.values==ncomp)
nstrm = df_time.shape[0] / ncomp
for icomp in range(ncomp):
s = int(nstrm*(icomp))
e = int(nstrm*(icomp+1))
idxs = df_time.iloc[s:e,:].index
#df_time.iloc[nstrm*(icomp):nstrm*(icomp+1),icomp_idx.loc["icomp"] = int(icomp+1)
df_time.loc[idxs,"icomp"] = int(icomp+1)
#df.loc[df_time.index,"ins_str"] = df_time.apply(lambda x: "l1 w w !sfrc{0}_{1}_{2}! !swgw{0}_{1}_{2}! !gwcn{0}_{1}_{2}!\n".\
# format(x.sfr_node,x.icomp,x.time_str),axis=1)
df.loc[df_time.index, "ins_str"] = df_time.apply(
lambda x: "l1 w w !sfrc{0}_{1}_{2}!\n".format(x.sfr_node, x.icomp, x.time_str), axis=1)
df.index = np.arange(df.shape[0])
if ins_file is None:
ins_file = sft_file+".processed.ins"
with open(ins_file,'w') as f:
f.write("pif ~\nl1\n")
[f.write(i) for i in df.ins_str]
#df = try_process_ins_file(ins_file,sft_file+".processed")
df = try_process_output_file(ins_file,sft_file+".processed")
return df
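# Usage sketch (illustrative only; the sft output file name, start date and times are
# hypothetical):
#     df = setup_sft_obs("model.sft.out", start_datetime="1-1-2000", times=[365.25, 730.5])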
def apply_sft_obs():
"""process an mt3d-usgs sft ASCII output file using a previous-written
config file
Returns:
**pandas.DataFrame**: a dataframe of extracted simulated outputs
Note:
this is the companion function to `gw_utils.setup_sft_obs()`.
"""
    # deal with values whose exponent character is missing (e.g. "1.0-100"); anything that fails to cast becomes 0.0
def try_cast(x):
try:
return float(x)
except:
return 0.0
times = []
with open("sft_obs.config") as f:
sft_file = f.readline().strip()
for line in f:
times.append(float(line.strip()))
df = | pd.read_csv(sft_file,skiprows=1,delim_whitespace=True) | pandas.read_csv |
# -*- coding: utf-8 -*-
from pmdarima.arima import ARIMA, auto_arima, AutoARIMA
from pmdarima.arima.arima import VALID_SCORING, _uses_legacy_pickling
from pmdarima.arima.auto import _post_ppc_arima
from pmdarima.arima.utils import nsdiffs
from pmdarima.arima.warnings import ModelFitWarning
from pmdarima.compat.pytest import pytest_error_str
from pmdarima.datasets import load_lynx, load_wineind, load_heartrate, \
load_austres
from pmdarima.utils import get_callable
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_almost_equal
from numpy.random import RandomState
import joblib
from statsmodels import api as sm
import pandas as pd
import pickle
import pytest
import time
import os
from os.path import abspath, dirname
# initialize the random state
rs = RandomState(42)
y = rs.rand(25)
# more interesting heart rate data (asserts we can use a series)
hr = load_heartrate(as_series=True)
# > set.seed(123)
# > abc <- rnorm(50, 5, 1)
abc = np.array([4.439524, 4.769823, 6.558708, 5.070508,
5.129288, 6.715065, 5.460916, 3.734939,
4.313147, 4.554338, 6.224082, 5.359814,
5.400771, 5.110683, 4.444159, 6.786913,
5.497850, 3.033383, 5.701356, 4.527209,
3.932176, 4.782025, 3.973996, 4.271109,
4.374961, 3.313307, 5.837787, 5.153373,
3.861863, 6.253815, 5.426464, 4.704929,
5.895126, 5.878133, 5.821581, 5.688640,
5.553918, 4.938088, 4.694037, 4.619529,
4.305293, 4.792083, 3.734604, 7.168956,
6.207962, 3.876891, 4.597115, 4.533345,
5.779965, 4.916631])
austres = load_austres()
wineind = load_wineind()
lynx = load_lynx()
# Yes, m is ACTUALLY 12... but that takes a LONG time. If we set it to
# 1, we actually get a much, much faster model fit. We can only use this
# if we're NOT testing the output of the model, but just the functionality!
wineind_m = 1
# A random xreg for the wineind array
wineind_xreg = rs.rand(wineind.shape[0], 2)
def _unlink_if_exists(path):
if os.path.exists(path):
os.unlink(path)
def test_basic_arma():
arma = ARIMA(order=(0, 0, 0), suppress_warnings=True)
preds = arma.fit_predict(y) # fit/predict for coverage
# No OOB, so assert none
assert arma.oob_preds_ is None
# test some of the attrs
assert_almost_equal(arma.aic(), 11.201, decimal=3) # equivalent in R
# intercept is param 0
intercept = arma.params()[0]
assert_almost_equal(intercept, 0.441, decimal=3) # equivalent in R
assert_almost_equal(arma.aicc(), 11.74676, decimal=5)
assert_almost_equal(arma.bic(), 13.639060053303311, decimal=5)
# get predictions
expected_preds = np.array([0.44079876, 0.44079876, 0.44079876,
0.44079876, 0.44079876, 0.44079876,
0.44079876, 0.44079876, 0.44079876,
0.44079876])
# generate predictions
assert_array_almost_equal(preds, expected_preds)
# Make sure we can get confidence intervals
expected_intervals = np.array([
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139]
])
_, intervals = arma.predict(n_periods=10, return_conf_int=True,
alpha=0.05)
assert_array_almost_equal(intervals, expected_intervals)
@pytest.mark.parametrize(
# will be m - d
'model', [
ARIMA(order=(2, 0, 0)), # arma
ARIMA(order=(2, 1, 0)), # arima
ARIMA(order=(2, 1, 0), seasonal_order=(1, 0, 0, 12)), # sarimax
]
)
def test_predict_in_sample_conf_int(model):
model.fit(wineind)
expected_m_dim = wineind.shape[0]
preds, confints = model.predict_in_sample(return_conf_int=True, alpha=0.05)
assert preds.shape[0] == expected_m_dim
assert confints.shape == (expected_m_dim, 2)
@pytest.mark.parametrize(
'model', [
ARIMA(order=(2, 0, 0)), # arma
ARIMA(order=(2, 1, 0)), # arima
ARIMA(order=(2, 1, 0), seasonal_order=(1, 0, 0, 12)), # sarimax
]
)
@pytest.mark.parametrize('exog', [None, rs.rand(wineind.shape[0], 2)])
@pytest.mark.parametrize('confints', [True, False])
def test_predict_in_sample_exog(model, exog, confints):
model.fit(wineind, exogenous=exog)
res = model.predict_in_sample(exog, return_conf_int=confints)
if confints:
assert isinstance(res, tuple) and len(res) == 2
else:
assert isinstance(res, np.ndarray)
@pytest.mark.parametrize('as_pd', [True, False])
def test_with_oob_and_exog(as_pd):
endog = hr
exog = np.random.RandomState(1).rand(hr.shape[0], 3)
if as_pd:
exog = pd.DataFrame.from_records(exog)
endog = | pd.Series(hr) | pandas.Series |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import decimal
import datetime
import pandas as pd
from scipy.optimize import fsolve
from django.http import HttpResponse
from django.shortcuts import render
from .models import Currency, Category, Bank, Account, AccountCategory, AccountRec, Risk, InvProj, InvRec
from . import tables
def proj_stat(request, projid):
proj = InvProj.objects.get(id=int(projid))
tab = tables.InvRecTable(proj.invrec_set.all(), request=request)
env = {
'proj': proj,
'table': tab,
}
return render(request, 'inv/proj_stat.html', env)
def add_vectory(a, b):
return [i+j for i, j in zip(a, b)]
def sub_vectory(a, b):
return [i-j for i, j in zip(a, b)]
def div_vectory(a, b):
return [i/j for i, j in zip(a, b) if j != 0]
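# Illustrative behavior of the element-wise helpers above (values are hypothetical):
#     add_vectory([1, 2], [3, 4]) -> [4, 6]
#     div_vectory([1, 5], [2, 0]) -> [0.5]  (pairs with a zero divisor are skipped)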
def balance_sheet(request):
curs = Currency.objects.all()
sheet = {}
for cat in Category.objects.all():
values = cat.values_by_currency()
l = []
total = 0
for cur in curs:
l.append(values.get(cur.name, decimal.Decimal()))
total += values.get(cur.name, decimal.Decimal()) * cur.rate
l.append(total)
sheet.setdefault(cat.cat, [])
sheet[cat.cat].append((cat, l))
assets = [decimal.Decimal(),]*(len(curs)+1)
liabilities = [decimal.Decimal(),]*(len(curs)+1)
for i in range(1, 6):
sums = [decimal.Decimal(),]*(len(curs)+1)
for name, values in sheet[i]:
sums = add_vectory(sums, values)
if i == 1:
current_asset = sums[:]
if i == 2:
current_liabilities = sums[:]
if i in {1, 3, 5}:
assets = add_vectory(assets, sums)
else:
liabilities = add_vectory(liabilities, sums)
sheet[i].append(({'name': '小记'}, sums))
liquidity_list = div_vectory(map(float, current_asset),
map(float, current_liabilities))
if liquidity_list:
liquidity_ratio = min(liquidity_list)
else:
liquidity_ratio = -1
debt_asset_ratio = max(div_vectory(map(float, liabilities),
map(float, assets)))
env = {
'sheet': sheet,
'curs': curs,
'assets': assets,
'liabilities': liabilities,
'equity': list(sub_vectory(assets, liabilities)),
'liquidity_ratio': liquidity_ratio,
'debt_asset_ratio': 100*debt_asset_ratio,
}
return render(request, 'inv/balance_sheet.html', env)
def income_outgoing_sheet(request):
lastyear = datetime.date.today()-datetime.timedelta(days=365)
td = datetime.date.today()
income = []
for cat in AccountCategory.objects.filter(cat=1).all():
num = sum((rec.value for rec in cat.accountrec_set.filter(date__gte=lastyear).all()))
if num:
income.append((cat.name, num))
s_income = sum((n for c, n in income))
income.append(('小计', s_income))
outgoing = []
for cat in AccountCategory.objects.filter(cat=2).all():
num = sum((rec.value for rec in cat.accountrec_set.filter(date__gte=lastyear).all()))
if num:
outgoing.append((cat.name, num))
s_outgoing = sum((n for c, n in outgoing))
outgoing.append(('小计', s_outgoing))
investments = []
iotab = []
for cat in Category.objects.filter(cat=5).all():
num = 0
for proj in cat.invproj_set.filter(isopen=False, end__gte=lastyear).all():
num -= (proj.value*proj.acct.currency.rate).quantize(decimal.Decimal('1.00'))
iotab.extend(proj.calc_iotab(td, True))
if num:
investments.append((cat.name, num))
s_investments = sum((n for c, n in investments))
investments.append(('小计', s_investments))
def f(r):
return sum((value*r**dur for dur, value in iotab))
r = fsolve(f, 1.01)[0]
env = {
'income': income,
'outgoing': outgoing,
'investments': investments,
'total_income': s_income+s_investments,
'total_outgoing': s_outgoing,
'net_income': s_income+s_investments-s_outgoing,
'saving_rate': 100*(s_income+s_investments-s_outgoing)/(s_income+s_investments),
'invest_income_rate': 100*s_investments/(s_income+s_investments),
'invest_outgoing_rate': 100*s_investments/s_outgoing,
'invest_rate': 365*100*(r-1),
}
return render(request, 'inv/ios.html', env)
def income_details(request):
df = pd.DataFrame()
for cat in AccountCategory.objects.filter(cat=1).all():
s = pd.Series(name=cat.name)
for rec in cat.accountrec_set.all():
dt = rec.date.replace(day=1)
if dt not in s:
s[dt] = rec.value
else:
s[dt] += rec.value
if s.count():
df = df.join(s, how='outer')
for cat in Category.objects.filter(cat=5).all():
s = | pd.Series(name=cat.name) | pandas.Series |
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import KFold, StratifiedKFold
import pandas as pd
import numpy as np
class BaggingRegressor():
def __init__(self, regressors, seeds = [2022], n_fold=5):
self.regressors = regressors
self.n_regressors = 1 if type(self.regressors) != list else len(self.regressors)
self.fitted_regressors = []
self.seeds = seeds
self.n_seeds = len(self.seeds)
self.n_fold = n_fold
self.folds = None
def fit(self, X, y):
for idx, cur_regressor in enumerate(self.regressors):
cur_fitted_regressors = []
for seed in self.seeds:
self.folds = KFold(n_splits=self.n_fold, shuffle=True, random_state=seed)
for fold_n, (train_index, valid_index) in enumerate(self.folds.split(X, y)):
clf = cur_regressor.fit(X.loc[train_index], y.loc[train_index],
eval_set=[(X.loc[valid_index], y.loc[valid_index])],
early_stopping_rounds = 50,
verbose = 0)
cur_fitted_regressors.append(clf)
self.fitted_regressors.append(cur_fitted_regressors)
print('Training Done')
def predict(self, X, regressor_weights = []):
predict_test = pd.DataFrame()
if np.sum(regressor_weights) != 1:
regressor_weights = np.ones(self.n_regressors) / self.n_regressors
for idx, cur_fitted_regressors in enumerate(self.fitted_regressors):
for i, cur_fitted_regressor in enumerate(cur_fitted_regressors):
if i == 0:
pred = cur_fitted_regressor.predict(X) / float(self.n_fold) / float(self.n_seeds)
else:
pred += cur_fitted_regressor.predict(X) / float(self.n_fold) / float(self.n_seeds)
predict_test['model_%d_predict' % (idx)] = pred * regressor_weights[idx]
self.result = predict_test.sum(axis = 1)
print('Prediction Done')
return self.result
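# Usage sketch (illustrative only, not from the original file): the regressors are assumed
# to follow the LightGBM/XGBoost sklearn API (supporting `eval_set` and
# `early_stopping_rounds` in `fit`), and X/y are assumed to be pandas objects with a
# default RangeIndex so that `.loc[train_index]` selects fold rows correctly.
#     from lightgbm import LGBMRegressor
#     from xgboost import XGBRegressor
#     bag = BaggingRegressor([LGBMRegressor(), XGBRegressor()], seeds=[2022, 2023], n_fold=5)
#     bag.fit(X_train, y_train)
#     preds = bag.predict(X_test, regressor_weights=[0.5, 0.5])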
class BaggingClassifier():
def __init__(self, classifiers, seeds = [2022], n_fold=5):
self.classifiers = classifiers
self.n_classifiers = 1 if type(self.classifiers) != list else len(self.classifiers)
self.fitted_classifiers = []
self.seeds = seeds
self.n_seeds = len(self.seeds)
self.n_fold = n_fold
self.folds = None
def fit(self, X, y, custom_metric_list = []):
for idx, cur_classifier in enumerate(self.classifiers):
cur_fitted_classifiers = []
if idx < len(custom_metric_list):
cur_metric = custom_metric_list[idx]
else:
cur_metric = 'auc'
for seed in self.seeds:
self.folds = StratifiedKFold(n_splits=self.n_fold, shuffle=True, random_state=seed)
for fold_n, (train_index, valid_index) in enumerate(self.folds.split(X, y)):
clf = cur_classifier.fit(X.loc[train_index], y.loc[train_index],
eval_set=[(X.loc[valid_index], y.loc[valid_index])],
eval_metric = cur_metric,
early_stopping_rounds = 50,
verbose = 0)
cur_fitted_classifiers.append(clf)
self.fitted_classifiers.append(cur_fitted_classifiers)
print('Training Done')
def predict_proba(self, X, classifier_weights = []):
predict_proba_test = | pd.DataFrame() | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from tasrif.processing_pipeline.pandas import RenameOperator
from tasrif.processing_pipeline.custom import SimulateDayOperator, AggregateOperator
# -
dates = pd.date_range("2016-12-31", "2020-01-03", freq="15T").to_series()
df = | pd.DataFrame() | pandas.DataFrame |
# coding: utf-8
# # Interrogating building age distributions
#
# This notebook is to explore the distribution of building ages in
# communities in Western Australia.
from os.path import join as pjoin
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import re
import seaborn as sns
sns.set_context("poster")
sns.set_style('darkgrid')
# Apply GA colour palette
palette = sns.blend_palette(["#5E6A71", "#006983", "#72C7E7",
"#A33F1F", "#CA7700", "#A5D867",
"#6E7645"], 7)
# The source file `WA_Residential_Wind_Exposure_2018_TCRM.CSV` can be
# found in HPRM D2018-6256. Download a local version (by using the
# 'Supercopy' option when right-clicking on the record), and change
# the path to the appropriate folder.
inputFile = "C:/WorkSpace/data/derived/exposure/WA/WA_TILES_Residential_Wind_Exposure.csv"
df = pd.read_csv(inputFile)
output_path = "C:/Workspace/data/derived/exposure/WA/"
SA2_names = sorted(list(pd.unique(df['SA2_NAME'])))
ages = sorted(list(pd.unique(df['YEAR_BUILT'])))
print(ages)
def plotAgeDist(df, locality):
fig = plt.figure()
ax = fig.add_subplot(111)
locdf = df[df['SA2_NAME'] == locality]
sns.countplot(x="YEAR_BUILT", data=locdf, order=ages, ax=ax,
palette=palette)
ax.set_xlabel("Year built")
ax.set_ylabel("Number")
plt.setp(ax.get_xticklabels(), rotation=90)
ax.set_title("{0} - {1:,} residential buildings".format(locality, len(locdf.index)))
fig.tight_layout()
fig.savefig(pjoin(output_path, "AgeProfile", "SA2",
"{0}.png".format(locality)))
plt.clf()
plt.close('all')
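# Example call (a minimal sketch): plot the age profile for a single SA2 region, e.g.
#     plotAgeDist(df, SA2_names[0])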
# There are two aspects to the age distribution - communities where
# there has been substantial growth since the last significant
# cyclone, and communities with a large proportion of older (pre-1980)
# era construction.
# TODO:
# 1. Add a chart that ranks the localities by proportion of a
# selected age group. The list of age groups is already compiled
# (`ages`), just need to do the calculations to get proportions for
# the specified age group.
# 2. Add another figure that plots the
# predominant age group for each suburb in the locality. If there's a
# spatial layer of the boundaries for `SUBURB_2015`, then one could
# plot up a categorised map of the suburbs based on predominant age
# group.
def plotBySuburb(df, locality):
fig = plt.figure()
ax = fig.add_subplot(111)
locdf = df[df['SA2_NAME'] == locality]
suburblist = locdf[locdf['SUBURB'].notnull()]['SUBURB']
suburbs = sorted(list( | pd.unique(suburblist) | pandas.unique |
from collections import OrderedDict
import pydoc
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Series,
TimedeltaIndex,
date_range,
period_range,
timedelta_range,
)
from pandas.core.arrays import PeriodArray
from pandas.core.indexes.datetimes import Timestamp
import pandas.util.testing as tm
import pandas.io.formats.printing as printing
class TestSeriesMisc:
def test_scalarop_preserve_name(self, datetime_series):
result = datetime_series * 2
assert result.name == datetime_series.name
def test_copy_name(self, datetime_series):
result = datetime_series.copy()
assert result.name == datetime_series.name
def test_copy_index_name_checking(self, datetime_series):
# don't want to be able to modify the index stored elsewhere after
# making a copy
datetime_series.index.name = None
assert datetime_series.index.name is None
assert datetime_series is datetime_series
cp = datetime_series.copy()
cp.index.name = "foo"
printing.pprint_thing(datetime_series.index.name)
assert datetime_series.index.name is None
def test_append_preserve_name(self, datetime_series):
result = datetime_series[:5].append(datetime_series[5:])
assert result.name == datetime_series.name
def test_binop_maybe_preserve_name(self, datetime_series):
# names match, preserve
result = datetime_series * datetime_series
assert result.name == datetime_series.name
result = datetime_series.mul(datetime_series)
assert result.name == datetime_series.name
result = datetime_series * datetime_series[:-2]
assert result.name == datetime_series.name
# names don't match, don't preserve
cp = datetime_series.copy()
cp.name = "something else"
result = datetime_series + cp
assert result.name is None
result = datetime_series.add(cp)
assert result.name is None
ops = ["add", "sub", "mul", "div", "truediv", "floordiv", "mod", "pow"]
ops = ops + ["r" + op for op in ops]
for op in ops:
# names match, preserve
s = datetime_series.copy()
result = getattr(s, op)(s)
assert result.name == datetime_series.name
# names don't match, don't preserve
cp = datetime_series.copy()
cp.name = "changed"
result = getattr(s, op)(cp)
assert result.name is None
def test_combine_first_name(self, datetime_series):
result = datetime_series.combine_first(datetime_series[:5])
assert result.name == datetime_series.name
def test_getitem_preserve_name(self, datetime_series):
result = datetime_series[datetime_series > 0]
assert result.name == datetime_series.name
result = datetime_series[[0, 2, 4]]
assert result.name == datetime_series.name
result = datetime_series[5:10]
assert result.name == datetime_series.name
def test_pickle_datetimes(self, datetime_series):
unp_ts = self._pickle_roundtrip(datetime_series)
tm.assert_series_equal(unp_ts, datetime_series)
def test_pickle_strings(self, string_series):
unp_series = self._pickle_roundtrip(string_series)
tm.assert_series_equal(unp_series, string_series)
def _pickle_roundtrip(self, obj):
with tm.ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_argsort_preserve_name(self, datetime_series):
result = datetime_series.argsort()
assert result.name == datetime_series.name
def test_sort_index_name(self, datetime_series):
result = datetime_series.sort_index(ascending=False)
assert result.name == datetime_series.name
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d)
expected = Series(d, index=sorted(d.keys()))
tm.assert_series_equal(result, expected)
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
def test_constructor_subclass_dict(self):
data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
series = Series(data)
expected = Series(dict(data.items()))
tm.assert_series_equal(series, expected)
def test_constructor_ordereddict(self):
# GH3283
data = OrderedDict(
("col{i}".format(i=i), np.random.random()) for i in range(12)
)
series = Series(data)
expected = Series(list(data.values()), list(data.keys()))
tm.assert_series_equal(series, expected)
# Test with subclass
class A(OrderedDict):
pass
series = Series(A(data))
tm.assert_series_equal(series, expected)
def test_constructor_dict_multiindex(self):
d = {("a", "a"): 0.0, ("b", "a"): 1.0, ("b", "c"): 2.0}
_d = sorted(d.items())
result = Series(d)
expected = Series(
[x[1] for x in _d], index=pd.MultiIndex.from_tuples([x[0] for x in _d])
)
tm.assert_series_equal(result, expected)
d["z"] = 111.0
_d.insert(0, ("z", d["z"]))
result = Series(d)
expected = Series(
[x[1] for x in _d], index=pd.Index([x[0] for x in _d], tupleize_cols=False)
)
result = result.reindex(index=expected.index)
tm.assert_series_equal(result, expected)
def test_constructor_dict_timedelta_index(self):
# GH #12169 : Resample category data with timedelta index
# construct Series from dict as data and TimedeltaIndex as index
# will result NaN in result Series data
expected = Series(
data=["A", "B", "C"], index=pd.to_timedelta([0, 10, 20], unit="s")
)
result = Series(
data={
pd.to_timedelta(0, unit="s"): "A",
pd.to_timedelta(10, unit="s"): "B",
pd.to_timedelta(20, unit="s"): "C",
},
index=pd.to_timedelta([0, 10, 20], unit="s"),
)
tm.assert_series_equal(result, expected)
def test_sparse_accessor_updates_on_inplace(self):
s = pd.Series([1, 1, 2, 3], dtype="Sparse[int]")
s.drop([0, 1], inplace=True)
assert s.sparse.density == 1.0
def test_tab_completion(self):
# GH 9910
s = Series(list("abcd"))
# Series of str values should have .str but not .dt/.cat in __dir__
assert "str" in dir(s)
assert "dt" not in dir(s)
assert "cat" not in dir(s)
# similarly for .dt
s = Series(date_range("1/1/2015", periods=5))
assert "dt" in dir(s)
assert "str" not in dir(s)
assert "cat" not in dir(s)
# Similarly for .cat, but with the twist that str and dt should be
# there if the categories are of that type first cat and str.
s = Series(list("abbcd"), dtype="category")
assert "cat" in dir(s)
assert "str" in dir(s) # as it is a string categorical
assert "dt" not in dir(s)
# similar to cat and str
s = Series(date_range("1/1/2015", periods=5)).astype("category")
assert "cat" in dir(s)
assert "str" not in dir(s)
assert "dt" in dir(s) # as it is a datetime categorical
def test_tab_completion_with_categorical(self):
# test the tab completion display
ok_for_cat = [
"categories",
"codes",
"ordered",
"set_categories",
"add_categories",
"remove_categories",
"rename_categories",
"reorder_categories",
"remove_unused_categories",
"as_ordered",
"as_unordered",
]
def get_dir(s):
results = [r for r in s.cat.__dir__() if not r.startswith("_")]
return sorted(set(results))
s = Series(list("aabbcde")).astype("category")
results = get_dir(s)
tm.assert_almost_equal(results, sorted(set(ok_for_cat)))
@pytest.mark.parametrize(
"index",
[
tm.makeUnicodeIndex(10),
tm.makeStringIndex(10),
tm.makeCategoricalIndex(10),
Index(["foo", "bar", "baz"] * 2),
tm.makeDateIndex(10),
tm.makePeriodIndex(10),
tm.makeTimedeltaIndex(10),
tm.makeIntIndex(10),
tm.makeUIntIndex(10),
tm.makeIntIndex(10),
tm.makeFloatIndex(10),
Index([True, False]),
Index(["a{}".format(i) for i in range(101)]),
pd.MultiIndex.from_tuples(zip("ABCD", "EFGH")),
pd.MultiIndex.from_tuples(zip([0, 1, 2, 3], "EFGH")),
],
)
def test_index_tab_completion(self, index):
# dir contains string-like values of the Index.
s = pd.Series(index=index)
dir_s = dir(s)
for i, x in enumerate(s.index.unique(level=0)):
if i < 100:
assert not isinstance(x, str) or not x.isidentifier() or x in dir_s
else:
assert x not in dir_s
def test_not_hashable(self):
s_empty = Series()
s = Series([1])
msg = "'Series' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(s_empty)
with pytest.raises(TypeError, match=msg):
hash(s)
def test_contains(self, datetime_series):
tm.assert_contains_all(datetime_series.index, datetime_series)
def test_iter_datetimes(self, datetime_series):
for i, val in enumerate(datetime_series):
assert val == datetime_series[i]
def test_iter_strings(self, string_series):
for i, val in enumerate(string_series):
assert val == string_series[i]
def test_keys(self, datetime_series):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = datetime_series.keys
assert getkeys() is datetime_series.index
def test_values(self, datetime_series):
tm.assert_almost_equal(
datetime_series.values, datetime_series, check_dtype=False
)
def test_iteritems_datetimes(self, datetime_series):
for idx, val in datetime_series.iteritems():
assert val == datetime_series[idx]
def test_iteritems_strings(self, string_series):
for idx, val in string_series.iteritems():
assert val == string_series[idx]
        # assert is lazy (generators don't define reverse, lists do)
assert not hasattr(string_series.iteritems(), "reverse")
def test_items_datetimes(self, datetime_series):
for idx, val in datetime_series.items():
assert val == datetime_series[idx]
def test_items_strings(self, string_series):
for idx, val in string_series.items():
assert val == string_series[idx]
        # assert is lazy (generators don't define reverse, lists do)
assert not hasattr(string_series.items(), "reverse")
def test_raise_on_info(self):
s = Series(np.random.randn(10))
msg = "'Series' object has no attribute 'info'"
with pytest.raises(AttributeError, match=msg):
s.info()
def test_copy(self):
for deep in [None, False, True]:
s = Series(np.arange(10), dtype="float64")
# default deep is True
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[::2] = np.NaN
if deep is None or deep is True:
# Did not modify original Series
assert np.isnan(s2[0])
assert not np.isnan(s[0])
else:
# we DID modify the original Series
assert np.isnan(s2[0])
assert np.isnan(s[0])
def test_copy_tzaware(self):
# GH#11794
# copy of tz-aware
expected = Series([Timestamp("2012/01/01", tz="UTC")])
expected2 = Series([Timestamp("1999/01/01", tz="UTC")])
for deep in [None, False, True]:
s = Series([Timestamp("2012/01/01", tz="UTC")])
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[0] = pd.Timestamp("1999/01/01", tz="UTC")
# default deep is True
if deep is None or deep is True:
# Did not modify original Series
tm.assert_series_equal(s2, expected2)
tm.assert_series_equal(s, expected)
else:
# we DID modify the original Series
tm.assert_series_equal(s2, expected2)
tm.assert_series_equal(s, expected2)
def test_axis_alias(self):
s = Series([1, 2, np.nan])
tm.assert_series_equal(s.dropna(axis="rows"), s.dropna(axis="index"))
assert s.dropna().sum("rows") == 3
assert s._get_axis_number("rows") == 0
assert s._get_axis_name("rows") == "index"
def test_class_axis(self):
# https://github.com/pandas-dev/pandas/issues/18147
# no exception and no empty docstring
assert pydoc.getdoc(Series.index)
def test_numpy_unique(self, datetime_series):
# it works!
np.unique(datetime_series)
def test_ndarray_compat(self):
# test numpy compat with Series as sub-class of NDFrame
tsdf = DataFrame(
np.random.randn(1000, 3),
columns=["A", "B", "C"],
index=date_range("1/1/2000", periods=1000),
)
def f(x):
return x[x.idxmax()]
result = tsdf.apply(f)
expected = tsdf.max()
tm.assert_series_equal(result, expected)
# .item()
with tm.assert_produces_warning(FutureWarning):
s = Series([1])
result = s.item()
assert result == 1
assert s.item() == s.iloc[0]
# using an ndarray like function
s = Series(np.random.randn(10))
result = Series(np.ones_like(s))
expected = Series(1, index=range(10), dtype="float64")
tm.assert_series_equal(result, expected)
# ravel
s = Series(np.random.randn(10))
tm.assert_almost_equal(s.ravel(order="F"), s.values.ravel(order="F"))
# compress
# GH 6658
s = Series([0, 1.0, -1], index=list("abc"))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = np.compress(s > 0, s)
tm.assert_series_equal(result, Series([1.0], index=["b"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = np.compress(s < -1, s)
# result empty Index(dtype=object) as the same as original
exp = Series([], dtype="float64", index=Index([], dtype="object"))
tm.assert_series_equal(result, exp)
s = Series([0, 1.0, -1], index=[0.1, 0.2, 0.3])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = np.compress(s > 0, s)
tm.assert_series_equal(result, Series([1.0], index=[0.2]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = np.compress(s < -1, s)
# result empty Float64Index as the same as original
exp = Series([], dtype="float64", index=Index([], dtype="float64"))
tm.assert_series_equal(result, exp)
def test_str_accessor_updates_on_inplace(self):
s = pd.Series(list("abc"))
s.drop([0], inplace=True)
assert len(s.str.lower()) == 2
def test_str_attribute(self):
# GH9068
methods = ["strip", "rstrip", "lstrip"]
s = Series([" jack", "jill ", " jesse ", "frank"])
for method in methods:
expected = Series([getattr(str, method)(x) for x in s.values])
tm.assert_series_equal(getattr(Series.str, method)(s.str), expected)
# str accessor only valid with string values
s = Series(range(5))
with pytest.raises(AttributeError, match="only use .str accessor"):
s.str.repeat(2)
def test_empty_method(self):
s_empty = pd.Series()
assert s_empty.empty
for full_series in [pd.Series([1]), pd.Series(index=[1])]:
assert not full_series.empty
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; s = pd.Series()"
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("s.", 1))
def test_integer_series_size(self):
# GH 25580
s = Series(range(9))
assert s.size == 9
s = Series(range(9), dtype="Int64")
assert s.size == 9
class TestCategoricalSeries:
@pytest.mark.parametrize(
"method",
[
lambda x: x.cat.set_categories([1, 2, 3]),
lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),
lambda x: x.cat.rename_categories([1, 2, 3]),
lambda x: x.cat.remove_unused_categories(),
lambda x: x.cat.remove_categories([2]),
lambda x: x.cat.add_categories([4]),
lambda x: x.cat.as_ordered(),
lambda x: x.cat.as_unordered(),
],
)
def test_getname_categorical_accessor(self, method):
# GH 17509
s = Series([1, 2, 3], name="A").astype("category")
expected = "A"
result = method(s).name
assert result == expected
def test_cat_accessor(self):
s = Series( | Categorical(["a", "b", np.nan, "a"]) | pandas.Categorical |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from .mean_action import get_mean_action
from .drawer.berthing_trajectory_drawer import BerthingTrajectoryDrawer
def test(norm_init_coords, init_heading_angle, env, model, deterministic=False, use_recurrent_model=False):
env.reset()
LBP = env.get_attr("Berthing_env")[0].L
init_coords = np.array(norm_init_coords) * LBP
env.reset()
obs = env.env_method("deterministic_reset", init_coords, init_heading_angle)
action_option = "mean" # single | mean ('mean' performs better)
n_steps = env.get_attr("Berthing_env")[0].simulationDuration
state_hist, reward_hist = [], []
#state_hist.append([env.get_attr("coord_x")[0], env.get_attr("coord_y")[0], env.get_attr("heading_angle")[0]])
state = None # initial states for LSTM
done = [False for _ in range(env.num_envs)] # When using VecEnv, done is a vector
for i in range(n_steps):
if action_option == "single":
action = model.predict(obs, deterministic=deterministic)[0]
obs, reward, done, _ = env.step(action)
elif action_option == "mean":
if use_recurrent_model:
action, state = model.predict(obs, state=state, mask=done)
else:
action = model.predict(obs, deterministic=deterministic)[0]
mean_action = get_mean_action(action)
obs, reward, done, _ = env.step(mean_action)
if True in np.array([done]).flatten():
break
rudder_angle, n = env.get_attr("Berthing_env")[0].rudder_angle, env.get_attr("Berthing_env")[0].n
u, v, r = env.get_attr("Berthing_env")[0].u, env.get_attr("Berthing_env")[0].v, env.get_attr("Berthing_env")[0].r
current_state = [env.get_attr("coord_x")[0], env.get_attr("coord_y")[0], env.get_attr("heading_angle")[0], n, rudder_angle, u, v, r]
state_hist.append(current_state)
reward_hist.append(reward)
state_hist = np.array(state_hist)
# plot
pos_termination = env.get_attr("pos_termination")[0]
terminal_circle_radius = env.get_attr("termination_tolerance")[0]
drawer = BerthingTrajectoryDrawer(LBP, pos_termination, terminal_circle_radius, plot_lim=(-1, 20))
x_hist, y_hist, heading_angle_hist, n_hist, rudder_angle_hist, u_hist, v_hist, r_hist = [state_hist[:, i] for i in range(len(current_state))] # n [rps], rudder_angle [rad.]
state_hist = | pd.DataFrame(state_hist, columns=["x_hist", "y_hist", "heading_angle_hist", "n_hist", "rudder_angle_hist", "u_hist", "v_hist", "r_hist"]) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pandas.core.reshape.concat import concat
import streamlit as st
import os
from pathlib import Path
#Data Structures
class DataGroup :
def __init__(self, name, timepoint, dataframe):
self.name = name
self.timepoint = timepoint
self.dataframe = pd.DataFrame(data= dataframe)
    #Function to rename the dataframe with the datatype being parsed (e.g. Tumor Area, Vascular Leak, etc.)
def rename_dataframe(self, datatype, dataframe):
dataframe = self.dataframe
dataframe = dataframe.rename(datatype)
return dataframe
#Function to calculate mean raw tumor area in pixel unit
def calculateAvg(self, dataframe):
dataframe = self.dataframe
avg = dataframe.mean()
return avg
#Function to calculate standard deviation of tumor area in pixel unit
def calculateSD(self, dataframe):
dataframe = self.dataframe
std = dataframe.std()
return std
#Function to calculate SEM of tumor area in pixel unit
def calculateSEM(self, dataframe):
dataframe = self.dataframe
sem = dataframe.sem()
return sem
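#Usage sketch (hypothetical group name/timepoint; not part of the original app): a DataGroup
#wraps one group's rows for a timepoint so summary statistics can be pulled later, e.g.
#    grp = DataGroup('100_PFU_mL', '24h', condition_dataframe)
#    grp.calculateAvg(grp.dataframe), grp.calculateSD(grp.dataframe), grp.calculateSEM(grp.dataframe)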
# DataSeries: a second data container that stores a single timepoint's values as a pandas Series
class DataSeries :
    #Initialize a DataSeries object containing a name, timepoint, and a pandas Series that takes
    # raw tumor area from the CellProfiler output .csv file
def __init__(self, name, timepoint, df):
self.name = name
self.timepoint = timepoint
self.df = pd.Series(data = df)
self.df = self.df.rename('Value' + timepoint)
#Function to calculate mean raw tumor area in pixel unit
def calculateAvg(self, df):
df = self.df
avg = df.mean()
return avg
#Function to calculate standard deviation of tumor area in pixel unit
def calculateSD(self, df):
df = self.df
std = df.std()
return std
#Function to calculate SEM of tumor area in pixel unit
def calculateSEM(self, df):
df = self.df
sem = df.sem()
return sem
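#Usage sketch (hypothetical values; not part of the original app): a DataSeries wraps one
#timepoint's raw tumor areas as a named pandas Series, e.g.
#    ds = DataSeries('Control', '24h', raw_data['AreaOccupied_AreaOccupied_Identify_Tumor'])
#    ds.calculateAvg(ds.df), ds.calculateSD(ds.df), ds.calculateSEM(ds.df)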
#Main ReadMe
def ReadMe() :
st.markdown("""**Read Me:**
Aracari Biosciences provides a collection of web apps to assist with processing raw data
from different pipelines (e.g. CellProfiler, ImageJ/FIJI) into tabulated dataframes
for *post hoc* statistical analysis and data visualization. Select the type of analysis
and follow the instructions to process your data.
""")
#Tumor Growth Analysis
def TumorGrowth() :
#ReadMe for Tumor Analysis App
def TumorReadMe() :
st.markdown("""**Instructions:**
1. Type name of condition to be analyzed. (e.g. _Control_ or _Bevacizumab-100ng/mL_)
2. Indicate numbers of time points to be analyzed (e.g. 5 timepoint for 0h to 96h).
3. Type name of timepoint to be analyzed and upload its CellProfiler .csv file.
4. This app will automatically display a table summary of raw tumor area data and calculate __Mean/SD/SEM/N.__ In addition, it will also normalize raw data to T-0h and display __%Change/SD/SEM/N.__
5. Save your data tables to .csv files and export to your graphing app (e.g. Prism) or use Plot Data mode for quick data visualization.
6. Use your normalized summary .csv output files (i.e. condition_normalized summary.csv) in Plot Data mode. """)
#Extract raw data from CellProfiler pipeline and assemble into group according to conditions and timepoints
def ExtractTumorData() :
datalist = []
namelist = []
savefile_key = 'savefile_'
st.header('Extract Data')
st.write("""__Instructions:__
1. Indicate number of groups in this dataset, according to your keylist.
2. List name of groups in this dataset according to your keylist, separated each by a comma (e.g. 100_PFU_mL, 300_PFU_mL).
3. Type in _name_ of the timepoint associated with this dataset (e.g. 0h, 24h, 48h). This will be used to label dataframe later.
4. Upload your dataset _*_Image.csv_ file from CellProfiler pipeline.
5. Type in directory path to save raw .csv files after sorting by groups. (e.g. _/Users/a/Desktop/data/_)
""")
num_condition = st.text_input('Number of groups in this dataset, according to your keylist:')
name_input = st.text_input('List name of groups in this dataset according to your keylist, separated each by a comma (e.g. 100_PFU_mL, 300_PFU_mL):')
namelist = name_input.split(', ')
timepoint_input = st.text_input("Timepoint associated with this dataset:")
uploaded_file = st.file_uploader("Upload your CellProfiler *_Image.csv file:")
uploaded_dataframe = pd.read_csv(uploaded_file, header= 0, index_col= 'Metadata_Key_Group')
save_dir = st.text_input('Type directory to save raw data .csv files sorted by groups:')
st.write('___')
st.write("""## _Raw Data Sorted By Groups_ """)
for i in range(0,int(num_condition)) :
condition_dataframe = uploaded_dataframe.loc[namelist[i]]
dataObj = DataGroup(namelist[i], timepoint_input, condition_dataframe)
datalist.append(dataObj)
st.write(datalist[i].dataframe)
raw_csv = os.path.join(save_dir, namelist[i] + '_' + timepoint_input + '_raw_data.csv')
saved = st.button('Save ' + namelist[i] + '_' + timepoint_input + '_raw_data.csv file', key= savefile_key + str(i))
if saved:
datalist[i].dataframe.to_csv(raw_csv)
st.write('_Files saved_')
#Analyze tumor time course data
def AnalyzeTumorData() :
datapoint = []
filenamelist = []
#Collect condition name and number of timepoints
with st.container() :
group_name = st.sidebar.text_input('Name of condition:')
datapoint_input = st.sidebar.text_input('Number of timepoint(s) in time course:')
st.header('Analyze Data')
st.write("""__Instructions:__
1. Type in name your condition. (__Note:__ Avoid using special characters when naming. For example, replace "_10,000 RFU/mL_" with "_10K RFU(mL-1)_")
2. Indicate number of timepoint(s) in the time course experiment.
3. Type in _name_ of a timepoint (e.g. 0h, 24h, 48h). This will be used to label dataframe later.
4. Upload your _*_Image.csv_ file from CellProfiler pipeline for each time point.
5. Type in directory path to save raw or normalized .csv files. (e.g. _/Users/a/Desktop/data/_)
""")
        #Define keys to keep track of timepoints and files uploaded
t = 't_'
u = 'upload_'
#Read and parse tumor area raw data into individual DataGroup objects
for i in range(0,int(datapoint_input)) :
time_point = st.text_input('Timepoint of dataset:', key= t + str(i))
uploaded_file = st.file_uploader("Select .csv file:", key= u + str(i))
raw_data = pd.read_csv(uploaded_file)
raw_TumorArea = raw_data['AreaOccupied_AreaOccupied_Identify_Tumor']
file_name = raw_data['FileName_Orig_Tumor']
dataObj = DataSeries(group_name, time_point, raw_TumorArea)
datapoint.append(dataObj)
filenamelist.append(file_name)
st.write('___ ')
#Display raw data table. Calculate mean, SD, SEM of tumor area of each timepoint
avg_dataObj = []
std_dataObj = []
sem_dataObj = []
group_id = []
n_row = []
raw_df = pd.DataFrame(data= None)
for i in range(0, int(datapoint_input)) :
avg_dataObj.append(datapoint[i].calculateAvg(raw_TumorArea))
std_dataObj.append(datapoint[i].calculateSD(raw_TumorArea))
sem_dataObj.append(datapoint[i].calculateSEM(raw_TumorArea))
group_id.append(datapoint[i].name + '_' + datapoint[i].timepoint)
n_row.append(datapoint[i].df.size)
raw_df.insert(loc=i, column= group_id[i], value= datapoint[i].df) #Construct raw tumor area dataframe
tabulated_df = raw_df.join(filenamelist[i]) #Join filenames to its corresponnding data
st.write("""### __Raw Data__ """)
st.write(tabulated_df)
st.write("""### __Mean, SD, SEM__ """)
#Tabulate mean, SD, SEM of all datapoints into new dataframe
df_summary = pd.DataFrame(data = [avg_dataObj, std_dataObj, sem_dataObj, n_row], index = ['Mean','SD','SEM', 'N'], columns= group_id)
st.write(df_summary)
with st.form('save_raw pixel_files') :
save_dir = st.text_input('Type directory to save raw dataframe and Mean/SD/SEM .csv files:')
tabulated_df_csv = os.path.join(save_dir, group_name + '_raw_dataframe.csv')
raw_summary_csv = os.path.join(save_dir, group_name + '_raw_summary.csv')
saved = st.form_submit_button('Save')
if saved:
tabulated_df.to_csv(tabulated_df_csv)
df_summary.to_csv(raw_summary_csv)
st.write('_Files saved_')
st.write('___ ')
#Normalize raw data to T-0h
normalized_df = pd.DataFrame(data=None)
avg_percent_change = []
normalized_std = []
normalized_sem = []
normalized_n_row = []
for i in range(0, int(datapoint_input)) :
normalized_df.insert(loc= i, column= group_id[i], value= raw_df[group_id[i]]/raw_df[group_id[0]])
tabulated_normalized_df = normalized_df.join(filenamelist[i])
normalized_n_row.append(normalized_df[group_id[i]].size)
avg_percent_change.append(normalized_df[group_id[i]].mean())
normalized_std.append(normalized_df[group_id[i]].std())
normalized_sem.append(normalized_df[group_id[i]].sem())
st.write("""### __Normalized Data__ """)
st.write(tabulated_normalized_df)
st.write("""### __%Change, SD, SEM__ """)
normalized_summary = | pd.DataFrame(data = [avg_percent_change, normalized_std, normalized_sem, normalized_n_row], index = ['%Change','SD','SEM', 'N'], columns= group_id) | pandas.DataFrame |
import argparse
import json
import os
import pandas as pd
import requests
def get_parser():
parser = argparse.ArgumentParser(description=__doc__)
input_group = parser.add_mutually_exclusive_group(required=True)
input_group.add_argument('-i', "--infile", action='store',
help="""Path to .txt file containing accessions of experiments to process. The txt file must contain two columns with 1 header row, one labeled 'accession' and another labeled 'align_only'. It can optionally include 'custom_message' and 'custom_crop_length' columns.""")
parser.add_argument('-o', '--outputpath', action='store', default='',
help="""Optional path to output folder. Defaults to current path.""")
parser.add_argument('-g', '--gcpath', action='store', default='',
help="""Optional path where the input.json will be uploaded to the Google Cloud instance. Only affects the list of caper commands that is generated.""")
parser.add_argument('--wdl', action='store', default=False,
help="""Path to .wdl file.""")
parser.add_argument('-s', '--server', action='store', default='https://www.encodeproject.org',
help="""Optional specification of server using the full URL. Defaults to production server.""")
parser.add_argument('--use-s3-uris', action='store_true', default=False,
help="""Optional flag to use s3_uri links. Otherwise, defaults to using @@download links from the ENCODE portal.""")
input_group.add_argument("--accessions", action='store',
help="""List of accessions separated by commas.""")
parser.add_argument('--align-only', action='store', default=False,
help="""Pipeline will end after alignments step if True.""")
parser.add_argument('--custom-message', action='store',
help="""An additional custom string to be appended to the messages in the caper submit commands.""")
parser.add_argument('--caper-commands-file-message', action='store', default='',
help="""An additional custom string to be appended to the file name of the caper submit commands.""")
parser.add_argument('--custom-crop-length', action='store', default='',
help="""Custom value for the crop length.""")
parser.add_argument('--multiple-controls', action='store', default='',
help="""Pipeline will assume multiple controls should be used.""")
parser.add_argument('--force-se', action='store', default='',
help="""Pipeline will map as single-ended regardless of input fastqs.""")
parser.add_argument('--redacted', action='store', default='',
help="""Control experiment has redacted alignments.""")
return parser
def check_path_trailing_slash(path):
if path.endswith('/'):
return path.rstrip('/')
else:
return path
def build_experiment_report_query(experiment_list, server):
joined_list = '&accession='.join(experiment_list)
return server + '/report/?type=Experiment' + \
f'&accession={joined_list}' + \
'&field=@id' + \
'&field=accession' + \
'&field=assay_title' + \
'&field=control_type' + \
'&field=possible_controls' + \
'&field=replicates.antibody.targets' + \
'&field=files.s3_uri' + \
'&field=files.href' + \
'&field=replicates.library.biosample.organism.scientific_name' + \
'&limit=all' + \
'&format=json'
def build_file_report_query(experiment_list, server, file_format):
joined_list = '&dataset='.join(experiment_list)
if file_format == 'fastq':
format_parameter = '&file_format=fastq'
award_parameter = ''
output_type_parameter = '&output_type=reads'
elif file_format == 'bam':
format_parameter = '&file_format=bam'
award_parameter = '&award.rfa=ENCODE4'
output_type_parameter = '&output_type=alignments&output_type=redacted alignments'
return server + '/report/?type=File' + \
f'&dataset={joined_list}' + \
'&status=released' + \
'&status=in+progress' + \
award_parameter + \
'&assembly!=hg19' + \
'&assembly!=mm9' + \
format_parameter + \
output_type_parameter + \
'&field=@id' + \
'&field=dataset' + \
'&field=file_format' + \
'&field=biological_replicates' + \
'&field=paired_end' + \
'&field=paired_with' + \
'&field=run_type' + \
'&field=mapped_run_type' + \
'&field=read_length' + \
'&field=cropped_read_length' + \
'&field=cropped_read_length_tolerance' + \
'&field=status' + \
'&field=s3_uri' + \
'&field=href' + \
'&field=replicate.status' + \
'&limit=all' + \
'&format=json'
def parse_infile(infile):
try:
infile_df = pd.read_csv(infile, sep='\t')
infile_df['align_only'].astype('bool')
infile_df['multiple_controls'].astype('bool')
infile_df['force_se'].astype('bool')
return infile_df
except FileNotFoundError as e:
print(e)
exit()
except KeyError:
print('Missing required align_only column in input file.')
exit()
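# A hypothetical example of the tab-separated infile parse_infile expects
# (accession values are placeholders); the boolean-like columns are the ones
# cast with astype('bool') above:
#
# accession      align_only    multiple_controls    force_se
# ENCSRAAAAAA    True          False                False
# ENCSRBBBBBB    False         True                 False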
def strs2bool(strings):
out = []
for string in strings:
if string == "True":
out.append(True)
elif string == "False":
out.append(False)
return out
def get_data_from_portal(infile_df, server, keypair, link_prefix, link_src):
# Retrieve experiment report view json with necessary fields and store as DataFrame.
experiment_input_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import plotly.express as px
import datetime as dt
dd = | pd.read_csv('validation/lithuania_processed.csv', header=2) | pandas.read_csv |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
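# Sketch of how these factories are used further down: QuartersEstimates(1)
# builds a DataSet subclass with num_announcements == 1 (one announcement out),
# QuartersEstimates(2) one with num_announcements == 2, and so on, while
# QuartersEstimatesNoNumQuartersAttr below deliberately omits the attribute so
# the loaders' validation of it can be exercised.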
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
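# A minimal sketch of the helper above with hypothetical sids and values:
#   create_expected_df_for_factor_compute(
#       pd.Timestamp('2015-01-05'), [0, 1],
#       [(0, 100.0, pd.Timestamp('2015-01-05')),
#        (1, np.nan, pd.Timestamp('2015-01-06'))],
#       pd.Timestamp('2015-01-09'))
# pivots the tuples into one column per sid, forward-fills each column from its
# knowledge_date through 2015-01-09, and indexes the result by
# (at_date, knowledge_date), both localized to UTC.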
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculates estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
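# The deliberate shuffle above (seeded for reproducibility) presumably ensures
# that the loader under test cannot rely on the events arriving in any
# particular row order.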
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
# split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
# split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
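# A short sketch of the mechanism the expected timelines below encode (values
# taken from sid 50 above): estimates are stored split-adjusted as of
# split_adjusted_asof_date (2015-01-14), so on windows ending before a split's
# effective_date the loader un-applies that ratio. Sid 50's raw estimate of
# 150.0 therefore appears as 150 / 15 / 16 through 2015-01-12, as 150 / 16 on
# 2015-01-13, and as 150.0 from 2015-01-14 onward.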
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-01-21")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 250.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in | pd.date_range("2015-01-20", "2015-02-09") | pandas.date_range |
# -*- coding: utf-8 -*-
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex, Categorical
from pandas.compat import StringIO
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.errors import ParserWarning
class DtypeTests(object):
def test_passing_dtype(self):
# see gh-6607
df = DataFrame(np.random.rand(5, 2).round(4), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# see gh-3795: passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
# for parsing, interpret object as str
result = self.read_csv(path, dtype=object, index_col=0)
tm.assert_frame_equal(result, expected)
# we expect all object columns, so need to
# convert to test for equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# see gh-12048: empty frame
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
assert result['one'].dtype == 'u1'
assert result['two'].dtype == 'object'
def test_categorical_dtype(self):
# GH 10153
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['a', 'a', 'b']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype=CategoricalDtype())
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'a': 'category',
'b': 'category',
'c': CategoricalDtype()})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'b': 'category'})
expected = pd.DataFrame({'a': [1, 1, 2],
'b': Categorical(['a', 'a', 'b']),
'c': [3.4, 3.4, 4.5]})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
# unsorted
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', 'b', 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
# missing
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', np.nan, 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
@pytest.mark.slow
def test_categorical_dtype_high_cardinality_numeric(self):
# GH 18186
data = np.sort([str(i) for i in range(524289)])
expected = DataFrame({'a': Categorical(data, ordered=True)})
actual = self.read_csv(StringIO('a\n' + '\n'.join(data)),
dtype='category')
actual["a"] = actual["a"].cat.reorder_categories(
np.sort(actual.a.cat.categories), ordered=True)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_encoding(self):
# GH 10153
pth = tm.get_data_path('unicode_series.csv')
encoding = 'latin-1'
expected = self.read_csv(pth, header=None, encoding=encoding)
expected[1] = Categorical(expected[1])
actual = self.read_csv(pth, header=None, encoding=encoding,
dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
pth = tm.get_data_path('utf16_ex.txt')
encoding = 'utf-16'
expected = self.read_table(pth, encoding=encoding)
expected = expected.apply(Categorical)
actual = self.read_table(pth, encoding=encoding, dtype='category')
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize(self):
# GH 10153
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [pd.DataFrame({'a': [1, 1],
'b': Categorical(['a', 'b'])}),
pd.DataFrame({'a': [1, 2],
'b': Categorical(['b', 'c'])},
index=[2, 3])]
actuals = self.read_csv(StringIO(data), dtype={'b': 'category'},
chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize('ordered', [False, True])
@pytest.mark.parametrize('categories', [
['a', 'b', 'c'],
['a', 'c', 'b'],
['a', 'b', 'c', 'd'],
['c', 'b', 'a'],
])
def test_categorical_categoricaldtype(self, categories, ordered):
data = """a,b
1,a
1,b
1,b
2,c"""
expected = pd.DataFrame({
"a": [1, 1, 1, 2],
"b": Categorical(['a', 'b', 'b', 'c'],
categories=categories,
ordered=ordered)
})
dtype = {"b": CategoricalDtype(categories=categories,
ordered=ordered)}
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_categoricaldtype_unsorted(self):
data = """a,b
1,a
1,b
1,b
2,c"""
dtype = CategoricalDtype(['c', 'b', 'a'])
expected = pd.DataFrame({
'a': [1, 1, 1, 2],
'b': Categorical(['a', 'b', 'b', 'c'], categories=['c', 'b', 'a'])
})
result = self.read_csv(StringIO(data), dtype={'b': dtype})
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_numeric(self):
dtype = {'b': CategoricalDtype([1, 2, 3])}
data = "b\n1\n1\n2\n3"
expected = pd.DataFrame({'b': Categorical([1, 1, 2, 3])})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_datetime(self):
dtype = {
'b': CategoricalDtype(pd.date_range('2017', '2019', freq='AS'))
}
data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
dtype = {
'b': CategoricalDtype([pd.Timestamp("2014")])
}
data = "b\n2014-01-01\n2014-01-01T00:00:00"
expected = pd.DataFrame({'b': Categorical([pd.Timestamp('2014')] * 2)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_timedelta(self):
dtype = {'b': CategoricalDtype(pd.to_timedelta(['1H', '2H', '3H']))}
data = "b\n1H\n2H\n3H"
expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_unexpected_categories(self):
        dtype = {'b': CategoricalDtype(['a', 'b', 'd', 'e'])}
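        # values outside the declared categories should be parsed as NaN
        data = "b\nd\na\nc\nd"  # 'c' is not among the declared categories
        expected = pd.DataFrame({'b': Categorical(list('dacd'),
                                                  dtype=dtype['b'])})
        result = self.read_csv(StringIO(data), dtype=dtype)
        tm.assert_frame_equal(result, expected)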
def btrain(names,homepath):
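    """Collate the per-dataset cancer and normal training files: each file is
    read, transposed so that samples become rows, concatenated, and the merged
    matrices are written out as bin_Cancer.txt.bz2 and bin_Normal.txt.bz2."""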
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
    # accumulate each dataset's cancer and normal training matrices
    cresult = pd.DataFrame()
    nresult = pd.DataFrame()
    for index in range(len(names)):
        Cancer = pd.read_csv(homepath + "/train_data/cancer/" +
                             names[index] + ".txt.bz2", header=None, delimiter="\t")
        Normal = pd.read_csv(homepath + "/train_data/normal/" +
                             names[index] + ".txt.bz2", header=None, delimiter="\t")
        # transpose so that each row is one sample
        Cancer = Cancer.T
        Normal = Normal.T
        frames1 = [Cancer, cresult]
        cresult = pd.concat(frames1)
        frames2 = [Normal, nresult]
        nresult = pd.concat(frames2)
    # merge all the cancer and all the normal data together and save them
cresult.to_csv(r''+homepath+'/train_data/bin_Cancer.txt.bz2',
compression="bz2", sep='\t',header=None,index=None,index_label=None)
nresult.to_csv(r''+homepath+'/train_data/bin_Normal.txt.bz2',
compression="bz2", sep='\t',header=None,index=None,index_label=None)
def btest(names,homepath):
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
#reading data and doing work
    cresult = pd.DataFrame()
import unittest
import pytest
import pandas as pd
from analitico.schema import generate_schema, apply_schema
from .test_mixin import TestMixin
# pylint: disable=no-member
@pytest.mark.django_db
class DatasetTests(unittest.TestCase, TestMixin):
""" Unit testing of Dataset functionality, reading, converting, transforms, saving, etc """
## Test creations
def test_dataset_csv1_basics(self):
""" Test empty dataset creation """
try:
ds = self.read_dataset_asset("ds_test_1.json")
self.assertEqual(ds.id, "ds_1")
df = ds.get_dataframe()
self.assertTrue(isinstance(df, pd.DataFrame))
self.assertEqual(len(df), 3)
self.assertEqual(df.columns[0], "First")
self.assertEqual(df.columns[1], "Second")
self.assertEqual(df.columns[2], "Third")
self.assertEqual(df.iloc[0, 0], 10)
self.assertEqual(df.iloc[1, 1], 21)
self.assertEqual(df.iloc[2, 2], 32)
except Exception as exc:
raise exc
def test_dataset_csv2_types_default(self):
""" Test standard data type conversions """
try:
ds = self.read_dataset_asset("ds_test_2.json")
self.assertEqual(ds.id, "ds_2")
df = ds.get_dataframe()
self.assertEqual(df.dtypes[0], "int64")
self.assertEqual(df.dtypes[1], "O")
self.assertEqual(df.dtypes[2], "float64")
except Exception as exc:
raise exc
def test_dataset_csv3_types_cast_float(self):
""" Test forcing integers to be floating point instead """
try:
df = self.read_dataframe_asset("ds_test_3_cast_float.json")
# would normally be int, but was forced to float
self.assertEqual(df.dtypes[0], "float64")
self.assertEqual(df.dtypes[1], "O")
self.assertEqual(df.dtypes[2], "float64")
except Exception as exc:
raise exc
def test_dataset_csv3_types_cast_string(self):
""" Test forcing float column to string """
try:
df = self.read_dataframe_asset("ds_test_3_cast_string.json")
self.assertEqual(df.dtypes[0], "int64")
self.assertEqual(df.dtypes[1], "O")
# third column would be float, but is cast to string
self.assertEqual(df.dtypes[2], "O")
self.assertEqual(df.iloc[2, 2], "32.50")
except Exception as exc:
raise exc
def test_dataset_csv4_applyschema_rename(self):
""" Test reading a table then renaming a column """
try:
df = self.read_dataframe_asset("ds_test_4.json")
schema = generate_schema(df)
columns = schema["columns"]
self.assertEqual(len(columns), 3)
self.assertEqual(df.columns[1], "Second")
schema["columns"][1]["rename"] = "Secondo"
df = apply_schema(df, schema)
columns = df.columns
self.assertEqual(df.columns[1], "Secondo")
except Exception as exc:
raise exc
def test_dataset_csv4_applyschema_index(self):
""" Test reading a table then making a column its index """
try:
df = self.read_dataframe_asset("ds_test_4.json")
schema = generate_schema(df)
columns = schema["columns"]
self.assertEqual(len(columns), 3)
self.assertEqual(df.index.name, None)
schema["columns"][0]["index"] = True
df = apply_schema(df, schema)
columns = df.columns
self.assertEqual(df.index.name, "First")
except Exception as exc:
raise exc
def test_dataset_csv4_applyschema_index_rename(self):
""" Test reading a table then making a column its index then renaming it """
try:
df = self.read_dataframe_asset("ds_test_4.json")
schema = generate_schema(df)
columns = schema["columns"]
self.assertEqual(len(columns), 3)
self.assertEqual(df.index.name, None)
schema["columns"][0]["index"] = True
schema["columns"][0]["rename"] = "Primo"
df = apply_schema(df, schema)
columns = df.columns
self.assertEqual(df.index.name, "Primo")
self.assertEqual(df.columns[0], "Primo")
except Exception as exc:
raise exc
def test_dataset_csv4_types_datetime_iso8601(self):
""" Test reading datetime in ISO8601 format """
try:
df = self.read_dataframe_asset("ds_test_4.json")
self.assertEqual(df.dtypes[0], "int64")
self.assertEqual(df.dtypes[1], "O")
self.assertTrue(isinstance(df.iloc[0, 2], pd.Timestamp))
self.assertTrue(isinstance(df.iloc[1, 2], pd.Timestamp))
self.assertTrue(isinstance(df.iloc[2, 2], pd.Timestamp))
self.assertTrue(isinstance(df.iloc[3, 2], pd.Timestamp))
self.assertEqual(df.iloc[0, 2], pd.Timestamp("2019-01-20 00:00:00"))
self.assertEqual(df.iloc[1, 2], pd.Timestamp("2019-01-20 16:30:15"))
self.assertEqual(df.iloc[2, 2], pd.Timestamp("2019-02-01 00:00:00"))
self.assertEqual(df.iloc[3, 2], pd.Timestamp("2019-01-01 00:00:00"))
# Timezones are state machines from row to row...
# 2019-09-15T15:53:00
self.assertEqual(df.iloc[4, 2], pd.Timestamp("2019-09-15 15:53:00"))
# 2019-09-15T15:53:00+05:00 (changes timezone)
self.assertEqual(df.iloc[5, 2], pd.Timestamp("2019-09-15 10:53:00"))
# 2019-09-15T15:53:00 (maintains +5 timezone)
self.assertEqual(df.iloc[6, 2], pd.Timestamp("2019-09-15 15:53:00"))
# 2019-09-15T15:53:00+00 (reverts timezone)
self.assertEqual(df.iloc[7, 2], pd.Timestamp("2019-09-15 15:53:00"))
# 2019-09-15T15:53:00-01:30 (changes timezone)
self.assertEqual(df.iloc[8, 2], pd.Timestamp("2019-09-15 17:23:00"))
# 20080915T155300Z (zulu time)
self.assertEqual(df.iloc[9, 2], pd.Timestamp("2008-09-15 15:53:00"))
# Time only uses today's date: 15:53:00.322348
self.assertEqual(df.iloc[10, 2], pd.Timestamp("15:53:00.322348"))
# Examples:
# http://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a003169814.htm
except Exception as exc:
raise exc
def test_dataset_csv5_category_no_schema(self):
""" Test reading categorical data without a schema """
try:
df = self.read_dataframe_asset("ds_test_5_category_no_schema.json")
self.assertEqual(len(df.columns), 10)
self.assertEqual(df.columns[0], "id")
self.assertEqual(df.columns[1], "name")
self.assertEqual(df.columns[2], "slug")
self.assertEqual(df.columns[3], "parent_id")
self.assertEqual(df.columns[4], "depth")
self.assertEqual(df.columns[5], "priority")
self.assertEqual(df.columns[6], "max_weight")
self.assertEqual(df.columns[7], "frozen")
self.assertEqual(df.columns[8], "rate")
self.assertEqual(df.columns[9], "has_ingredients_book")
# Column types
self.assertEqual(df.dtypes[0], "int") # id
self.assertEqual(df.dtypes[1], "O") # name
self.assertEqual(df.dtypes[2], "O") # slug
self.assertEqual(df.dtypes[3], "float") # parent_id
self.assertEqual(df.dtypes[7], "int") # frozen
# Items types
self.assertEqual(type(df.iloc[0, 1]).__name__, "str") # name
self.assertEqual(type(df.iloc[0, 2]).__name__, "str") # slug
self.assertEqual(type(df.iloc[0, 3]).__name__, "float64") # parent_id
except Exception as exc:
raise exc
def test_dataset_csv5_category_with_schema(self):
""" Test reading categorical data with a schema, check types """
try:
df = self.read_dataframe_asset("ds_test_5_category_with_schema.json")
self.assertEqual(len(df.columns), 10)
self.assertEqual(df.columns[0], "id")
self.assertEqual(df.columns[1], "name")
self.assertEqual(df.columns[2], "slug")
self.assertEqual(df.columns[3], "parent_id")
self.assertEqual(df.columns[4], "depth")
self.assertEqual(df.columns[5], "priority")
self.assertEqual(df.columns[6], "max_weight")
self.assertEqual(df.columns[7], "frozen")
self.assertEqual(df.columns[8], "rate")
self.assertEqual(df.columns[9], "has_ingredients_book")
# Column types
self.assertEqual(df.dtypes[0], "int") # id
self.assertEqual(df.dtypes[1], "category") # name
self.assertEqual(df.dtypes[2], "category") # slug
self.assertEqual(df.dtypes[3], "float") # parent_id
self.assertEqual(df.dtypes[7], "bool") # frozen
except Exception as exc:
raise exc
def test_dataset_csv5_category_check_values(self):
""" Test reading categorical data, check values """
try:
df = self.read_dataframe_asset("ds_test_5_category_with_schema.json")
# Items types
self.assertEqual(type(df.iloc[0, 1]).__name__, "str") # name
self.assertEqual(type(df.iloc[0, 2]).__name__, "str") # slug
self.assertEqual(type(df.iloc[0, 3]).__name__, "float64") # parent_id
self.assertEqual(type(df.iloc[0, 7]).__name__, "bool_") # frozen
except Exception as exc:
raise exc
def test_dataset_csv5_category_no_index(self):
""" Test reading categorical data, check index column """
try:
df1 = self.read_dataframe_asset("ds_test_5_category_with_schema.json")
# By default the index column is the row number.
# If the dataset has an index or id row it is just like
# any other row and is not used to index the pandas dataset
self.assertFalse(df1.loc[205, "frozen"])
self.assertEqual(df1.loc[205, "slug"], "sughi-pronti-primi-piatti")
self.assertEqual(df1.loc[205, "parent_id"], 100150)
# Apply the correct index column manually
df2 = df1.set_index("id", drop=False)
self.assertFalse(df2.loc[205, "frozen"])
self.assertEqual(df2.loc[205, "slug"], "carne-tacchino")
self.assertEqual(df2.loc[205, "parent_id"], 100102)
except Exception as exc:
raise exc
def test_dataset_csv5_category_with_index(self):
""" Test reading categorical data, check explicit index column """
try:
df = self.read_dataframe_asset("ds_test_5_category_with_index.json")
self.assertFalse(df.loc[205, "frozen"])
self.assertEqual(df.loc[205, "slug"], "carne-tacchino")
self.assertEqual(df.loc[205, "parent_id"], 100102)
except Exception as exc:
raise exc
def test_dataset_csv6_weird_index_no_attr(self):
""" Test reading table with 'weird' index column explicitly marked in schema """
try:
df = self.read_dataframe_asset("ds_test_6_weird_index_no_attr.json")
self.assertEqual(df.loc[8, "slug"], "pasta-riso-cereali")
self.assertEqual(df.loc[27, "slug"], "2-alt-pasta")
except Exception as exc:
raise exc
def test_dataset_csv6_weird_index_with_attr(self):
""" Test reading table with 'weird' index column explicitly marked in schema """
try:
df = self.read_dataframe_asset("ds_test_6_weird_index_with_attr.json")
self.assertEqual(df.index.name, "indice")
self.assertEqual(df.loc[8, "slug"], "pane-pasticceria")
self.assertEqual(df.loc[27, "slug"], "sughi-scatolame-condimenti")
self.assertEqual(df.loc[100598, "slug"], "2-alt-salumi")
except Exception as exc:
raise exc
def test_dataset_csv7_timedelta(self):
""" Test timespan to timedelta automatic conversion """
try:
df = self.read_dataframe_asset("ds_test_7_autoschema.json")
# index is from column 'indice'
self.assertEqual(df.loc[1, "elapsed"], pd.Timedelta("1 day"))
self.assertEqual(df.loc[3, "elapsed"], pd.Timedelta("2 days"))
self.assertEqual(df.loc[4, "elapsed"], pd.Timedelta("3 days"))
self.assertEqual(df.loc[6, "elapsed"], pd.Timedelta("1 days 06:05:01.00003"))
except Exception as exc:
raise exc
def test_dataset_csv7_autoschema(self):
""" Test automatically generating an analitico schema from a pandas dataframe """
try:
df = self.read_dataframe_asset("ds_test_7_autoschema.json")
schema = generate_schema(df)
columns = schema["columns"]
self.assertEqual(len(columns), 12)
self.assertEqual(columns[0]["name"], "name")
self.assertEqual(columns[0]["type"], "string")
self.assertEqual(columns[1]["name"], "slug")
self.assertEqual(columns[1]["type"], "category")
self.assertEqual(columns[2]["name"], "parent_id")
self.assertEqual(columns[2]["type"], "float")
self.assertEqual(columns[3]["name"], "depth")
self.assertEqual(columns[3]["type"], "integer")
self.assertEqual(columns[4]["name"], "priority")
self.assertEqual(columns[4]["type"], "integer")
self.assertEqual(columns[5]["name"], "max_weight")
self.assertEqual(columns[5]["type"], "integer")
self.assertEqual(columns[6]["name"], "frozen")
self.assertEqual(columns[6]["type"], "boolean")
self.assertEqual(columns[7]["name"], "rate")
self.assertEqual(columns[7]["type"], "float")
self.assertEqual(columns[8]["name"], "has_ingredients_book")
self.assertEqual(columns[8]["type"], "boolean")
self.assertEqual(columns[9]["name"], "indice")
self.assertEqual(columns[9]["type"], "integer")
self.assertEqual(columns[9]["index"], True)
self.assertEqual(columns[10]["name"], "updated_at")
self.assertEqual(columns[10]["type"], "datetime")
self.assertEqual(columns[11]["name"], "elapsed")
self.assertEqual(columns[11]["type"], "timespan")
except Exception as exc:
raise exc
def test_dataset_csv7_reordering(self):
""" Test reordering of columns in the source """
try:
df = self.read_dataframe_asset("ds_test_7_reordering.json")
self.assertEqual(len(df.columns), 12)
self.assertEqual(df.columns[0], "depth")
self.assertEqual(df.columns[1], "elapsed")
self.assertEqual(df.columns[2], "frozen")
self.assertEqual(df.columns[3], "has_ingredients_book")
self.assertEqual(df.columns[4], "indice")
self.assertEqual(df.columns[5], "max_weight")
self.assertEqual(df.columns[6], "name")
self.assertEqual(df.columns[7], "parent_id")
self.assertEqual(df.columns[8], "priority")
self.assertEqual(df.columns[9], "rate")
self.assertEqual(df.columns[10], "slug")
self.assertEqual(df.columns[11], "updated_at")
except Exception as exc:
raise exc
def test_dataset_csv7_filtering(self):
""" Test removing columns in the source """
try:
df = self.read_dataframe_asset("ds_test_7_filtering.json")
self.assertEqual(len(df.columns), 4)
self.assertEqual(df.columns[0], "indice")
self.assertEqual(df.columns[1], "name")
self.assertEqual(df.columns[2], "slug")
self.assertEqual(df.columns[3], "frozen")
except Exception as exc:
raise exc
def test_dataset_csv8_unicode(self):
""" Test unicode chars in the source """
try:
df = self.read_dataframe_asset("ds_test_8_unicode.json")
self.assertEqual(len(df.columns), 3)
self.assertEqual(df.columns[0], "index")
self.assertEqual(df.columns[1], "language")
self.assertEqual(df.columns[2], "message")
self.assertEqual(
df.loc[0, "message"],
"! \" # $ % & ' ( ) * + - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \\ ] ^ _ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~",
)
self.assertEqual(
df.loc[1, "message"],
"¡ ¢ £ ¤ ¥ ¦ § ¨ © ª « ¬ \xad ® ¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö × Ø Ù Ú Û Ü Ý Þ ß à á â ã ä å æ ç è é ê ë ì í î ï ð ñ ò ó ô õ ö ÷ ø ù ú û ü ý þ ÿ",
)
self.assertEqual(
df.loc[2, "message"],
"Ё Ђ Ѓ Є Ѕ І Ї Ј Љ Њ Ћ Ќ Ў Џ А Б В Г Д Е Ж З И Й К Л М Н О П Р С Т У Ф Х Ц Ч Ш Щ Ъ Ы Ь Э Ю Я а б в г д е ж з и й к л м н о п р с т у ф х ц ч ш щ ъ ы ь э ю я ё ђ ѓ є ѕ і ї ј љ њ ћ ќ ў џ Ѡ ѡ Ѣ ѣ Ѥ ѥ Ѧ ѧ Ѩ ѩ Ѫ ѫ Ѭ ѭ Ѯ ѯ Ѱ ѱ Ѳ ѳ Ѵ ѵ Ѷ ѷ Ѹ ѹ Ѻ ѻ Ѽ ѽ Ѿ",
)
self.assertEqual(
df.loc[3, "message"],
"؛ ؟ ء آ أ ؤ إ ئ ا ب ة ت ث ج ح خ د ذ ر ز س ش ص ض ط ظ ع غ ـ ف ق ك ل م ن ه و ى ي ً ٌ ٍ َ ُ ِ ّ ْ ٠ ١ ٢ ٣ ٤ ٥ ٦ ٧ ٨ ٩ ٪ ٫ ٬ ٭ ٰ ٱ ٲ ٳ ٴ ٵ ٶ ٷ ٸ ٹ ٺ ٻ ټ ٽ پ ٿ ڀ ځ ڂ ڃ ڄ څ چ ڇ ڈ ډ ڊ ڋ ڌ ڍ ڎ ڏ ڐ ڑ ڒ ړ ڔ ڕ ږ ڗ ژ ڙ ښ ڛ ڜ ڝ ڞ ڟ ڠ ڡ ڢ ڣ ڤ ڥ ڦ ڧ ڨ ک",
)
self.assertEqual(
df.loc[4, "message"],
"ก ข ฃ ค ฅ ฆ ง จ ฉ ช ซ ฌ ญ ฎ ฏ ฐ ฑ ฒ ณ ด ต ถ ท ธ น บ ป ผ ฝ พ ฟ ภ ม ย ร ฤ ล ฦ ว ศ ษ ส ห ฬ อ ฮ ฯ ะ ั า ำ ิ ี ึ ื ุ ู ฺ ฿ เ แ โ ใ ไ ๅ ๆ ็ ่ ้ ๊ ๋ ์ ํ ๎ ๏ ๐ ๑ ๒ ๓ ๔ ๕ ๖ ๗ ๘ ๙ ๚ ๛",
)
self.assertEqual(
df.loc[5, "message"],
"一 丁 丂 七 丄 丅 丆 万 丈 三 上 下 丌 不 与 丏 丐 丑 丒 专 且 丕 世 丗 丘 丙 业 丛 东 丝 丞 丟 丠 両 丢 丣 两 严 並 丧 丨 丩 个 丫 丬 中 丮 丯 丰 丱 串 丳 临 丵 丶 丷 丸 丹 为 主 丼 丽 举 丿 乀 乁 乂 乃 乄 久 乆 乇 么 义 乊 之 乌 乍 乎 乏 乐 乑 乒 乓 乔 乕 乖 乗 乘 乙 乚 乛 乜 九 乞 也 习 乡 乢 乣 乤 乥 书 乧 乨 乩 乪 乫 乬 乭 乮 乯 买 乱 乲 乳 乴 乵 乶 乷 乸 乹 乺 乻 乼 乽 乾 乿",
)
except Exception as exc:
raise exc
def test_dataset_csv9_types_datetime_nulls(self):
""" Test reading datetime that has null values """
try:
df = self.read_dataframe_asset("ds_test_9.json")
self.assertEqual(df.dtypes[0], "int64")
self.assertEqual(df.dtypes[1], "O")
self.assertTrue(df.dtypes[2] == "datetime64[ns]")
self.assertTrue(isinstance(df.iloc[0, 2], pd.Timestamp))
self.assertTrue(isinstance(df.iloc[1, 2], pd.Timestamp))
            # all these are variations on None and null
self.assertTrue(pd.isnull(df.iloc[2, 2]))
            self.assertTrue(pd.isnull(df.iloc[3, 2]))
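        except Exception as exc:
            raise exc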
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
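    """Unit tests for the pandas Categorical type: construction, comparisons,
    the categories API, ordering, printing, NaN handling and reductions."""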
_multiprocess_can_split_ = True
def setUp(self):
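        # ordered Categorical fixture shared by several tests below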
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
        cat_unordered = cat.set_ordered(False)
        self.assertFalse((cat > cat).any())
        def f():
            cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
        # account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
        # deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
        self.assertEqual(c.min(), 4)
        self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical return np.array like pd.Series, but different from
# np.array.searchsorted()
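        # (illustrative note, not part of the original test: a plain numpy
        #  array's searchsorted returns a scalar position for a scalar key,
        #  e.g. np.asarray(s1).searchsorted('bread') == 1)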
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
        exp = np.array([3, 5])  # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
        x['person_name'] = pd.Categorical(x.person_name)  # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
        df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list('abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
            str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test all length
idx = | pd.period_range('2011-01-01 09:00', freq='H', periods=1) | pandas.period_range |
'''
Created on 18.03.2015
@author: <NAME>
'''
import pandas as pd
from pandas import Series, DataFrame, MultiIndex
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.gridspec as gridspec
import numpy as np
from matplotlib.patches import Polygon
# import HistoQhObs as HistoQhObs
# import HistoQhObs_Together as HistoQhObs_Together
# import plotDiurnalValidateNew as plotDiurnalValidateNew
# import plotWAT as plotWAT
sizeText=10
params = {'backend': 'wxAgg', 'lines.markersize' : 6,
'axes.labelsize': sizeText, "mathtext.default":"regular",
'text.fontsize': sizeText, 'axes.titlesize':sizeText, 'legend.fontsize': sizeText,
'xtick.labelsize': sizeText, 'ytick.labelsize': sizeText}
plt.rcParams.update(params)
fontsize_XLabel = 14
fontsize_YLabel = 14
fontsize_title = 14
fontsize_XTicks = 14
fontsize_YTicks = 14
fontsize_Legend = 14
WithLegendFrame = False
def create_Standardfigure():
"""
prepares a figures """
fontsize_XLabel = 14
fontsize_YLabel = 14
fontsize_title = 14
fontsize_XTicks = 14
fontsize_YTicks = 14
fontsize_Legend = 14
WithLegendFrame = False
fig = plt.figure(figsize=(8, 5))
fig.subplots_adjust(left=0.15)
gs1 = gridspec.GridSpec(1, 1)
ax = plt.subplot(gs1[0, :])
ax.set_ylim(0,1.1)
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.3, box.width, box.height * 0.7])
#ax.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 5))
#ax.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 25), minor=True)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%I:%M %p'))
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2),frameon=WithLegendFrame, ncol=2, fontsize=fontsize_Legend)
return fig, ax
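
# The sketch below is an assumption about intended usage (it is not part of the
# original analysis): it shows how create_Standardfigure() would typically be
# combined with a time-indexed pandas Series. The function and variable names
# used here are illustrative only.
def _example_standardfigure_usage(series):
    """Illustrative sketch only: plot a time-indexed Series on the standard figure."""
    fig, ax = create_Standardfigure()
    ax.plot(series.index, series.values, label="measured")
    # redraw the legend now that a labelled line exists
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2),
              frameon=WithLegendFrame, ncol=2, fontsize=fontsize_Legend)
    return fig, ax
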
def Histogram_AT():
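    # Build outdoor-temperature (AT) categories A-E for the 2012 record, count
    # how often each category occurs, count day-to-day category changes and,
    # finally, split the data into weekdays and weekends.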
recFolder = 'D:/ghi-mbe/Daten Auswertung/records/AT/'
t_1 = 5.0
t_2 = 11.0
t_3 = 14.0
t_4 = 18.0
n_0 = "<5" # "A"
n_1 = "5>11" # "B"
n_2 = "11>14" # "C"
n_3 = "14>18" # "D"
n_4 = ">18" # "E"
n_0 = "A"
n_1 = "B"
n_2 = "C"
n_3 = "D"
n_4 = "E"
def func_AT(row):
if row["Weather","-","-","AT"] <= t_1:
return n_0
elif t_1 < row["Weather","-","-","AT"] <= t_2:
return n_1
elif t_2 < row["Weather","-","-","AT"] <= t_3:
return n_2
elif t_3 < row["Weather","-","-","AT"] <= t_4:
return n_3
else:
return n_4
def func_rAT(row):
if row["Weather","-","-","rAT"] <= t_1:
return n_0
elif t_1 < row["Weather","-","-","rAT"] <= t_2:
return n_1
elif t_2 < row["Weather","-","-","rAT"] <= t_3:
return n_2
elif t_3 < row["Weather","-","-","rAT"] <= t_4:
return n_3
else:
return n_4
df1=pd.read_csv(recFolder+'AT2012.csv',index_col=0,sep=';', header=[0,1,2,3],low_memory=False,parse_dates=True)
df1["Weather","-","-","rAT"] = df1.apply(pd.Series.round)
df1["Weather","-","-","Kategorie_AT"] = df1.apply(func_AT, axis=1)
df1["Weather","-","-","Kategorie_rAT"] = df1.apply(func_rAT, axis=1)
    # Count how many rows fall into each category (AT and rounded AT)
Kategorie_A = df1[df1["Weather","-","-","Kategorie_AT"]=="A"]
Kategorie_B = df1[df1["Weather","-","-","Kategorie_AT"]=="B"]
Kategorie_C = df1[df1["Weather","-","-","Kategorie_AT"]=="C"]
Kategorie_D = df1[df1["Weather","-","-","Kategorie_AT"]=="D"]
Kategorie_E = df1[df1["Weather","-","-","Kategorie_AT"]=="E"]
Kategorie_rA = df1[df1["Weather","-","-","Kategorie_rAT"]=="A"]
Kategorie_rB = df1[df1["Weather","-","-","Kategorie_rAT"]=="B"]
Kategorie_rC = df1[df1["Weather","-","-","Kategorie_rAT"]=="C"]
Kategorie_rD = df1[df1["Weather","-","-","Kategorie_rAT"]=="D"]
Kategorie_rE = df1[df1["Weather","-","-","Kategorie_rAT"]=="E"]
    # Overall category counts (AT vs. rounded AT)
    print ("Category A:", len(Kategorie_A), "Category rA:", len(Kategorie_rA))
    print ("Category B:", len(Kategorie_B), "Category rB:", len(Kategorie_rB))
    print ("Category C:", len(Kategorie_C), "Category rC:", len(Kategorie_rC))
    print ("Category D:", len(Kategorie_D), "Category rD:", len(Kategorie_rD))
    print ("Category E:", len(Kategorie_E), "Category rE:", len(Kategorie_rE))
    print ("Sum of categories A-E:", len(Kategorie_A)+len(Kategorie_B)+len(Kategorie_C)+len(Kategorie_D)+len(Kategorie_E))
    print ("Sum of categories rA-rE:", len(Kategorie_rA)+len(Kategorie_rB)+len(Kategorie_rC)+len(Kategorie_rD)+len(Kategorie_rE))
    # Count the day-to-day category changes
Wechsel_A_B = 0
Wechsel_B_C = 0
Wechsel_C_D = 0
Wechsel_D_E = 0
for index, line in enumerate(df1.iterrows()):
if index == len(df1.index)-1:
print ("no")
else:
if df1["Weather","-","-","Kategorie_AT"][index] == "A" and df1["Weather","-","-","Kategorie_AT"][index+1] == "B":
Wechsel_A_B = Wechsel_A_B + 1
if df1["Weather","-","-","Kategorie_AT"][index] == "B" and df1["Weather","-","-","Kategorie_AT"][index+1] == "C":
Wechsel_B_C = Wechsel_B_C + 1
if df1["Weather","-","-","Kategorie_AT"][index] == "C" and df1["Weather","-","-","Kategorie_AT"][index+1] == "D":
Wechsel_C_D = Wechsel_C_D + 1
if df1["Weather","-","-","Kategorie_AT"][index] == "D" and df1["Weather","-","-","Kategorie_AT"][index+1] == "E":
Wechsel_D_E = Wechsel_D_E + 1
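    # Sketch of a vectorized alternative (assumption, not used above):
    #   cur = df1["Weather", "-", "-", "Kategorie_AT"]
    #   nxt = cur.shift(-1)
    #   Wechsel_A_B = int(((cur == "A") & (nxt == "B")).sum())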
    # Identify weekdays and weekends
df1['dayNumber'] = df1.index.weekday
onlyWeekdays = df1[df1['dayNumber']<5]
onlyWeekend = df1[df1['dayNumber']>=5]
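    # (in pandas, index.weekday gives Monday=0 ... Sunday=6, so >= 5 above selects the weekend)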
print ("Histogram_AT done")
def Select_ColorsAndMarkers(Level0="", Level2="",Level3="", Level4="", Level5=""):
markEntr_Alt1 = True
print ("Start SelectAnalysisFunction")
# ColorList Level0
colorsTemperature=["LimeGreen",'Indigo','RoyalBlue','DeepSkyBlue','Orange','Red']
markersTemperature=['^','o','s','*','d','v']
# ColorList Level2
colorsEntrances=["LimeGreen","ForestGreen","DarkGreen","LightSkyBlue","CornflowerBlue","DarkSlateBlue"]
if markEntr_Alt1:
markersEntrances=['^','o','s','*','d','v'] # alternative 1
else:
markersEntrances2=['^','o','s','^','o','s'] # alternative 2
markersEntrances = markersEntrances2
# ColorList Level3
colorsAps=["Sienna","FireBrick","Red","OrangeRed","Tomato","DeepPink","Fuchsia","Magenta","MediumVioletRed","Crimson","LimeGreen"]
markersAps=["s",'^','o','h','+','x','s','p','*','d',None]
# ColorList Level4
colorRooms=["LimeGreen",'Crimson','GoldenRod','CornflowerBlue',"DarkGreen",'MidnightBlue']
markersRooms=[None,'^','o','s','*','d']
    # Check lists of valid values
CheckTemperatures = ["T1","T2","T3","T4","T5"]
CheckTemperatures = ["T0","T1","T2","T3","T4","T5"]
CheckEntrances = ["B2E1","B2E2","B2E3","B3E1","B3E2","B3E3"]
CheckApartments = ["A01","A02","A03","A04","A05","A06","A07","A08","A09","A10",'-']
CheckRooms = ['-', "Room_Bath","Room_Children","Room_Kitchen","Room_Living","Room_Sleeping",]
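    # Illustrative call (the argument values are assumptions drawn from the
    # check lists above, not from the original script):
    #   colors, markers, title, labels, sel = Select_ColorsAndMarkers(
    #       Level0="T0", Level2="B2E1", Level3="-", Level4="-", Level5="WP1")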
if Level0 == "T0":
#print "Nur eine Linie, also alle Temperaturbereiche zusammen"
        if Level2 is None:
#print "Alle Eingaenge"
if Level3 == "-":
#print "mean von allen Apartments"
if Level4 == "-":
#print "mean von allen Rooms"
if Level5 == "WP1":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments","meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments","meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments","meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments","meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments","meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments","meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
#-----------------------------------------------------------------
#-----------------------------------------------------------------
elif Level4 in CheckRooms:
#print Level4
if Level5 == "WP1":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments",Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments",Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments",Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments",Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments",Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments",Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
#-----------------------------------------------------------------
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
elif Level3 in CheckApartments:
#print Level3
if Level4 == "-":
print ("mean von allen Rooms")
if Level5 == "WP1":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,"meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,"meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,"meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,"meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,"meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,"meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
#-----------------------------------------------------------------
#-----------------------------------------------------------------
elif Level4 in CheckRooms:
#print Level4
if Level5 == "WP1":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
#-----------------------------------------------------------------
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
else:
print ("ERROR: Auswahl Level3 nicht korrekt")
elif Level2 in CheckEntrances:
#print Level2
if Level3 == "-":
#print "mean von allen Apartments"
if Level4 == "-":
#print "mean von allen Rooms"
if Level5 == "WP1":
#print Level5
colorList = colorsEntrances[CheckEntrances.index(Level2)]
markerList = markersEntrances[CheckEntrances.index(Level2)]
title = ["T0", Level2,"meanApartments","meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsEntrances[CheckEntrances.index(Level2)]
markerList = markersEntrances[CheckEntrances.index(Level2)]
title = ["T0", Level2,"meanApartments","meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsEntrances[CheckEntrances.index(Level2)]
markerList = markersEntrances[CheckEntrances.index(Level2)]
title = ["T0", Level2,"meanApartments","meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsEntrances[CheckEntrances.index(Level2)]
markerList = markersEntrances[CheckEntrances.index(Level2)]
title = ["T0", Level2,"meanApartments","meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsEntrances[CheckEntrances.index(Level2)]
markerList = markersEntrances[CheckEntrances.index(Level2)]
title = ["T0", Level2,"meanApartments","meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsEntrances[CheckEntrances.index(Level2)]
markerList = markersEntrances[CheckEntrances.index(Level2)]
title = ["T0", Level2,"meanApartments","meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 == None:
print ("Alle Rooms")
if Level5 == "WP1":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,"meanApartments","allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,"meanApartments","allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,"meanApartments","allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,"meanApartments","allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,"meanApartments","allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,"meanApartments","allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 in CheckRooms:
#print Level4
if Level5 == "WP1":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,"meanApartments",Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,"meanApartments",Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,"meanApartments",Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,"meanApartments",Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,"meanApartments",Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,"meanApartments",Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
elif Level3 == None:
#print "Alle Apartments"
if Level4 == "-":
#print "mean von allen Rooms"
if Level5 == "WP1":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments","meanRooms",Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments","meanRooms",Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments","meanRooms",Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments","meanRooms",Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments","meanRooms",Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments","meanRooms",Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 in CheckRooms:
#print Level4
if Level5 == "WP1":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments",Level4,Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments",Level4,Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments",Level4,Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments",Level4,Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments",Level4,Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments",Level4,Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
elif Level3 in CheckApartments:
#print Level3
if Level4 == "-":
#print "mean von allen Rooms"
if Level5 == "WP1":
#print Level5
colorList = colorsAps[CheckApartments.index(Level3)]
markerList = markersAps[CheckApartments.index(Level3)]
title = ["T0", Level2,Level3,"meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsAps[CheckApartments.index(Level3)]
markerList = markersAps[CheckApartments.index(Level3)]
title = ["T0", Level2,Level3,"meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsAps[CheckApartments.index(Level3)]
markerList = markersAps[CheckApartments.index(Level3)]
title = ["T0", Level2,Level3,"meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsAps[CheckApartments.index(Level3)]
markerList = markersAps[CheckApartments.index(Level3)]
title = ["T0", Level2,Level3,"meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsAps[CheckApartments.index(Level3)]
markerList = markersAps[CheckApartments.index(Level3)]
title = ["T0", Level2,Level3,"meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsAps[CheckApartments.index(Level3)]
markerList = markersAps[CheckApartments.index(Level3)]
title = ["T0", Level2,Level3,"meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 == None:
#print "Alle Rooms"
if Level5 == "WP1":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,Level3,"allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,Level3,"allRooms",Level5]
                        labels = CheckRooms
                        selctionStrings = [Level0,Level2,Level3,Level4,Level5]
                        return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,Level3,"allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,Level3,"allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,Level3,"allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,Level3,"allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 in CheckRooms:
#print Level4
if Level5 == "WP1":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,Level3,Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,Level3,Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,Level3,Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,Level3,Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,Level3,Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,Level3,Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
else:
print ("ERROR: Auswahl Level3 nicht korrekt")
else:
print ("ERROR: Auswahl Level2 nicht eindeutig")
#-----------------------------------------------------------------
#-----------------------------------------------------------------
#-----------------------------------------------------------------
#-----------------------------------------------------------------
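    # Level0 == None: all temperature bins (T0 ... T5) are shown together, so every branch
    # below returns the temperature colour/marker sets with CheckTemperatures as labels.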
elif Level0 == None:
print ("Alle Linien, also T0 ..... T5")
if Level2 in CheckEntrances:
#print Level2
if Level3 == "-":
#print "mean alle Apartments"
if Level4 == '-':
#print "mean alle Rooms"
if Level5 == "WP1":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 in CheckRooms:
#print Level4
if Level5 == "WP1":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
elif Level3 in CheckApartments:
#print Level3
if Level4 == '-':
#print "mean alle Rooms"
if Level5 == "WP1":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3, "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3, "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3, "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3, "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3, "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3, "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 in CheckRooms:
#print Level4
if Level5 == "WP1":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3,Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3,Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3,Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3,Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3,Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
##print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3,Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
else:
print ("ERROR: Auswahl Level3 nicht korrekt")
else:
print ("ERROR: Auswahl Level2 nicht eindeutig")
#-----------------------------------------------------------------
#-----------------------------------------------------------------
#-----------------------------------------------------------------
#-----------------------------------------------------------------
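    # Level0 is a single bin T1..T5: the selected bin is always plotted against the reference
    # bin T0, hence the two-entry colour/marker/label lists and the ["T0", Level0] pair in the
    # first slot of selctionStrings.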
elif Level0 in ["T1","T2","T3","T4","T5"]:
if Level2 in CheckEntrances:
if Level3 == "-":
if Level4 == "-":
if Level5 == "WP1":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments","meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments","meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments","meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments","meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments","meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments","meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 in CheckRooms:
if Level5 == "WP1":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments",Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments",Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments",Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments",Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments",Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments",Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
elif Level3 in CheckApartments:
if Level4 == "-":
if Level5 == "WP1":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,"meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,"meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,"meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,"meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,"meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,"meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 in CheckRooms:
if Level5 == "WP1":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
else:
print ("ERROR: Auswahl Level3 nicht korrekt")
else:
print ("ERROR: Auswahl Level2 nicht eindeutig")
else:
print ("ERROR: Auswahl Level0[0] nicht eindeutig")
print ("Ende SelectAnalysisFunction")
def english2German(titleList,labelList):
translateDictonary ={"B2E1":"R2E1",
"B2E2":"R2E2",
"B2E3":"R2E3",
"B3E1":"R3E1",
"B3E2":"R3E2",
"B3E2":"R3E3",
"allBuildings": "Gebaeude",
"meanApartment": "Durchschnitt Wohnung",
"allApartments": "Wohnung",
"Room_Sleeping":"Schlafzimmer",
"Room_Kitchen": u"Kueche",
"Room_Children": "Kinderzimmer",
"Room_Living": "Wohnzimmer",
"Room_Bath": "Badezimmer",
"allRooms": "Zimmer",
"meanRooms": "Durchschnitt Zimmer",
"T0": "ATR",
"T1": 'DAT $\leq$ 5',
"T2": "5 $\leq$ DAT $\leq$ 11",
"T3": "11 $\leq$ DAT $\leq$ 14",
"T4": "14 $\leq$ DAT $\leq$ 18",
"T5": "DAT $\geq$ 18",
"-":"Durschnitt"}
new_titleList = []
for titleComponent in titleList:
if titleComponent in translateDictonary.keys():
new_titleList.append(translateDictonary.get(titleComponent))
else:
new_titleList.append(titleComponent)
new_labelList = []
for labelComponent in labelList:
if labelComponent in translateDictonary.keys():
new_labelList.append(translateDictonary.get(labelComponent))
else:
new_labelList.append(labelComponent)
return new_titleList, new_labelList
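# codifyL1 rewrites the first entry of a selection list: temperature-bin codes (T0 ... T5)
# become human-readable daily-average-temperature ranges, other values pass through.
# If slot 0 holds a list (e.g. ["T0", "T3"]), only its first element is written back.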
def codifyL1(codeList):
    if codeList[0] is None:
return codeList
else:
codeListZ=codeList[0]
translateDictonary={'T0':'ATR',
                        'T1':'AT Daily Average <= 5',
                        'T2':'5 < AT Daily Average <= 11',
                        'T3':'11 < AT Daily Average <= 14',
'T4':'14 < AT Daily Average <= 18',
'T5':'18 < AT Daily Average'}
        if isinstance(codeListZ, str): codeListZ=[codeListZ]  # basestring exists only in Python 2
new_codeList = []
for titleComponent in codeListZ:
if titleComponent in translateDictonary.keys():
new_codeList.append(translateDictonary.get(titleComponent))
else:
new_codeList.append(titleComponent)
codeList[0]=new_codeList[0]
print (new_codeList[0])
return codeList
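# english2English is the English counterpart of english2German: it expands the internal
# component names into readable English labels for plot titles and legends.
# Hypothetical example (components not in the dictionary pass through unchanged):
#   english2English(["T0", "B2E1", "meanRooms"], ["Room_Kitchen"])
#   -> (["ATR", "B2E1", "Mean rooms"], ["Kitchen"])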
def english2English(titleList,labelList):
translateDictonary ={"B2E1":"B2E1",
"B2E2":"B2E2",
"B2E3":"B2E3",
"B3E1":"B3E1",
"B3E2":"B3E2",
"B3E2":"B3E3",
"allBuildings": "all buildings",
"meanApartments": "Mean Apartment",
"allApartments": "all Apartments",
"Room_Sleeping":"Sleeping room",
"Room_Kitchen": "Kitchen",
"Room_Children": "Children room",
"Room_Living": "Living room",
"Room_Bath": "Bathroom",
"allRooms": "all Rooms",
"meanRooms": "Mean roooms",
"T0": "ATR",
"T1": 'DAT $\leq$ 5',
"T2": "5 $\leq$ DAT $\leq$ 11",
"T3": "11 $\leq$ DAT $\leq$ 14",
"T4": "14 $\leq$ DAT $\leq$ 18",
"T5": "DAT $\geq$ 18",
"-":"Average"}
new_titleList = []
for titleComponent in titleList:
if titleComponent in translateDictonary.keys():
new_titleList.append(translateDictonary.get(titleComponent))
else:
new_titleList.append(titleComponent)
new_labelList = []
for labelComponent in labelList:
if labelComponent in translateDictonary.keys():
new_labelList.append(translateDictonary.get(labelComponent))
else:
new_labelList.append(labelComponent)
return new_titleList, new_labelList
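# readDF narrows up to six MultiIndex-column DataFrames to the columns matching the given
# level values; a level of None means "keep everything on that level". The column levels
# appear to be ordered (temperature bin, time type, data type, entrance, apartment, room, WP).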
def readDF(df1=pd.DataFrame(),df2=pd.DataFrame(),df3=pd.DataFrame(),df4=pd.DataFrame(),df5=pd.DataFrame(),df6=pd.DataFrame(),level0='ATR',level1='Standard Diurnal',level2='MD',level3='B2E1',level4='A01',level5='Room_Living',level6="WP1"):
levels=[level0,level1,level2,level3,level4,level5,level6]
print (levels)
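    # Filter column level by column level; skipping a level (None) leaves that axis untouched.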
if not df1.empty:
for levelNr,level in enumerate(levels):
if level!=None: df1=df1.iloc[:,df1.columns.get_level_values(levelNr)==level]
if not df2.empty:
for levelNr,level in enumerate(levels):
if level!=None: df2=df2.iloc[:,df2.columns.get_level_values(levelNr)==level]
if not df3.empty:
for levelNr,level in enumerate(levels):
if level!=None: df3=df3.iloc[:,df3.columns.get_level_values(levelNr)==level]
if not df4.empty:
for levelNr,level in enumerate(levels):
if level!=None: df4=df4.iloc[:,df4.columns.get_level_values(levelNr)==level]
if not df5.empty:
for levelNr,level in enumerate(levels):
if level!=None: df5=df5.iloc[:,df5.columns.get_level_values(levelNr)==level]
if not df6.empty:
for levelNr,level in enumerate(levels):
if level!=None: df6=df6.iloc[:,df6.columns.get_level_values(levelNr)==level]
print ("COls: {}".format(df1.columns))
print ("Ende readDF")
def plotDiurnal(df,df2, labels=[],levels=[],timeType='Standard Diurnal',dataType='MD',title=None,colors=None):
if levels[0]!=None:
df=df.iloc[:,df.columns.get_level_values(0)==levels[0]]
df2=df2.iloc[:,df2.columns.get_level_values(0)==levels[0]]
if timeType!=None:
df=df.iloc[:,df.columns.get_level_values(1)==timeType]
df2=df2.iloc[:,df2.columns.get_level_values(1)==timeType]
if dataType!=None:
df=df.iloc[:,df.columns.get_level_values(2)==dataType]
df2=df2.iloc[:,df2.columns.get_level_values(2)==dataType]
if levels[1]!=None:
df=df.iloc[:,df.columns.get_level_values(3)==levels[1]]
df2=df2.iloc[:,df2.columns.get_level_values(3)==levels[1]]
if levels[2]!=None:
df=df.iloc[:,df.columns.get_level_values(4)==levels[2]]
df2=df2.iloc[:,df2.columns.get_level_values(4)==levels[2]]
if levels[3]!=None:
df=df.iloc[:,df.columns.get_level_values(5)==levels[3]]
df2=df2.iloc[:,df2.columns.get_level_values(5)==levels[3]]
if levels[4]!=None:
df=df.iloc[:,df.columns.get_level_values(6)==levels[4]]
df2=df2.iloc[:,df2.columns.get_level_values(6)==levels[4]]
fig = plt.figure(figsize=(16./2.54, 10/2.54))
fig.subplots_adjust(left=0.1)
gs1 = gridspec.GridSpec(1, 1)
#ax = plt.subplot(gs1[0, :])
ax = plt.axes([0.1, 0.1, .85, .8])
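    # Observed profiles first (solid, semi-transparent), then the simulated profiles in the
    # same colours but drawn as sparse 'x' markers so both stay distinguishable.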
for index,column in enumerate(df.columns.values):
if index!=10: ax.plot(df.index, df[column], colors[index], linewidth=2.0,label=labels[index],alpha=0.4)
for index,column in enumerate(df2.columns.values):
if index!=10: ax.plot(df.index, df2[column], colors[index], marker="x", linewidth=0.7,markevery=60,mfc='None', mec=colors[index],label=labels[index]+' Sim')
ax.set_ylabel("Proportion of windows open")
ax.set_xlabel("Time of the day")
ticks = ax.get_xticks()
ax.set_ylim(0,1)
plt.title(title, y=1.05)
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.32,
box.width, box.height * 0.68])
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
ax.legend(loc='upper center', bbox_to_anchor=(0.475, -0.2),frameon=False, ncol=3)
plt.show()
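# plotBoxes draws box plots of the residuals (simulated minus observed, df2 - df) per
# temperature bin, overlays the mean residual of each box as a circle, and saves the figure
# as PNG and PDF into savingFolder.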
def plotBoxes(df,df2, labels=[],levels=[],title=None,colors=None, savingFolder="", extraName=""):
fig2= plt.figure(figsize=(16./2.54, 8/2.54))
fig2.subplots_adjust(left=0.1)
#gs2 = gridspec.GridSpec(1, 1)
#ax2 = fig2.add_subplot(gs2[0, :])
ax2 = fig2.add_axes([0.13, 0.355, .85, .55])
#plt.title(title, y=1.05)
bp = ax2.boxplot(df2.values-df.values, sym='-', vert=True, whis=1.5)#, linewidth=2.0,label=labels[index],alpha=0.4)
# Now fill the boxes with desired colors
boxColors = colors
bisColors = [a for a in colors for i in range(2)]
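    # Each box owns two whisker and two cap artists, hence the duplicated colour list;
    # numBoxes is hard-coded to the six temperature bins (T0 ... T5).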
numBoxes = 6
    medians = list(range(numBoxes))
    meanValues=pd.DataFrame(df2.values-df.values).mean(axis=0).values
    meanAbsResiduals=pd.DataFrame(abs(df2.values-df.values)).mean(axis=0).values
for i in range(numBoxes):
box = bp['boxes'][i]
boxY = []
boxX = []
for j in range(5):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
        boxCoords = list(zip(boxX,boxY))
boxPolygon = Polygon(boxCoords, facecolor=boxColors[i], alpha=0.1,zorder=1)
ax2.add_patch(boxPolygon)
# Now draw the median lines back over what we just filled in
med = bp['medians'][i]
medianX = []
medianY = []
for j in range(2):
medianX.append(med.get_xdata()[j])
medianY.append(med.get_ydata()[j])
plt.plot(medianX, medianY, boxColors[i],linewidth=2)
medians[i] = medianY[0]
# Finally, overplot the sample averages, with horizontal alignment
# in the center of each box
plt.plot([np.average(med.get_xdata())], meanValues[i],
color='None', marker='o', markeredgecolor=boxColors[i], markersize=7,zorder=0)
plt.plot([np.average(med.get_xdata())], meanValues[i],
color=boxColors[i], marker='o', markeredgecolor=boxColors[i], markersize=7,alpha=0.2,zorder=3)
plt.setp(bp['medians'][i], color=colors[i]) # DarkSlateGray
plt.setp(bp['boxes'][i], color='DarkSlateGray')
for i in range(len(bisColors)):
plt.setp(bp['whiskers'][i], color='DarkSlateGray')
plt.setp(bp['caps'][i], color='DarkSlateGray')
plt.setp(bp['fliers'], color='Gainsboro')
plt.setp(bp['whiskers'], linestyle='solid')
ax2.set_ylabel("Simulated-Observed WP profile")
# ax2.set_ylabel("Simulated-Observed WS")
ax2.yaxis.set_label_coords(-0.09, 0.5)
ax2.set_ylim(-0.02,0.02)
#ax2.set_yticks([0.2, 0.6, 0.8], minor=False)
ax2.yaxis.set_ticks_position('left')
ax2.xaxis.set_ticks_position('bottom')
#newLabels= ["ATR",'DAT $\leq$ 5'," 5 $\leq$ \nDAT\n $\leq$ 11", "11 $\leq$ \nDAT\n $\leq$ 14","14 $\leq$ \nDAT\n $\leq$ 18","DAT $\geq$ 18"]
xtickNames = plt.setp(ax2, xticklabels=labels)
plt.setp(xtickNames,rotation=30)#, fontsize=8
ax2.yaxis.grid(True,zorder=0, color="Gainsboro", ls="-")
ax2.xaxis.grid(False)
ax2.set_axisbelow(True)
title=str(np.char.replace(title," ", '_'))
title=str(np.char.replace(title,"Apartment", 'Ap'))
plt.savefig(savingFolder+title+'_BP.png',figure=fig2, format='png')
plt.savefig(savingFolder+title+'_BP.pdf',figure=fig2, format='pdf')
#plt.show()
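# plotDiurnalandBoxes combines both views for one selection: a diurnal profile plot of
# observed (df) versus simulated (df2) data and a residual box plot per temperature bin.
# It returns the mean residuals, the selection string and the mean absolute residuals;
# the B2E3/A03 kitchen is skipped (apparently no usable data) and yields NaNs instead.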
def plotDiurnalandBoxes(df,df2, labels=[],levels=[],timeType='Standard Diurnal',dataType='MD',title=None,colors=None, savingFolder="", extraName=""):
print (levels)
if levels[1]== "B2E3" and levels[2]=='A03'and levels[3]=='Room_Kitchen':
        return np.empty(6) * np.nan, str(levels), np.empty(6) * np.nan  # keep the three-value signature of the normal path
else:
oldtitle=title
title=desmountTitle(title, extraName)
name=buildName(oldtitle, extraName)
if timeType!='Standard Diurnal':
title=timeType+' - '+ title
name=timeType+' - '+ name
if levels[0]!=None:
df=df.iloc[:,df.columns.get_level_values(0)==levels[0]]
df2=df2.iloc[:,df2.columns.get_level_values(0)==levels[0]]
if timeType!=None:
df=df.iloc[:,df.columns.get_level_values(1)==timeType]
df2=df2.iloc[:,df2.columns.get_level_values(1)==timeType]
if dataType!=None:
df=df.iloc[:,df.columns.get_level_values(2)==dataType]
df2=df2.iloc[:,df2.columns.get_level_values(2)==dataType]
if levels[1]!=None:
df=df.iloc[:,df.columns.get_level_values(3)==levels[1]]
df2=df2.iloc[:,df2.columns.get_level_values(3)==levels[1]]
if levels[2]!=None:
df=df.iloc[:,df.columns.get_level_values(4)==levels[2]]
df2=df2.iloc[:,df2.columns.get_level_values(4)==levels[2]]
if levels[3]!=None:
df=df.iloc[:,df.columns.get_level_values(5)==levels[3]]
df2=df2.iloc[:,df2.columns.get_level_values(5)==levels[3]]
if levels[4]!=None:
df=df.iloc[:,df.columns.get_level_values(6)==levels[4]]
df2=df2.iloc[:,df2.columns.get_level_values(6)==levels[4]]
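        # After the level-by-level filtering, df holds the observed diurnal profile and df2
        # the simulated one for exactly the selected entrance/apartment/room/WP columns.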
print ("WE", df.columns)
print ('We', df2.columns)
fig = plt.figure(figsize=(16./2.54, 10/2.54))
fig.subplots_adjust(left=0.1)
# gs1 = gridspec.GridSpec(1, 1)
# ax = plt.subplot(gs1[0, :])
#ax = fig.add_axes([0.13, 0.1, .85, .8])
ax = fig.add_axes([0.13, 0.355, .85, .55])
for index,column in enumerate(df.columns.values):
if index!=10: ax.plot(df.index, df[column], colors[index], linewidth=2.0,label=labels[index],alpha=0.4)
for index,column in enumerate(df2.columns.values):
if index!=10: ax.plot(df.index, df2[column], colors[index], marker="x", linewidth=0.7,markevery=60,mfc='None', mec=colors[index],label=labels[index]+' Sim')
ax.set_ylabel("Proportion of window open")
ax.yaxis.set_label_coords(-0.09, 0.5)
ax.set_xlabel("Time of the day")
ticks = ax.get_xticks()
ax.set_ylim(0,1)
plt.title(title, y=1.05)
box = ax.get_position()
#ax.set_position([box.x0, box.y0 + box.height * 0.32,
# box.width, box.height * 0.68])
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
ax.legend(loc='upper center', bbox_to_anchor=(0.475, -0.2),frameon=False, ncol=3)
#ax.yaxis.grid(True,zorder=0, color="Gainsboro", ls="-")
#ax.xaxis.grid(False)
plt.savefig(savingFolder+name+'.pdf',figure=fig, format='pdf')
fig2= plt.figure(figsize=(16./2.54, 10/2.54))
fig2.subplots_adjust(left=0.1)
#gs2 = gridspec.GridSpec(1, 1)
#ax2 = fig2.add_subplot(gs2[0, :])
ax2 = fig2.add_axes([0.13, 0.355, .85, .55])
plt.title(title, y=1.05)
bp = ax2.boxplot(df2.values-df.values, sym='-', vert=True, whis=1.5)#, linewidth=2.0,label=labels[index],alpha=0.4)
# Now fill the boxes with desired colors
boxColors = colors
bisColors = [a for a in colors for i in range(2)]
numBoxes = 6
medians = range(numBoxes)
meanValues=DataFrame(df2.values-df.values).mean(axis=0).values
meanAbsResiduals=DataFrame(abs(df2.values-df.values)).mean(axis=0).values
for i in range(numBoxes):
box = bp['boxes'][i]
boxY = []
boxX = []
for j in range(5):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
boxCoords = zip(boxX,boxY)
boxPolygon = Polygon(boxCoords, facecolor=boxColors[i], alpha=0.1,zorder=1)
ax2.add_patch(boxPolygon)
# Now draw the median lines back over what we just filled in
med = bp['medians'][i]
medianX = []
medianY = []
for j in range(2):
medianX.append(med.get_xdata()[j])
medianY.append(med.get_ydata()[j])
plt.plot(medianX, medianY, boxColors[i],linewidth=2)
medians[i] = medianY[0]
# Finally, overplot the sample averages, with horizontal alignment
# in the center of each box
plt.plot([np.average(med.get_xdata())], meanValues[i],
color='None', marker='o', markeredgecolor=boxColors[i], markersize=7,zorder=0)
plt.plot([np.average(med.get_xdata())], meanValues[i],
color=boxColors[i], marker='o', markeredgecolor=boxColors[i], markersize=7,alpha=0.2,zorder=3)
plt.setp(bp['medians'][i], color=colors[i]) # DarkSlateGray
plt.setp(bp['boxes'][i], color='DarkSlateGray')
for i in range(len(bisColors)):
plt.setp(bp['whiskers'][i], color='DarkSlateGray')
plt.setp(bp['caps'][i], color='DarkSlateGray')
plt.setp(bp['fliers'], color='Gainsboro')
plt.setp(bp['whiskers'], linestyle='solid')
ax2.set_ylabel("Simulated-Observed WP profile")
ax2.yaxis.set_label_coords(-0.09, 0.5)
ax2.set_ylim(-0.1,0.1)
#ax2.set_yticks([0.2, 0.6, 0.8], minor=False)
ax2.yaxis.set_ticks_position('left')
ax2.xaxis.set_ticks_position('bottom')
#newLabels= ["ATR",'DAT $\leq$ 5'," 5 $\leq$ \nDAT\n $\leq$ 11", "11 $\leq$ \nDAT\n $\leq$ 14","14 $\leq$ \nDAT\n $\leq$ 18","DAT $\geq$ 18"]
xtickNames = plt.setp(ax2, xticklabels=labels)
plt.setp(xtickNames,rotation=30)#, fontsize=8
ax2.yaxis.grid(True,zorder=0, color="Gainsboro", ls="-")
ax2.xaxis.grid(False)
ax2.set_axisbelow(True)
plt.savefig(savingFolder+title+'_BP.pdf',figure=fig2, format='pdf')
#plt.show()
return meanValues, str(levels), meanAbsResiduals
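# plotDiurnalandBoxesBeta: variant of plotDiurnalandBoxes with window-state (WS) axis labels,
# suppressed figure titles and compacted output file names; the plotting and residual logic is
# otherwise the same.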
def plotDiurnalandBoxesBeta(df,df2, labels=[],levels=[],timeType='Standard Diurnal',dataType='MD',title=None,colors=None, savingFolder="", extraName=""):
print (levels)
    if levels[1] == "B2E3" and levels[2] == 'A03' and levels[3] == 'Room_Kitchen':
        # excluded apartment/room combination: return NaN placeholders matching the normal return signature
        return np.empty(6) * np.nan, str(levels), np.empty(6) * np.nan
else:
oldtitle=title
title=desmountTitle(title, extraName)
name=buildName(oldtitle, extraName)
if timeType!='Standard Diurnal':
title=timeType+' - '+ title
name=timeType+' - '+ name
if levels[0]!=None:
df=df.iloc[:,df.columns.get_level_values(0)==levels[0]]
df2=df2.iloc[:,df2.columns.get_level_values(0)==levels[0]]
if timeType!=None:
df=df.iloc[:,df.columns.get_level_values(1)==timeType]
df2=df2.iloc[:,df2.columns.get_level_values(1)==timeType]
if dataType!=None:
df=df.iloc[:,df.columns.get_level_values(2)==dataType]
df2=df2.iloc[:,df2.columns.get_level_values(2)==dataType]
if levels[1]!=None:
df=df.iloc[:,df.columns.get_level_values(3)==levels[1]]
df2=df2.iloc[:,df2.columns.get_level_values(3)==levels[1]]
if levels[2]!=None:
df=df.iloc[:,df.columns.get_level_values(4)==levels[2]]
df2=df2.iloc[:,df2.columns.get_level_values(4)==levels[2]]
if levels[3]!=None:
df=df.iloc[:,df.columns.get_level_values(5)==levels[3]]
df2=df2.iloc[:,df2.columns.get_level_values(5)==levels[3]]
if levels[4]!=None:
df=df.iloc[:,df.columns.get_level_values(6)==levels[4]]
df2=df2.iloc[:,df2.columns.get_level_values(6)==levels[4]]
fig = plt.figure(figsize=(16./2.54, 9/2.54))
fig.subplots_adjust(left=0.1)
# gs1 = gridspec.GridSpec(1, 1)
# ax = plt.subplot(gs1[0, :])
#ax = fig.add_axes([0.13, 0.1, .85, .8])
ax = fig.add_axes([0.13, 0.4, .85, .5])
for index,column in enumerate(df.columns.values):
if index!=10: ax.plot(df.index, df[column], colors[index], linewidth=2.0,label=labels[index],alpha=0.4)
for index,column in enumerate(df2.columns.values):
if index!=10: ax.plot(df.index, df2[column], colors[index], marker="x", linewidth=0.7,markevery=60,mfc='None', mec=colors[index],label=labels[index]+' Sim')
if timeType=='Standard Diurnal': ax.set_ylabel("SD - Aver. WS, "+str(title.split(", ")[1]))
if timeType=='Week End': ax.set_ylabel("WE - Aver. WS, "+str(title.split(", ")[1]))
if timeType=='Week': ax.set_ylabel("WD - Aver. WS, "+str(title.split(", ")[1]))
ax.yaxis.set_label_coords(-0.09, 0.5)
ax.set_xlabel("Time of the day")
ticks = ax.get_xticks()
ax.set_ylim(0,1)
#plt.title(title, y=1.05)
box = ax.get_position()
#ax.set_position([box.x0, box.y0 + box.height * 0.32,
# box.width, box.height * 0.68])
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
ax.legend(loc='upper center', bbox_to_anchor=(0.475, -0.25),frameon=False, ncol=3)
#ax.yaxis.grid(True,zorder=0, color="Gainsboro", ls="-")
#ax.xaxis.grid(False)
titleb=str(np.char.replace(title," ", ''))
titleb=str(np.char.replace(titleb,",", '_'))
plt.savefig(savingFolder+titleb+'.pdf',figure=fig, format='pdf')
fig2= plt.figure(figsize=(16./2.54, 9/2.54))
fig2.subplots_adjust(left=0.1)
#gs2 = gridspec.GridSpec(1, 1)
#ax2 = fig2.add_subplot(gs2[0, :])
ax2 = fig2.add_axes([0.13, 0.4, .85, .5])
#plt.title(title, y=1.05)
print('start')
print (df2.head(1))
print('break')
#print (df.head(1))
print('stop')
bp = ax2.boxplot(df2.values-df.values, sym='-', vert=True, whis=1.5)#, linewidth=2.0,label=labels[index],alpha=0.4)
# Now fill the boxes with desired colors
boxColors = colors
bisColors = [a for a in colors for i in range(2)]
numBoxes = 6
medians = range(numBoxes)
meanValues=DataFrame(df2.values-df.values).mean(axis=0).values
meanAbsResiduals=DataFrame(abs(df2.values-df.values)).mean(axis=0).values
for i in range(numBoxes):
box = bp['boxes'][i]
boxY = []
boxX = []
for j in range(5):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
boxCoords = zip(boxX,boxY)
boxPolygon = Polygon(boxCoords, facecolor=boxColors[i], alpha=0.1,zorder=1)
ax2.add_patch(boxPolygon)
# Now draw the median lines back over what we just filled in
med = bp['medians'][i]
medianX = []
medianY = []
for j in range(2):
medianX.append(med.get_xdata()[j])
medianY.append(med.get_ydata()[j])
plt.plot(medianX, medianY, boxColors[i],linewidth=2)
medians[i] = medianY[0]
# Finally, overplot the sample averages, with horizontal alignment
# in the center of each box
plt.plot([np.average(med.get_xdata())], meanValues[i],
color='None', marker='o', markeredgecolor=boxColors[i], markersize=7,zorder=0)
plt.plot([np.average(med.get_xdata())], meanValues[i],
color=boxColors[i], marker='o', markeredgecolor=boxColors[i], markersize=7,alpha=0.2,zorder=3)
plt.setp(bp['medians'][i], color=colors[i]) # DarkSlateGray
plt.setp(bp['boxes'][i], color='DarkSlateGray')
for i in range(len(bisColors)):
plt.setp(bp['whiskers'][i], color='DarkSlateGray')
plt.setp(bp['caps'][i], color='DarkSlateGray')
plt.setp(bp['fliers'], color='Gainsboro')
plt.setp(bp['whiskers'], linestyle='solid')
if timeType=='Standard Diurnal': ax2.set_ylabel("SD - Sim.-Obs. WS, "+str(title.split(", ")[1]))
if timeType=='Week End': ax2.set_ylabel("WE - Sim.-Obs. WS, "+str(title.split(", ")[1]))
if timeType=='Week': ax2.set_ylabel("WD - Sim.-Obs. WS, "+str(title.split(", ")[1]))
ax2.set_ylabel("Sim.-Obs. WS, "+str(title.split(", ")[1]))
ax2.yaxis.set_label_coords(-0.09, 0.5)
ax2.set_ylim(-0.1,0.1)
#ax2.set_yticks([0.2, 0.6, 0.8], minor=False)
ax2.yaxis.set_ticks_position('left')
ax2.xaxis.set_ticks_position('bottom')
#newLabels= ["ATR",'DAT $\leq$ 5'," 5 $\leq$ \nDAT\n $\leq$ 11", "11 $\leq$ \nDAT\n $\leq$ 14","14 $\leq$ \nDAT\n $\leq$ 18","DAT $\geq$ 18"]
xtickNames = plt.setp(ax2, xticklabels=labels)
plt.setp(xtickNames,rotation=30)#, fontsize=8
ax2.yaxis.grid(True,zorder=0, color="Gainsboro", ls="-")
ax2.xaxis.grid(False)
ax2.set_axisbelow(True)
title=str(np.char.replace(title," ", ''))
title=str(np.char.replace(title,",", '_'))
#plt.show()
plt.savefig(savingFolder+title+'_BP.pdf',figure=fig2, format='pdf')
return meanValues, str(levels), meanAbsResiduals
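# Helpers that flatten the title sequence returned by Select_ColorsAndMarkers into a display
# title ('startTitle - ' prefix, elements separated by ', ') and into a file name (elements
# separated by '_'); note that the element at index 0 is dropped unless it is the only one.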
def desmountTitle(title,startTitle):
newTitle=startTitle
for i, word in enumerate(title):
if i== len(title)-1: newTitle=newTitle+str(word)
else:
if i==0:
newTitle=startTitle+' - '
else:
newTitle=newTitle+str(word)+', '
return newTitle
def buildName(title,startTitle):
newTitle=startTitle
for i, word in enumerate(title):
if i== len(title)-1: newTitle=newTitle+str(word)
else:
if i==0:
newTitle=startTitle+'_'
else:
newTitle=newTitle+str(word)+'_'
return newTitle
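# Driver: load the observed (df1) and simulated (df2) diurnal window-opening tables for
# entrance B2E1, loop over apartments and rooms, produce the profile and residual box plots,
# and collect the mean residuals per category into a summary DataFrame (resultDF).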
if __name__ == '__main__':
print ("Start main")
recordFolder='D:/dc224615_Ddiss/Documento/Pictures/MCValidation/B2E1/'
recFolder='D:/EBC0018_PTJ_Volkswohnung_tos/HDF-Programming/pd4hdf/MarkovChain/MC4Windows/records/'
df1=pd.read_csv(recFolder+'diurnals/B2E1_20121_201212diurnals.csv', index_col=0, sep=';', header=[0,1,2,4,5,6,7],skiprows=[8], parse_dates=True,low_memory=False)
# df1=pd.read_csv(recFolder+'diurnals3/B2E1_20121_201212diurnals_MD.csv', index_col=0, sep=';', header=[0,1,2,4,5,6,7],skiprows=[8], parse_dates=True,low_memory=False)
df2=pd.read_csv(recFolder+'validationM3_B2E1/proSet_100_B2E1_CDPL.csv', index_col=0, sep=';', header=[0,1,2,4,5,6,7],skiprows=[8], parse_dates=True,low_memory=False)
roomsWP1 = ['Room_Kitchen','Room_Bath','Room_Living']
roomsWP = ['Room_Children','Room_Sleeping']
entrances = ["B2E1"]#,"B2E2","B2E3","B3E1","B3E2","B3E3"]
apartments = ["A01","A02","A03","A04","A05","A06","A07","A08","A09","A10"]
#apartmentsPlus = ["A01","A02","A03","A04","A05","A06","A07","A08","A09","A10",'-']
results=[]
indicis=[]
columns4Results=[]
for entrance in entrances:
for apartment in apartments:
for room in roomsWP1:
colors,markers,title,labels,keys = Select_ColorsAndMarkers(Level0 = None , Level2=entrance, Level3 = apartment,Level4 = room,Level5 = "WP1")
title,labels = english2English(title,labels)
keys = codifyL1(keys)
                values, indice, _ = plotDiurnalandBoxes(df1, df2, levels=keys, labels=labels, title=title, colors=colors, savingFolder=recordFolder, extraName='2012')
results.append(values)
indicis.append(indice)
for room in roomsWP:
print (entrance, apartment, room)
colors,markers,title,labels,keys = Select_ColorsAndMarkers(Level0 = None , Level2=entrance, Level3 = apartment,Level4 = room,Level5 = "WP")
title,labels = english2English(title,labels)
keys = codifyL1(keys)
                values, indice, _ = plotDiurnalandBoxes(df1, df2, levels=keys, labels=labels, title=title, colors=colors, savingFolder=recordFolder, extraName='2012')
results.append(values)
indicis.append(indice)
columns4Results=labels
print (results)
resultDF= | DataFrame(results, index=indicis,columns=columns4Results) | pandas.DataFrame |
# Author: <NAME>
# Created: 7/7/20, 10:12 AM
import logging
import argparse
import pandas as pd
from typing import *
import matplotlib.pyplot as plt
import seaborn
from tqdm import tqdm
# noinspection All
import pathmagic
# noinspection PyUnresolvedReferences
import mg_log # runs init in mg_log and configures logger
# Custom imports
from mg_general import Environment, add_env_args_to_parser
# ------------------------------ #
# Parse CMD #
# ------------------------------ #
from mg_general.general import os_join, fix_names
from mg_general.labels_comparison_detailed import LabelsComparisonDetailed
from mg_io.labels import read_labels_from_file
from mg_viz import sns
parser = argparse.ArgumentParser("Collect statistics for experiment running tools on "
"genome chunks.")
parser.add_argument('--pf-summary', required=True)
parser.add_argument('--pf-output', required=True)
add_env_args_to_parser(parser)
parsed_args = parser.parse_args()
# ------------------------------ #
# Main Code #
# ------------------------------ #
# Load environment variables
my_env = Environment.init_from_argparse(parsed_args)
# Setup logger
logging.basicConfig(level=parsed_args.loglevel)
logger = logging.getLogger("logger") # type: logging.Logger
def stats_tools_on_chunks(env, df):
# type: (Environment, pd.DataFrame) -> pd.DataFrame
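    # For each genome listed in the summary table, compare the tool's predictions with the
    # verified annotation: 'Number of genes found' counts genes whose 3' end matches, and
    # 'Error' is the percentage of those whose 5' end does not also match.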
list_entries = list()
for idx in tqdm(df.index, total=len(df)):
pf_prediction = df.at[idx, "Predictions"]
pf_verified = os_join(env["pd-data"], df.at[idx, "Genome"], "verified.gff")
labels = read_labels_from_file(pf_prediction, shift=-1)
labels_ref = read_labels_from_file(pf_verified)
lcd = LabelsComparisonDetailed(labels_ref, labels)
list_entries.append({
"Error": 100 - 100 * len(lcd.match_3p_5p('a')) / len(lcd.match_3p('a')),
"Number of genes found": len(lcd.match_3p('a')),
**df.loc[idx, :].to_dict(),
})
return pd.DataFrame(list_entries)
def main(env, args):
# type: (Environment, argparse.Namespace) -> None
df = | pd.read_csv(args.pf_summary) | pandas.read_csv |
try: import cPickle as pickle
except: import pickle
import os
if os.name == 'posix' and 'DISPLAY' not in os.environ:
import matplotlib
matplotlib.use('Agg')
import matplotlib
import matplotlib.pyplot as plt
import itertools
from matplotlib import rc
import random
import seaborn
import numpy as np
import pandas as pd
import pdb
from argparse import ArgumentParser
font = {'family': 'serif', 'serif': ['computer modern roman']}
rc('text', usetex=False)
rc('font', weight='bold')
rc('font', size=20)
rc('lines', markersize=10)
rc('xtick', labelsize=12)
rc('ytick', labelsize=12)
rc('axes', labelsize='x-large')
rc('axes', labelweight='bold')
rc('axes', titlesize='x-large')
rc('axes', linewidth=3)
plt.rc('font', **font)
seaborn.set_style("darkgrid")
figsize_d = {2: (5, 2),
4: (9, 2)}
m_name_l = {"dynAE": "DynAE",
"dynRNN": "DynRNN",
"rand": "RandDynamic",
}
expMap = {"gr": "GR MAP", "lp": "LP MAP",
"nc": "NC F1 score"}
expMap2 = {"gr": "GR MAP", "lp": "LP P@100",
"nc": "NC F1 score"}
def get_node_color(node_community):
cnames = [item[0] for item in matplotlib.colors.cnames.items()]
node_colors = [cnames[c] for c in node_community]
return node_colors
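# Generic multi-series line plot: one color/marker pair per series, optional legend labels,
# saved to file_save_path and optionally displayed.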
def plot(x_s, y_s, fig_n, x_lab, y_lab,
file_save_path, title, legendLabels=None, show=False):
plt.rcParams.update({'font.size': 16, 'font.weight': 'bold'})
markers = ['o', '*', 'v', 'D', '<', 's', '+', '^', '>']
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
series = []
plt.figure(fig_n)
i = 0
for i in range(len(x_s)):
# n_points = len(x_s[i])
# n_points = int(n_points/10) + random.randint(1,100)
# x = x_s[i][::n_points]
# y = y_s[i][::n_points]
x = x_s[i]
y = y_s[i]
series.append(plt.plot(x, y, color=colors[i],
linewidth=2, marker=markers[i],
markersize=8))
plt.xlabel(x_lab, fontsize=16, fontweight='bold')
plt.ylabel(y_lab, fontsize=16, fontweight='bold')
plt.title(title, fontsize=16, fontweight='bold')
if legendLabels:
plt.legend([s[0] for s in series], legendLabels)
plt.savefig(file_save_path)
if show:
plt.show()
def plot_ts(ts_df, plot_title, eventDates,
eventLabels=None, save_file_name=None,
xLabel=None, yLabel=None, show=False):
ax = ts_df.plot(title=plot_title, marker='*',
markerfacecolor='red', markersize=10,
linestyle='solid')
colors = ['r', 'g', 'c', 'm', 'y', 'b', 'k']
if not eventLabels:
for eventDate in eventDates:
# Show event as a red vertical line
ax.axvline(eventDate, color='r', linestyle='--', lw=2)
else:
for idx in range(len(eventDates)):
ax.axvline(eventDates[idx], color=colors[idx],
linestyle='--', lw=2, label=eventLabels[idx])
ax.legend()
if xLabel:
ax.set_xlabel(xLabel, fontweight='bold')
if yLabel:
ax.set_ylabel(yLabel, fontweight='bold')
fig = ax.get_figure()
if save_file_name:
fig.savefig(save_file_name, bbox_inches='tight')
if show:
fig.show()
def turn_latex(key_str):
if key_str in ['mu', 'rho', 'beta', 'alpha', 'gamma']:
return '$\%s$' % key_str
else:
return '$%s$' % key_str.upper()
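# plot_hyp_data2: for each experiment type, load every method's hyper-parameter sweep results
# from HDF5, then, for each hyper-parameter with at least three tested values, draw tsplot
# panels of the evaluation metric versus that hyper-parameter while the remaining
# hyper-parameters are fixed at each combination of their observed values. Figures are written
# under plots/data_hyp/.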
def plot_hyp_data2(hyp_keys, exp_param,
meths, data,
s_sch="u_rand",
dim=2):
font = {'family': 'serif', 'serif': ['computer modern roman']}
rc('text', usetex=True)
rc('font', weight='bold')
rc('font', size=8)
rc('lines', markersize=2.5)
rc('lines', linewidth=0.5)
rc('xtick', labelsize=6)
rc('ytick', labelsize=6)
rc('axes', labelsize='small')
rc('axes', labelweight='bold')
rc('axes', titlesize='small')
rc('axes', linewidth=1)
plt.rc('font', **font)
seaborn.set_style("darkgrid")
for exp in exp_param:
df_all = pd.DataFrame()
n_meths = 0
for meth in meths:
try:
df = pd.read_hdf(
"intermediate/%s_%s_%s_%s_dim_%d_data_hyp.h5" % (data, meth, exp, s_sch, dim),
"df"
)
n_meths += 1
except:
print ('%s_%s_%s_%s_dim_%d_data_hyp.h5 not found. Ignoring data set' % (data, meth, exp, s_sch, dim))
continue
# Check if experiment is in the dataframe
if expMap[exp] not in df:
continue
df["Method"] = m_name_l[meth]
# pdb.set_trace()
df_all = df_all.append(df).reset_index()
df_all = df_all.drop(['index'], axis=1)
if df_all.empty:
continue
df = df_all
col_names = df.columns
col_rename_d = {}
for col_name in col_names:
col_rename_d[col_name] = col_name.replace('_', '\ ')
df.rename(columns=col_rename_d, inplace=True)
for hyp_key in hyp_keys:
# hyp_key_ren = hyp_key.replace('_', '\ ')
df_trun = df[hyp_keys + ["Round Id", expMap[exp], expMap2[exp], "Method"]]
df_grouped = df_trun
rem_hyp_keys = list(set(hyp_keys) - {hyp_key})
val_lists = [df_grouped[r_k].unique() for r_k in rem_hyp_keys]
n_cols = len(list(itertools.product(*val_lists)))
if len(df_grouped[hyp_key].unique()) < 3:
continue
plot_shape = (1, n_cols)
fin1, axarray1 = plt.subplots(1, n_cols, figsize=figsize_d[n_cols])
fin2, axarray2 = plt.subplots(1, n_cols, figsize=figsize_d[n_cols])
for plt_idx, hyp_vals in enumerate(itertools.product(*val_lists)):
plot_idx = np.unravel_index(plt_idx, plot_shape)
hyp_dict = dict(zip(rem_hyp_keys, hyp_vals))
hyp_str = ', '.join(
"%s:%r" % (turn_latex(key), val) for (key, val) in hyp_dict.iteritems() if len(df_grouped[key].unique()) > 1
)
df_temp = df_grouped
for hyp_idx, hyp_val in enumerate(hyp_vals):
df_temp = df_temp[df_temp[rem_hyp_keys[hyp_idx]] == hyp_val]
if len(df_temp[hyp_key].unique()) < 3:
continue
print('Plotting %s: %s' % (exp, hyp_key))
try:
ax = seaborn.tsplot(time=hyp_key, value=expMap[exp],
unit="Round Id", condition="Method",
data=df_temp,
ax=axarray1[plot_idx[0], plot_idx[1]])
if plot_idx[1]:
ax.set_ylabel('')
if not plot_idx[0]:
ax.set_xlabel('')
except IndexError:
try:
ax = seaborn.tsplot(time=hyp_key, value=expMap[exp],
unit="Round Id", condition="Method",
data=df_temp,
ax=axarray1[plt_idx])
except:
import pdb
pdb.set_trace()
if plt_idx:
ax.set_ylabel('')
ax.set_title(hyp_str)
hyp_values = df_grouped[hyp_key].unique()
l_diff = hyp_values[-1] - hyp_values[-2]
f_diff = hyp_values[1] - hyp_values[0]
l_f_diff_r = l_diff / f_diff
if l_f_diff_r > 1:
log_base = pow(l_f_diff_r, 1.0 / (len(hyp_values) - 2))
ax.set_xscale('log', basex=round(log_base))
marker = ["o", "s", "D", "^", "v", "8", "*", "p", "1", "h"]
for line_i in range(len(ax.lines)):
ax.lines[line_i].set_marker(marker[line_i])
# ax.grid()
ax.legend_.remove()
try:
ax = seaborn.tsplot(time=hyp_key, value=expMap2[exp],
unit="Round Id", condition="Method",
data=df_temp,
ax=axarray2[plot_idx[0], plot_idx[1]])
if plot_idx[1]:
ax.set_ylabel('')
if not plot_idx[0]:
ax.set_xlabel('')
except IndexError:
ax = seaborn.tsplot(time=hyp_key, value=expMap2[exp],
unit="Round Id", condition="Method",
data=df_temp,
ax=axarray2[plt_idx])
if plt_idx:
ax.set_ylabel('')
ax.set_title(hyp_str)
if l_f_diff_r > 1:
log_base = pow(l_f_diff_r, 1.0 / (len(hyp_values) - 2))
ax.set_xscale('log', basex=round(log_base))
marker = ["o", "s", "D", "^", "v", "8", "*", "p", "1", "h"]
for line_i in range(len(ax.lines)):
ax.lines[line_i].set_marker(marker[line_i])
# ax.grid()
ax.legend_.remove()
for col_idx in range(axarray1.shape[0]):
box = axarray1[col_idx].get_position()
axarray1[col_idx].set_position(
[box.x0,
box.y0 + box.height * 0.1,
box.width,
box.height * 0.9]
)
box = axarray2[col_idx].get_position()
axarray2[col_idx].set_position(
[box.x0,
box.y0 + box.height * 0.1,
box.width,
box.height * 0.9]
)
fin1.legend(loc='lower center', bbox_to_anchor=(0.45, -0.01),
ncol=n_meths, fancybox=True, shadow=True)
fin2.legend(loc='lower center', bbox_to_anchor=(0.45, -0.01),
ncol=n_meths, fancybox=True, shadow=True)
fin1.savefig(
'plots/data_hyp/%s_%s_%s_%d_%s.pdf' % (data, exp, s_sch, dim, hyp_key),
dpi=300, format='pdf', bbox_inches='tight'
)
fin2.savefig(
'plots/data_hyp/%s_%s_%s_%d_%s_p100.pdf' % (data, exp, s_sch, dim, hyp_key),
dpi=300, format='pdf', bbox_inches='tight'
)
fin1.clf()
fin2.clf()
def plot_hyp_data(hyp_keys, exp_param,
meths, data,
s_sch="u_rand",
dim=2):
for exp in exp_param:
df_all = | pd.DataFrame() | pandas.DataFrame |
#Plot
import matplotlib.pyplot as plt
import seaborn as sns
from bleu import file_bleu
#Data Packages
import math
import pandas as pd
import numpy as np
#Progress bar
from tqdm import tqdm
#Counter
from collections import Counter
#Operation
import operator
#Natural Language Processing Packages
import re
import nltk
## Download Resources
nltk.download("vader_lexicon")
nltk.download("stopwords")
nltk.download("averaged_perceptron_tagger")
nltk.download("wordnet")
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.sentiment.util import *
from nltk import tokenize
from nltk.corpus import stopwords
from nltk.tag import PerceptronTagger
from nltk.data import find
sns.set(rc={'figure.figsize':(5,3.5)})
# CHANGE FILEPATH before running this locally
# Use vader to evaluated sentiment of reviews
def evalSentences(sentences, to_df=False, columns=[]):
# Instantiate an instance to access SentimentIntensityAnalyzer class
sid = SentimentIntensityAnalyzer()
pdlist = []
if to_df:
for sentence in tqdm(sentences):
ss = sid.polarity_scores(sentence)
pdlist.append([sentence] + [ss['compound']])
reviewDf = pd.DataFrame(pdlist)
reviewDf.columns = columns
return reviewDf
else:
for sentence in tqdm(sentences):
print(sentence)
ss = sid.polarity_scores(sentence)
for k in sorted(ss):
print('{0}: {1}, '.format(k, ss[k]), end='')
print()
def getHistogram(df, measure, title, hue=None, figsize=(5, 3)):
if hue:
sns_plot = sns.kdeplot(data=df, x=measure, hue=hue)
# sns_plot = sns.histplot(data=df, x=measure, hue=hue)
else:
sns_plot = sns.histplot(data=df, x=measure)
# sns_plot.set_title(title)
sns_plot.set_xlabel("Value")
sns_plot.set_ylabel("Density")
plt.tight_layout()
sns_plot.figure.savefig("{}.png".format(title))
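# Score ALE output sentences with VADER: the first 500 lines are negative sentences rewritten
# as positive (correct when compound >= 0) and the last 500 are positive rewritten as negative
# (correct when compound <= 0); the untouched test sets are scored the same way as a baseline.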
def calculate_vader_ALE(filename=None):
print("Evaluate ALE")
if filename:
file_path = "./{}.txt".format(filename)
else:
file_path ="./outputext_step1_eps5.txt"
file_path_neg ="../../data/yelp/sentiment.test.0"
file_path_pos ="../../data/yelp/sentiment.test.1"
review_file = open(file_path, "r")
reviews = review_file.readlines()
review_file.close()
reviewDF = evalSentences(reviews, to_df=True, columns=['review','vader'])
# sanity check
assert(reviewDF.shape[0]==1000)
neg_2_pos = (reviewDF[:500]['vader']>=0).sum()
pos_2_neg = (reviewDF[500:]['vader']<=0).sum()
acc = (neg_2_pos+pos_2_neg)/1000
print("accuracy of changed sentences is {}".format(acc))
print("accuracy of pos_to_neg sentences is {}".format(pos_2_neg/500))
print("accuracy of neg_to_pos sentences is {}".format(neg_2_pos/500))
review_file_neg = open(file_path_neg, "r")
review_file_pos = open(file_path_pos, "r")
reviews_neg = review_file_neg.readlines()
reviews_pos = review_file_pos.readlines()
review_file_neg.close()
review_file_pos.close()
reviewDF_neg = evalSentences(reviews_neg, to_df=True, columns=['review','vader'])
reviewDF_pos = evalSentences(reviews_pos, to_df=True, columns=['review','vader'])
# sanity check
assert(reviewDF_neg.shape[0]==500)
assert (reviewDF_pos.shape[0] == 500)
pos_acc = (reviewDF_pos['vader']>=0).sum()
neg_acc = (reviewDF_neg['vader']<=0).sum()
org_acc = (pos_acc+neg_acc)/1000
print("accuracy of original sentences is {}".format(org_acc))
print("accuracy of original positive sentences is {}".format(pos_acc/500))
print("accuracy of original negative sentences is {}".format(neg_acc/500))
return reviewDF, reviewDF_pos, reviewDF_neg
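# Parse the Style Transformer output: each original sentence ([raw 0.0] negative, [raw 1.0]
# positive) is paired with its transferred counterpart ([rev ...]), and both sets are scored
# with VADER in the same way as above.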
def calculate_style_trans():
print("Evaluate Style Transformer")
file_path ="./style_transformer.txt"
review_file = open(file_path, "r")
reviews_raw = review_file.readlines()
review_file.close()
reviews_pos_to_neg = [] # changed sentence
reviews_neg_to_pos = [] # changed sentence
reviews_pos = [] #original pos
reviews_neg = [] #original neg
pos_example = False
neg_example = False
for sent in reviews_raw:
if sent.startswith("[raw 0.0]"):
reviews_neg.append(sent[11:])
neg_example = True
pos_example = False
elif sent.startswith("[raw 1.0]"):
reviews_pos.append(sent[11:])
neg_example = False
pos_example = True
elif sent.startswith("[rev 0.0]") and pos_example:
reviews_pos_to_neg.append(sent[11:])
pos_example = False
neg_example = False
elif sent.startswith("[rev 1.0]") and neg_example:
reviews_neg_to_pos.append(sent[11:])
pos_example = False
neg_example = False
assert (len(reviews_pos_to_neg) == 500)
assert (len(reviews_neg_to_pos) == 500)
assert (len(reviews_pos) == 500)
assert (len(reviews_neg) == 500)
reviewDF_pos_to_neg = evalSentences(reviews_pos_to_neg, to_df=True, columns=['review','vader'])
reviewDF_neg_to_pos = evalSentences(reviews_neg_to_pos, to_df=True, columns=['review','vader'])
neg_2_pos = (reviewDF_neg_to_pos['vader']>=0).sum()
pos_2_neg = (reviewDF_pos_to_neg['vader']<=0).sum()
acc = (neg_2_pos+pos_2_neg)/1000
print("accuracy of changed sentences is {}".format(acc))
print("accuracy of pos_to_neg sentences is {}".format(pos_2_neg/500))
print("accuracy of neg_to_pos sentences is {}".format(neg_2_pos/500))
reviewDF_neg = evalSentences(reviews_neg, to_df=True, columns=['review','vader'])
reviewDF_pos = evalSentences(reviews_pos, to_df=True, columns=['review','vader'])
# sanity check
assert(reviewDF_neg.shape[0]==500)
assert (reviewDF_pos.shape[0] == 500)
pos_acc = (reviewDF_pos['vader']>=0).sum()
neg_acc = (reviewDF_neg['vader']<=0).sum()
org_acc = (pos_acc+neg_acc)/1000
print("accuracy of original sentences is {}".format(org_acc))
print("accuracy of original positive sentences is {}".format(pos_acc/500))
print("accuracy of original negative sentences is {}".format(neg_acc/500))
return reviewDF_pos_to_neg, reviewDF_neg_to_pos, reviewDF_pos, reviewDF_neg
def graph_ALE(reviewDF_ALE, reviewDF_pos_ALE, reviewDF_neg_ALE, color1, color2):
reviewDF_pos_ALE['label'] = "POS"
reviewDF_neg_ALE['label'] = "NEG"
reviewDF_org = pd.concat((reviewDF_neg_ALE, reviewDF_pos_ALE), 0).reset_index(drop=True)
assert (reviewDF_org['review']==reviewDF_ALE['review']).any() # there are definitely unchanged sentence, otherwise the ordering is wrong
reviewDF_org = reviewDF_org.rename(columns={"vader":"vader_original"})
reviewDF_ALE_all = pd.concat([reviewDF_ALE, reviewDF_org], axis=1, join="inner")
reviewDF_ALE_all = reviewDF_ALE_all.loc[:,~reviewDF_ALE_all.columns.duplicated()]
reviewDF_ALE_all['change in vader'] = reviewDF_ALE_all['vader'] - reviewDF_ALE_all['vader_original']
# getHistogram(reviewDF_ALE_all, 'change in vader', 'ALE change in vader score', hue="label")
pal = dict(POS=color2, NEG=color1)
sns_plot = sns.kdeplot(data=reviewDF_ALE_all, x='change in vader', hue="label", palette=pal)
# sns_plot = sns.kdeplot(data=reviewDF_ALE_all, x='change in vader', color=color1)
return sns_plot
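# Overlay the kernel-density curves of the change in VADER score for several eps settings,
# coloring each NEG/POS pair of curves from opposite ends of a diverging coolwarm palette
# and saving the combined figure as 'ALE transition'.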
def draw_transition_graph():
palette = sns.color_palette("coolwarm", n_colors=10)
i=0
for filename in ["outputext_step1_eps0.5", "outputext_step1_eps2", "outputext_step1_eps3", "outputext_step1_eps4", "outputext_step1_eps5"]:
color1 = palette[4-i]
color2 = palette[i+5]
i+=1
reviewDF_ALE, reviewDF_pos_ALE, reviewDF_neg_ALE = calculate_vader_ALE(filename)
plot = graph_ALE(reviewDF_ALE, reviewDF_pos_ALE, reviewDF_neg_ALE, color1, color2)
plot.figure.savefig("ALE transition")
def graph_ALE_vader():
reviewDF_ALE, reviewDF_pos_ALE, reviewDF_neg_ALE = calculate_vader_ALE()
reviewDF_pos_ALE['label'] = "POS → NEG"
reviewDF_neg_ALE['label'] = "NEG → POS"
reviewDF_org = | pd.concat((reviewDF_neg_ALE, reviewDF_pos_ALE), 0) | pandas.concat |
import re
import numpy as np
import pandas as pd
from run_gw_ridge import load_genotype_from_bedfile
def add_noise(y, sd_noise):
return y + np.random.normal(scale=sd_noise, size=(y.shape[0]))
def load_indiv(fn):
res = []
with open(fn, 'r') as f:
for i in f:
line = i.strip().split(' ')
res.append(line[0])
return res
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(prog='simulate_phenotypes.py', description='''
Given effect sizes,
Simulate mediators with dense effect size matrix B (normal) and
phenotypes with sparse effect size beta (zero or normal).
Also, we simulate the null by setting effect size of SNPs on y as
b (normal) with heritability = h2 * PVE.
Specify h2, PVE, and number of mediators to simulate.
Set random seed.
For each run, only one B, beta, and b are simulated.
Will simulate for each h2, PVE combination.
''')
parser.add_argument('--geno_bed_pattern', help='''
Genotype file in BED format (plink).
It takes {chr_num} as wildcard.
The script will load one chromosome at a time assuming the genotype file
has 1 .. 22 chromosomes.
''')
parser.add_argument('--output_prefix', help='''
Phenotype in parquet format.
''')
parser.add_argument('--effect_size_prefix', help='''
Prefix of the effect size parquet.
''')
parser.add_argument('--rand_seed', type=int, help='''
The list of random seeds
''')
parser.add_argument('--h2s', default=None, nargs='+', type=float, help='''
The list of h2
''')
parser.add_argument('--pves', default=None, nargs='+', type=float, help='''
The list of PVE
''')
parser.add_argument('--indiv_list', help='''
The list of individuals
''')
parser.add_argument('--param_config', default=None, help='''
Parameters in one config file. Should include h2s, pves
''')
args = parser.parse_args()
import logging, time, sys, os
# configing util
logging.basicConfig(
level = logging.INFO,
stream = sys.stderr,
format = '%(asctime)s %(message)s',
datefmt = '%Y-%m-%d %I:%M:%S %p')
import yaml
np.random.seed(args.rand_seed)
# load parameters
if args.param_config is None:
h2s, pves = args.h2s, args.pves
else:
with open(args.param_config, 'r') as f:
try:
param_dict = yaml.safe_load(f)
except:
raise ValueError('Something wrong in param_config')
h2s = param_dict['h2s'] if args.h2s is None else args.h2s
pves = param_dict['pves'] if args.pves is None else args.pves
logging.info('Loading effect sizes')
# load individual list
indiv_list = load_indiv(args.indiv_list)
# load effect sizes
df_snp = pd.read_parquet(args.effect_size_prefix + '.snp_effect.parquet')
df_mediator = pd.read_parquet(args.effect_size_prefix + '.mediator_effect.parquet')
num_mediators = df_mediator.shape[0]
logging.info('Calculating genetic component of mediators/y_null')
gm = np.zeros((len(indiv_list), num_mediators))
ynullm = np.zeros(len(indiv_list))
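    # Accumulate genetic values chromosome by chromosome: standardized genotypes times the SNP
    # effect matrix B give the genetic component of each mediator, and genotypes times b give
    # the genetic component of the null phenotype; SNP effects are matched by id and alleles,
    # with unmatched SNPs assigned zero effect.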
for i in range(1, 23):
logging.info(f'-> Working on chromosome{i}')
geno_prefix = args.geno_bed_pattern.format(chr_num=i)
geno_i, _, _, snp_meta = load_genotype_from_bedfile(
f'{geno_prefix}.bed', indiv_list, snplist_to_exclude=set([]),
return_snp=True, standardize=True)
df_snp_i = pd.DataFrame({'snpid': snp_meta[0], 'a0': snp_meta[1], 'a1': snp_meta[2]})
df_snp_i = pd.merge(
df_snp_i, df_snp,
left_on=['snpid', 'a0', 'a1'],
right_on=['snpid', 'ref', 'alt'],
how='left')
df_snp_i.fillna(0, inplace=True)
B_cols = [ f'B_{k}' for k in range(num_mediators) ]
b_col = 'b_y_null'
B_i = df_snp_i[B_cols].values
b_i = df_snp_i[b_col].values
gm += geno_i @ B_i
ynullm += geno_i @ b_i
df_gmed = pd.concat([
pd.DataFrame({'individual': indiv_list}),
pd.DataFrame(gm, columns=[ f'm_{k}' for k in range(num_mediators) ])],
axis=1)
df_gynull = pd.DataFrame({'individual': indiv_list, 'gynull': ynullm})
logging.info('Simulating observed mediators/y_null')
var_gmed = df_gmed.iloc[:, 1:].var(axis=0)
gmed_names = var_gmed.index.tolist()
gmed_values = var_gmed.values.tolist()
df_omed = {'individual': indiv_list}
for h2 in h2s:
for n, v in zip(gmed_names, gmed_values):
error_sd = np.sqrt(v / h2 * (1 - h2))
df_omed[f'{n}_h2_{h2}'] = add_noise(df_gmed[n].values, error_sd)
df_omed = | pd.DataFrame(df_omed) | pandas.DataFrame |
# EPA_SIT.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Loads EPA State Inventory Tool (SIT) data for state specified from external
data directory. Parses EPA SIT data to flowbyactivity format.
"""
import pandas as pd
import os
from flowsa.settings import externaldatapath, log
from flowsa.flowbyfunctions import assign_fips_location_system
from flowsa.location import apply_county_FIPS
def epa_sit_parse(*, source, year, config, **_):
state = config['state']
filepath = f"{externaldatapath}/SIT_data/{state}/{config['file']}"
# dictionary containing Excel sheet-specific information
sheet_dict = config['sheet_dict']
# initialize the dataframe
df0 = pd.DataFrame()
if not os.path.exists(filepath):
raise FileNotFoundError(f'SIT file not found in {filepath}')
# for each sheet in the Excel file containing data...
for sheet, sheet_dict in config.get('sheet_dict').items():
sheetname = sheet_dict.get('sheetname', sheet)
tablename = sheet_dict.get('tablename')
if tablename:
sheetandtable = f'{sheetname}, {tablename}'
else:
sheetandtable = sheetname
tablename = sheet_dict.get('tablename', sheetname)
log.debug(f'Loading data from: {sheetname}...')
# read in data from Excel sheet
df = pd.read_excel(filepath,
sheet_name = sheetname,
header=sheet_dict.get('header', 2),
skiprows=range(sheet_dict.get('skiprowstart', 0),
sheet_dict.get('skiprowend', 0)),
usecols="B:AG",
nrows=sheet_dict.get('nrows'))
df.columns = df.columns.map(str)
df['ActivityProducedBy'] = df.iloc[:,0]
# for each row in the data table...
# ...emissions categories will be renamed with the format
# 'sheet name, emissions category'
# ...emissions subcategories will be renamed with the format
# 'sheet name, emissions category, emissions subcategory'
for ind in df.index:
current_header = df['ActivityProducedBy'][ind].strip()
# for level 1 headers...
if current_header in sheet_dict.get('headers'):
active_header = current_header
if sheet_dict.get('subgroup') == 'activitybyflow':
df.loc[ind, 'FlowName'] = active_header
elif sheet_dict.get('subgroup') == 'flow':
df.loc[ind, 'FlowName'] = 'Total N2O and CH4 Emissions'
df.loc[ind,'ActivityProducedBy'] = (
f'{sheetandtable}, {active_header}')
# for level 2 headers...
elif current_header not in sheet_dict.get('subsubheaders',''):
active_subheader = df['ActivityProducedBy'][ind].strip()
if sheet_dict.get('subgroup') == 'flow':
df.loc[ind, 'FlowName'] = active_subheader
df.loc[ind,'ActivityProducedBy'] = (
f'{sheetandtable}, {active_header}')
elif sheet_dict.get('subgroup') == 'activitybyflow':
df.loc[ind, 'FlowName'] = active_header
df.loc[ind,'ActivityProducedBy'] = (
f'{sheetandtable}, {active_subheader}')
else:
df.loc[ind,'ActivityProducedBy'] = (
f'{sheetandtable}, {active_header}, '
f'{active_subheader}')
# for level 3 headers (only occur in IndirectCO2 and Agriculture tabs)...
else:
subsubheader = df['ActivityProducedBy'][ind].strip()
df.loc[ind,'ActivityProducedBy'] = (
f'{sheetandtable}, {active_header}, '
f'{active_subheader}, {subsubheader}')
# drop all columns except the desired emissions year and the
# emissions activity source
df = df.filter([year, 'ActivityProducedBy', 'FlowName'])
# rename columns
df = df.rename(columns={year: 'FlowAmount'})
# add sheet-specific hardcoded data
if 'subgroup' not in sheet_dict:
df['FlowName'] = sheet_dict.get('flow')
df['Unit'] = sheet_dict.get('unit')
df['Description'] = sheetname
# concatenate dataframe from each sheet with existing master dataframe
df0 = | pd.concat([df0, df]) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import openpyxl
import os
import re
import argparse
from datetime import datetime
import json
# In[2]:
def load_json_as_df(json_data):
out_df = pd.DataFrame(list(json_data.items()),
columns=[key_column, english_col])
return out_df
# In[3]:
def read_json(json_file_path):
with open(json_file_path) as f:
data = json.load(f)
return data
# In[4]:
def reformat_json(json_obj):
json_dict = {}
for key, value in json_obj:
json_dict[key] = value
return json_dict
# In[5]:
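# Substitute per-row values for the <placeholder> tokens in the English text and, when an
# 'a-tag-replacement' value is present, splice it into the first <a ...> tag of the sentence.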
def set_variables(df_row):
for value in allowed_values:
try:
if pd.notna(df_row[value]):
df_row[english_col] = df_row[english_col].replace('<'+ value + '>', df_row[value])
except:
pass
try:
if pd.notna(df_row['a-tag-replacement']):
start_index = df_row[english_col].find('<a')+2
end_index = df_row[english_col].find('>')
df_row[english_col] = df_row[english_col][:start_index] + df_row['a-tag-replacement'] + df_row[english_col][end_index:]
except Exception as e:
print(e)
return df_row
# In[6]:
def write_df_to_json(df, output_json_path):
jsonFile = df.to_json(orient='values')
json_string = json.loads(jsonFile)
reformatted_json = reformat_json(json_string)
with open(output_json_path, 'w') as f:
f.write(json.dumps(reformatted_json, indent = 4, ensure_ascii=False))
# In[7]:
def get_matched_count(excel_df, merged_df):
count = 0
for key in excel_df[key_column]:
for k_key in merged_df[key_column]:
if key == k_key:
count+=1
break
return count
# In[8]:
def clean_df(df):
df_no_na = df.dropna(subset = [key_column], inplace=False)
df_fill_na = df.fillna(value="")
df_no_duplicates = df_fill_na.drop_duplicates(subset=[key_column], keep='last')
return df_no_duplicates
# In[9]:
def read_excel_as_df(excel_file):
excel = pd.ExcelFile(excel_file)
if len(excel.sheet_names) == 0:
return None
sheet = excel.parse(sheet_name = excel.sheet_names[0], header=1)
return sheet
# In[10]:
def read_excels_as_df(file):
excel = | pd.ExcelFile(file) | pandas.ExcelFile |
#psaw
from psaw import PushshiftAPI
api = PushshiftAPI()
import datetime as dt
import pandas as pd
start_epoch=int(dt.datetime(2017, 1, 1).timestamp())
end_epoch=int(dt.datetime(2020, 1, 1).timestamp())
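# Query Pushshift for all r/usanews submissions posted between 2017-01-01 and 2020-01-01,
# keeping only each post's creation time and title.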
headlines_data = list(api.search_submissions(after=start_epoch,
before=end_epoch,
subreddit='usanews',
filter=[ 'created','title']))
headlines_data = pd.DataFrame([thing.d_ for thing in headlines_data])
#change structure of date
def get_date(created):
return dt.datetime.fromtimestamp(created)
timestamp = headlines_data["created"].apply(get_date)
headlines_data= headlines_data.assign(timestamp = timestamp)
#drop unix timestamp dates
headlines_data.drop(['created','created_utc'],axis=1,inplace=True)
# extract only date part from timestamp
headlines_data['timestamp'] = headlines_data['timestamp'].dt.date
x = headlines_data.groupby(by='timestamp')
count = x.count()
count.sort_values(ascending=False,by='title')
# get only those rows where total news>25. we ill be testing on more than top 25
y = count[x.count()>=25]
y =y.dropna()
y = y.reset_index()
#getting titles and no of news of one date together
news =pd.merge(headlines_data,y,on='timestamp')
#logic for making final dictionary of date with news as columns(key-> date,val->news titles) & exporting each file to csv format
i=1
for date in headlines_data['timestamp'].unique():
y = headlines_data[headlines_data['timestamp']==date]
key_value = {}
list_key = []
list_val = []
list_val2=[]
list_key.append(str(date))
list_key = str(list_key)
for index, row in y.iterrows():
list_val.append(row['title'])
lenList = len(list_val)
for elements in range(0,lenList) :
list_val2.append(list_val[elements])
key_value[list_key] = list_val2
data = pd.DataFrame(key_value)
data=data.transpose()
#file_name_str = str(data.columns[0]) i
i = str(i)
file_name = i +".csv"
data.to_csv(file_name)
i=int(i)+1
#csv files exported
#---------------------------------------------------------------------
# combining all csv files in the directory
import glob
import pandas as pd
extension = 'csv'
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
#combine all files in the list
combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames ])
#export to csv
combined_csv.to_csv( "combined.csv", index=False, encoding='utf-8-sig')
#make changes to combined_csv (insert column names at top row and make it as combinednew.csv)
#--------------------------------------------------------------------
livenews= | pd.read_csv('combinednew.csv',error_bad_lines=False) | pandas.read_csv |
"""Module defining the class responsible for implementing the testing framework
for the PPE matching problem.
Copyright 2021 <NAME>, <NAME>, <NAME>, <NAME>
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
import pandas as pd
import numpy as np
import datetime
import os
from . import strategies
import logging
logger = logging.getLogger(__name__)
stream_hdlr = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
stream_hdlr.setFormatter(formatter)
logger.addHandler(stream_hdlr)
logger.setLevel(logging.INFO)
class TestingFramework:
""" Class to run the testing procedure on a given data set
"""
def __init__(self,
donor_path= 'data/anon_donors.csv',
recipient_path= 'data/anon_recipients.csv',
distance_matrix_path= "data/anon_distance_matrix.csv",
strategy=strategies.proximity_match_strategy,
interval=7, max_donation_qty=1000,
writeFiles=False, output_directory = 'output/'):
"""Initialize the framework.
:param donor_path: the file name (csv) of the table D of donor requests, defaults to 'data/anon_donors.csv'
:type donor_path: str, optional
:param recipient_path: the file name (csv) of the table R of recipient requests, defaults to 'data/anon_recipients.csv'
:type recipient_path: str, optional
:param distance_matrix_path: the file name (csv) of the distance matrix M, defaults to "data/anon_distance_matrix.csv"
:type distance_matrix_path: str, optional
:param strategy: a function that solves the matching problem given the current donor and recipient requests, defaults to strategies.proximity_match_strategy
:type strategy: a function with 4 inputs: current date, D^t, R^t, M, optional
:param interval: days between subsequent solutions of the PPE matching problem, defaults to 7
:type interval: int, optional
:param max_donation_qty: donation requests with more than this number of units will be considered erroneous and removed, defaults to 1000
:type max_donation_qty: int, optional
:param writeFiles: whether to write the files of each execution, defaults to False
:type writeFiles: bool, optional
:param output_directory: the output directory, defaults to 'output/'
:type output_directory: str, optional
"""
# Data
dirname = os.path.dirname(__file__)
self.all_donors = pd.read_csv(os.path.join(dirname, donor_path),parse_dates=['date'],index_col=0)
self.all_recipients = pd.read_csv(os.path.join(dirname, recipient_path),parse_dates=['date'],index_col=0)
self.distance_mat = pd.read_csv(os.path.join(dirname, distance_matrix_path))
# Initialize dataframes
self.all_granular_decisions = | pd.DataFrame(columns=['don_id', 'rec_id', 'ppe','date', 'qty', 'distance', 'holding_time']) | pandas.DataFrame |
from collections import (
abc,
deque,
)
from decimal import Decimal
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
class TestConcatenate:
def test_append_concat(self):
# GH#1815
d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_concat_copy(self, using_array_manager):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for arr in result._mgr.arrays:
assert arr.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
assert arr.base is df._mgr.arrays[0].base
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
if using_array_manager:
# we get the same array object, which has no base
assert arr is df3._mgr.arrays[0]
else:
assert arr.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
if using_array_manager:
# this is a view on some array in either df or df4
assert any(
np.shares_memory(arr, other)
for other in df._mgr.arrays + df4._mgr.arrays
)
else:
# the block was consolidated, so we got a copy anyway
assert arr.base is None
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
# this is a view on df3
assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
def test_concat_with_group_keys(self):
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_no_items_raises(self):
with pytest.raises(ValueError, match="No objects to concatenate"):
concat([])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat({"a": None, "b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
expected = concat({"b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_order(self):
# GH 17344
dfs = [DataFrame(index=range(3), columns=["a", 1, None])]
dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100)]
result = concat(dfs, sort=True).columns
expected = dfs[0].columns
tm.assert_index_equal(result, expected)
def test_concat_different_extension_dtypes_upcasts(self):
a = Series(pd.array([1, 2], dtype="Int64"))
b = Series(to_decimal([1, 2]))
result = concat([a, b], ignore_index=True)
expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
tm.assert_series_equal(result, expected)
def test_concat_ordered_dict(self):
# GH 21510
expected = concat(
[Series(range(3)), Series(range(4))], keys=["First", "Another"]
)
result = concat({"First": Series(range(3)), "Another": Series(range(4))})
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("pdt", [Series, DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["float"])
def test_concat_no_unnecessary_upcast(dt, pdt):
# GH 13247
dims = pdt(dtype=object).ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = concat(dfs)
assert x.values.dtype == dt
@pytest.mark.parametrize("pdt", [create_series_with_explicit_dtype, DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["int"])
def test_concat_will_upcast(dt, pdt):
with catch_warnings(record=True):
dims = pdt().ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = concat(dfs)
assert x.values.dtype == "float64"
def test_concat_empty_and_non_empty_frame_regression():
# GH 18178 regression test
df1 = DataFrame({"foo": [1]})
df2 = DataFrame({"foo": []})
expected = DataFrame({"foo": [1.0]})
result = | concat([df1, df2]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
These test the private routines in types/cast.py
"""
import pytest
from datetime import datetime, timedelta, date
import numpy as np
import pandas as pd
from pandas import (Timedelta, Timestamp, DatetimeIndex,
DataFrame, NaT, Period, Series)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
maybe_convert_objects,
cast_scalar_to_array,
infer_dtype_from_scalar,
infer_dtype_from_array,
maybe_convert_string_to_object,
maybe_convert_scalar,
find_common_type)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
PeriodDtype)
from pandas.core.dtypes.common import (
is_dtype_equal)
from pandas.util import testing as tm
class TestMaybeDowncast(object):
def test_downcast_conv(self):
# test downcasting
arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
assert (np.array_equal(result, arr))
arr = np.array([8., 8., 8., 8., 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
arr = np.array([8., 8., 8., 8., 9.0000000000005])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
# GH16875 coercing of bools
ser = Series([True, True, False])
result = maybe_downcast_to_dtype(ser, np.dtype(np.float64))
expected = ser
tm.assert_series_equal(result, expected)
# conversions
expected = np.array([1, 2])
for dtype in [np.float64, object, np.int64]:
arr = np.array([1.0, 2.0], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected, check_dtype=False)
for dtype in [np.float64, object]:
expected = np.array([1.0, 2.0, np.nan], dtype=dtype)
arr = np.array([1.0, 2.0, np.nan], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected)
# empties
for dtype in [np.int32, np.float64, np.float32, np.bool_,
np.int64, object]:
arr = np.array([], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'int64')
tm.assert_almost_equal(result, np.array([], dtype=np.int64))
assert result.dtype == np.int64
def test_datetimelikes_nan(self):
arr = np.array([1, 2, np.nan])
exp = np.array([1, 2, np.datetime64('NaT')], dtype='datetime64[ns]')
res = maybe_downcast_to_dtype(arr, 'datetime64[ns]')
tm.assert_numpy_array_equal(res, exp)
exp = np.array([1, 2, np.timedelta64('NaT')], dtype='timedelta64[ns]')
res = maybe_downcast_to_dtype(arr, 'timedelta64[ns]')
tm.assert_numpy_array_equal(res, exp)
def test_datetime_with_timezone(self):
# GH 15426
ts = Timestamp("2016-01-01 12:00:00", tz='US/Pacific')
exp = DatetimeIndex([ts, ts])
res = maybe_downcast_to_dtype(exp, exp.dtype)
tm.assert_index_equal(res, exp)
res = maybe_downcast_to_dtype(exp.asi8, exp.dtype)
tm.assert_index_equal(res, exp)
class TestInferDtype(object):
def testinfer_dtype_from_scalar(self):
# Test that infer_dtype_from_scalar is returning correct dtype for int
# and float.
for dtypec in [np.uint8, np.int8, np.uint16, np.int16, np.uint32,
np.int32, np.uint64, np.int64]:
data = dtypec(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == type(data)
data = 12
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.int64
for dtypec in [np.float16, np.float32, np.float64]:
data = dtypec(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == dtypec
data = np.float(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.float64
for data in [True, False]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.bool_
for data in [np.complex64(1), np.complex128(1)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.complex_
for data in [np.datetime64(1, 'ns'), Timestamp(1),
datetime(2000, 1, 1, 0, 0)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == 'M8[ns]'
for data in [np.timedelta64(1, 'ns'), Timedelta(1),
timedelta(1)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == 'm8[ns]'
for tz in ['UTC', 'US/Eastern', 'Asia/Tokyo']:
dt = Timestamp(1, tz=tz)
dtype, val = infer_dtype_from_scalar(dt, pandas_dtype=True)
assert dtype == 'datetime64[ns, {0}]'.format(tz)
assert val == dt.value
dtype, val = infer_dtype_from_scalar(dt)
assert dtype == np.object_
assert val == dt
for freq in ['M', 'D']:
p = Period('2011-01-01', freq=freq)
dtype, val = infer_dtype_from_scalar(p, pandas_dtype=True)
assert dtype == 'period[{0}]'.format(freq)
assert val == p.ordinal
dtype, val = infer_dtype_from_scalar(p)
            assert dtype == np.object_
assert val == p
# misc
for data in [date(2000, 1, 1),
Timestamp(1, tz='US/Eastern'), 'foo']:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.object_
def testinfer_dtype_from_scalar_errors(self):
with pytest.raises(ValueError):
infer_dtype_from_scalar(np.array([1]))
@pytest.mark.parametrize(
"arr, expected, pandas_dtype",
[('foo', np.object_, False),
(b'foo', np.object_, False),
(1, np.int_, False),
(1.5, np.float_, False),
([1], np.int_, False),
(np.array([1], dtype=np.int64), np.int64, False),
([np.nan, 1, ''], np.object_, False),
(np.array([[1.0, 2.0]]), np.float_, False),
(pd.Categorical(list('aabc')), np.object_, False),
(pd.Categorical([1, 2, 3]), np.int64, False),
(pd.Categorical(list('aabc')), 'category', True),
(pd.Categorical([1, 2, 3]), 'category', True),
(Timestamp('20160101'), np.object_, False),
(np.datetime64('2016-01-01'), np.dtype('<M8[D]'), False),
(pd.date_range('20160101', periods=3),
np.dtype('<M8[ns]'), False),
(pd.date_range('20160101', periods=3, tz='US/Eastern'),
'datetime64[ns, US/Eastern]', True),
(pd.Series([1., 2, 3]), np.float64, False),
(pd.Series(list('abc')), np.object_, False),
(pd.Series(pd.date_range('20160101', periods=3, tz='US/Eastern')),
'datetime64[ns, US/Eastern]', True)])
def test_infer_dtype_from_array(self, arr, expected, pandas_dtype):
dtype, _ = infer_dtype_from_array(arr, pandas_dtype=pandas_dtype)
assert is_dtype_equal(dtype, expected)
def test_cast_scalar_to_array(self):
arr = cast_scalar_to_array((3, 2), 1, dtype=np.int64)
exp = np.ones((3, 2), dtype=np.int64)
tm.assert_numpy_array_equal(arr, exp)
arr = cast_scalar_to_array((3, 2), 1.1)
exp = np.empty((3, 2), dtype=np.float64)
exp.fill(1.1)
tm.assert_numpy_array_equal(arr, exp)
arr = cast_scalar_to_array((2, 3), Timestamp('2011-01-01'))
exp = np.empty((2, 3), dtype='datetime64[ns]')
exp.fill(np.datetime64('2011-01-01'))
tm.assert_numpy_array_equal(arr, exp)
# pandas dtype is stored as object dtype
obj = Timestamp('2011-01-01', tz='US/Eastern')
arr = cast_scalar_to_array((2, 3), obj)
exp = np.empty((2, 3), dtype=np.object)
exp.fill(obj)
tm.assert_numpy_array_equal(arr, exp)
obj = Period('2011-01-01', freq='D')
arr = cast_scalar_to_array((2, 3), obj)
exp = np.empty((2, 3), dtype=np.object)
exp.fill(obj)
| tm.assert_numpy_array_equal(arr, exp) | pandas.util.testing.assert_numpy_array_equal |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 10 14:24:56 2021
@author: <NAME>
Script created for determination of optimal power generation mix looking at
interannual power production variability of DK1 and DK2.
- Plots the generation mix as function of time
- Plots the average optimal capacity with standard deviation as function of
technology
Reads data for the period 2015-2019 downloaded from
data.open-power-system-data.org
Capacity factor is determined using installed capacity per production type
data from www.transparency.entsoe.eu
"""
#%% Import and define
import pypsa
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
def annuity(n,r):
"""Calculate the annuity factor for an asset with lifetime n years and
discount rate of r, e.g. annuity(20,0.05)*20 = 1.6"""
if r > 0:
return r/(1. - 1./(1.+r)**n)
else:
return 1/n
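# Illustrative sanity check of the annuity helper (added example, not from the
# original script): a 20-year asset at a 5% discount rate gives
# annuity(20, 0.05) * 20 ~= 1.6, matching the example in the docstring above.
assert abs(annuity(20, 0.05) * 20 - 1.6) < 0.01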
# Create network and snapshot
network = pypsa.Network()
hours_in_2015 = pd.date_range('2015-01-01T00:00Z','2015-12-31T23:00Z', freq='H')
hours_in_2016 = pd.date_range('2016-01-01T00:00Z','2016-12-31T23:00Z', freq='H')
hours_in_2017 = pd.date_range('2017-01-01T00:00Z','2017-12-31T23:00Z', freq='H')
hours_in_2018 = pd.date_range('2018-01-01T00:00Z','2018-12-31T23:00Z', freq='H')
hours_in_2019 = pd.date_range('2019-01-01T00:00Z','2019-12-31T23:00Z', freq='H')
dk1_off_max = [843,843,843,1277,1277]
dk1_ons_max = [2966,2966,2966,3664,3669]
dk1_sol_max = [421,421,421,664,672]
dk2_off_max = [428,428,428,423,423]
dk2_ons_max = [608,608,608,759,757]
dk2_sol_max = [180,180,180,338,342]
hours_in = [hours_in_2015,hours_in_2016,hours_in_2017,hours_in_2018,hours_in_2019]
years = [2015, 2016, 2017, 2018, 2019]
# Load data: Demand and generators for 6 regions
df_elec = | pd.read_csv('data/data/annual_renewable_generation_dk1_dk2.csv', sep=',', index_col=0) | pandas.read_csv |
"""This module provides tests for the array_stats module."""
import pytest
import pandas as pd
from fractalis.analytics.tasks.shared import array_stats
# noinspection PyMissingOrEmptyDocstring,PyMethodMayBeStatic,PyMissingTypeHints
class TestArrayStats:
def test_get_limma_stats_raises_for_invalid_subsets(self):
df = pd.DataFrame([[5, 10, 15, 20]], index=['foo'],
columns=[0, 1, 2, 3])
subsets = [[0, 1]]
with pytest.raises(ValueError) as e:
array_stats.get_limma_stats(df=df, subsets=subsets)
        assert 'requires at least two' in str(e.value)
def test_get_limma_stats_raises_for_invalid_df(self):
df = pd.DataFrame([], index=['foo'], columns=[])
subsets = [[0], [0]]
with pytest.raises(ValueError) as e:
array_stats.get_limma_stats(df=df, subsets=subsets)
        assert 'dimension 1x2 or more' in str(e.value)
def test_get_limma_stats_returns_correct_for_2_groups(self):
df = pd.DataFrame([[5, 10, 15, 20]], index=['foo'],
columns=[0, 1, 2, 3])
subsets = [[0, 1], [2, 3]]
stats = array_stats.get_limma_stats(df=df, subsets=subsets)
assert all(stat in list(stats) for stat in
['feature', 'logFC', 'AveExpr', 't', 'P.Value', 'adj.P.Val',
'B'])
def test_get_limma_stats_returns_correct_for_3_groups(self):
df = pd.DataFrame([[5, 10, 15, 20]], index=['foo'],
columns=[0, 1, 2, 3])
subsets = [[0, 1], [2], [3]]
stats = array_stats.get_limma_stats(df=df, subsets=subsets)
assert all(stat in list(stats) for stat in
['feature', 'AveExpr', 'F', 'P.Value', 'adj.P.Val'])
assert all(stat not in list(stats) for stat in ['logFC', 'B', 't'])
def test_get_limma_stats_returns_correct_for_4_groups(self):
df = pd.DataFrame([[5, 10, 15, 20]], index=['foo'],
columns=[0, 1, 2, 3])
subsets = [[0, 1], [1, 2], [2, 3], [3, 0]]
stats = array_stats.get_limma_stats(df=df, subsets=subsets)
assert all(stat in list(stats) for stat in
['feature', 'AveExpr', 'F', 'P.Value', 'adj.P.Val'])
assert all(stat not in list(stats) for stat in ['logFC', 'B', 't'])
def test_get_deseq2_stats_returns_correct_for_2_groups(self):
df = pd.DataFrame([[500, 1, 1, 500],
[1, 500, 500, 1]],
columns=[0, 1, 2, 3])
subsets = [[0, 1], [2, 3]]
stats = array_stats.get_deseq2_stats(df=df, subsets=subsets)
assert all(stat in list(stats) for stat in
['baseMean', 'log2FoldChange', 'lfcSE',
'stat', 'pvalue', 'padj'])
def test_deseq2_requires_exactly_2_subsets(self):
with pytest.raises(ValueError):
array_stats.get_deseq2_stats(df= | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import requests,re,json,pickle,os
from lxml import etree
from pymongo import MongoClient
from bs4 import BeautifulSoup
from CrawlFunctions import getSoup, getEtreeHtml
from multiprocessing.dummy import Lock,Pool
import numpy as np
import pandas as pd
from datetime import datetime
client=MongoClient()
db=client.foodstore
clct = client.foodstore.bjstore
shop_result_file = '/Users/xuegeng/Downloads/result-0.json'
with open(shop_result_file,'r') as f:
origin_is_shop_waimai = json.load(f)
#for k,v in origin_is_shop_waimai.iteritems():
# if not v:
# print k
def to_df():
global clct,mongo_lock
cur = clct.find({})
return pd.DataFrame(list(cur))
df = to_df()
df.shop_name = df.shop_name.apply(lambda x:x.replace(u"添加分店","").replace("\n",""))
del df['_id']
df = df.drop_duplicates('shop_id')
#df.to_excel('/Users/xuegeng/Desktop/toxzm.xlsx')
df2 = pd.DataFrame(origin_is_shop_waimai.items(),columns=['shop_id','has_waimai'])
df2.shop_id = df2.shop_id.apply(lambda x:x.split('/')[-1])
df3 = | pd.merge(df,df2,on='shop_id') | pandas.merge |
# Multiscale sampling (MSS) with VASP and LAMMPS
# <NAME>
# Getman Research Group
# Mar 10, 2019
import sys, os
import numpy as np
import pandas as pd
import itertools
from datetime import datetime
from readInput import ReadInput
class VaspToLmps(ReadInput):
"""
read POSCAR/CONTCAR and extract information
"""
def __init__(self, curr_dir):
poscar_file = os.path.join(curr_dir, '2-lammps/0-add-sol/POSCAR_0.POSCAR') # solvated poscar file
mss_input = os.path.join(curr_dir, 'input/master_input.txt')
ddec_file = os.path.join(curr_dir, '1-vasp-vac/singpt_for_charge/ddec/DDEC6_even_tempered_net_atomic_charges.xyz')
super().__init__(poscar_file, mss_input) # solvated poscar file
self.curr_dir = curr_dir
self.coords = pd.DataFrame() # new reordered coords, set in reorderCoords()
### process from coords
self.bonds = [] #set in getBondedList(self)
self.angles= [] #set in getBondedList(self)
self.dihedrals = [] #set in getBondedList(self)
self.ddec = [] # set in self.readDDEC()
self.readDDEC(ddec_file)
self.getBondedList()
def readDDEC(self, ddec_file):
with open(ddec_file) as f:
for i, line in enumerate(f):
if i == 0:
num = int(line.strip().split()[0])
elif 2 <= i < num+2:
charge = float(line.strip().split()[-1])
self.ddec.append(charge)
def reorderCoords(self):
temp_vac_coord = [] #store for temporary vac atom
temp_sol_coord = [] #store for temporary solvent
cum_vac_atom = np.cumsum(list(self.atom['num'][0:self.vac_type])) ## [27 30 33 38 40]
cum_atom = np.cumsum(self.elem['num']) ## [ 27 45 90 211]
count = 0 # match pt-ads vac atom type with info in master_input.txt file
for i in range(len(self.old_coords)):
x = float(self.old_coords[i][0]) # use float in case fracToCartesian() is not called
y = float(self.old_coords[i][1])
z = float(self.old_coords[i][2])
mol_type = int(self.old_coords[i][-2][1:]) # split #1
group_type = int(self.old_coords[i][-1])
if group_type == -1: # vac atom
count += 1
# print('count: ', count)
# print(np.where(count <= cum_vac_atom))
idx = min(np.where(count <= cum_vac_atom)[0])
temp_vac_coord.append([x, y, z, mol_type, group_type, list(self.atom['type'])[idx]])
else: # sol atom
start_type = self.vac_type # index from 0. index solvent atom type, need initialize here
for j in range(group_type): # if multi solvent, add index after last sol
start_type += self.solvent.loc[j,'obj'].total_atom_type
idx = min(np.where((i+1) <= cum_atom)[0])
if self.solvent.loc[group_type, 'name'] == 'water':
if self.elem['elem'][idx] == 'O':
sol_atom_type = start_type + 1
else:
sol_atom_type = start_type + 2
############ MODIFY BELOW FOR NON CxHyOz solvent ############
else: #non-water solvent
if self.elem['elem'][idx] == 'C':
sol_atom_type = start_type + 1
elif self.elem['elem'][idx] == 'O':
sol_atom_type = start_type + 2
elif self.elem['elem'][idx] == 'H':
sol_atom_type = start_type + 3 # decide later for H_C or H_O
############ try not add atom name and elem in the coords
temp_sol_coord.append([x, y, z, mol_type, group_type, sol_atom_type]) #['x','y','z','mol','grp','type']
# print("self.elem\n")
# print(self.elem) #{'elem': ['Pt', 'C', 'O', 'H'], 'num': [27, 18, 45, 121]}
# print("\n")
# for elem in self.elem['elem']:
# print(elem)
df_vac = pd.DataFrame(temp_vac_coord)
df_sol = pd.DataFrame(temp_sol_coord)
# sort by group, then molecule, than element
df_sol.sort_values(by=[4, 3, 5], inplace=True) #columns = ['x','y','z','mol','grp','type']
self.coords = | pd.concat([df_vac, df_sol]) | pandas.concat |
#!/usr/bin/env python
import datetime
import numpy as np
import pandas as pd
from dateutil import parser
from linear_segment import SegmentedLinearRegressor
def get_utc_days(format='%Y-%m-%d'):
utc = datetime.datetime.utcnow()
yesterday = utc.date() - datetime.timedelta(1)
return utc.strftime(format), yesterday.strftime(format)
def index2int(index):
"""Helper function to convert timeindexes to ints"""
return index.view(np.int64).reshape(-1,1)
def int2dt(array):
"""Helper function to convert int64 to timeseries[ns]"""
return pd.to_datetime(pd.Series(array.squeeze(), name='timestamp'),utc=True)
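# Illustrative round trip for the two helpers above (added example, hypothetical data):
# index2int turns a DatetimeIndex into an int64 column vector of epoch nanoseconds
# suitable for sklearn-style regressors, and int2dt converts such an array back into
# a UTC-aware timestamp Series.
#   idx = pd.date_range('2021-01-01', periods=3, freq='D')
#   int2dt(index2int(idx))  # -> Series of the same three timestamps, tz-aware UTC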
def pandas_s3_glob(globstring):
'''Imports and reads all csv files in s3 folder'''
from s3fs import S3FileSystem
s3 = S3FileSystem(anon=False)
file_list = s3.glob(globstring)
if len(file_list) == 0:
print("No files to consolidate")
return
return pd.concat([pd.read_csv('s3://'+f) for f in file_list],axis=0, ignore_index=True)
def consolidate_s3_csv(s3_folder):
df = pandas_s3_glob('s3://'+s3_folder+'/*.csv')
if df is None:
return
subfolder = s3_folder.split('/')[-1] # Get subfolder e.g. date here
root = '/'.join(s3_folder.split('/')[:-1]) # Rejoin all other tokens to get root folder
df.to_csv('s3://'+root+'/'+subfolder+'.csv', index=False) # Write consolidated csv back out
def list_s3_subfolders(s3_folder='whisky-pricing'):
from s3fs import S3FileSystem
s3 = S3FileSystem(anon=False)
return s3.glob(f's3://{s3_folder}/*[0-9]')
def remove_old_s3_date_subfolders(bucket='whisky-pricing', days_old=2):
import datetime
from s3fs import S3FileSystem
s3 = S3FileSystem(anon=False)
oldest_date = datetime.datetime.utcnow() - datetime.timedelta(days_old)
subfolders = [x.split('/')[-1] for x in list_s3_subfolders(bucket)]
for subf in subfolders:
if datetime.datetime.strptime(subf, '%Y-%m-%d') < oldest_date:
print(f'Removing subfolder: s3://{bucket}/{subf}/...')
s3.rm(f's3://{bucket}/{subf}', recursive=True)
def regroup_df_hourly(df, time_column='time'):
# Get list of unique timestamps
unique_time = df.time.unique()
# Ensure we have UTC aware timestamps
tzinfos = {'BST': 3600, 'GMT': 0}
    fix_time = pd.Series(
        [pd.Timestamp(parser.parse(x, tzinfos=tzinfos)).tz_convert('UTC') for x in unique_time],
        dtype='datetime64[ns, UTC]', name='fix_time')
fix_time = fix_time.dt.floor('h') # Get hour only
unique_time = pd.concat([pd.Series(unique_time, name=time_column), fix_time], axis=1)
unique_time = unique_time.groupby('fix_time').first().reset_index()
df = df.merge(unique_time, left_on=time_column, right_on=time_column, how='inner') # Drop all timestamps not on the hour
df.drop(time_column, axis=1, inplace=True)
df.rename({'fix_time': time_column}, axis=1, inplace=True)
return df
def get_hourly():
# Load up full data
print("Reading existing hourly spread data...")
df = pd.read_csv('s3://whisky-pricing/spreads.csv', parse_dates=['time'])
# Get latest imported date
print("Importing non consolidated dates...")
# Save last N months only
now = pd.to_datetime('now').tz_localize('UTC')
lastN = now - pd.DateOffset(months=12)
df = df.query("time >= @lastN")
# Get missing days
missing_days = []
delta = now - lastN
unique_days = df.time.dt.date.unique()
for i in range(delta.days):
day = (lastN + pd.Timedelta(i+1, unit='D')).date()
if day in unique_days:
print(f'{day} already imported.')
else:
try:
df_day = | pd.read_csv(f's3://whisky-pricing/{day}.csv', parse_dates=False) | pandas.read_csv |
import numpy as np
import pandas as pd
import bisect
import tqdm
import utils.utils as utils
import _settings
import ipdb
import torch
_LocalConformal = "LocalConformal"
_LocalConformalMAD = "LocalConformalMAD"
class NaiveKernel():
def __init__(self, type='Gaussian'):
self._device = utils.gpuid_to_device(-1)
self.type = type
def to(self, device):
self._device = device
def K(self, x1, x2=None):
if self.type == 'Gaussian':
if x2 is None: return 1.
with torch.no_grad():
diff = torch.tensor(x1 - x2, device=self._device, dtype=torch.float)
kij = torch.exp(-torch.pow(diff, 2).sum(-1))
return float(kij.detach())
raise NotImplementedError()
def Ki(self, xi, Xs, speedup_info=None):
if self.type == 'Gaussian':
with torch.no_grad():
if speedup_info is None:
speedup_info = torch.tensor(Xs, device=self._device, dtype=torch.float, requires_grad=False)
Xs = speedup_info
if Xs.device != self._device:
Xs = Xs.to(self._device)
xi = torch.tensor(xi, device=self._device, dtype=torch.float)
diff_i = xi - Xs
# kij = torch.exp(-torch.pow(self.A(diff_i), 2).sum(-1))
kij = torch.exp(-torch.pow(diff_i, 2).sum(-1))
return kij, Xs
def weighted_quantile_faster(arr, weights, q):
idx = np.argsort(arr)
arr, weights = arr[idx], weights[idx]
return weighted_quantile_faster2(arr, weights, q)
def weighted_quantile_faster2(arr, weights, q):
qs = np.cumsum(weights)
idx = bisect.bisect_left(qs, q, lo=0, hi=len(qs)-1)
return arr[idx]
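# Illustrative usage of the weighted-quantile helpers above (added sketch, not part
# of the original module). Weights are assumed to be non-negative and sum to 1; with
# uniform weights the result reduces to an ordinary empirical quantile.
def _example_weighted_quantile():
    arr = np.array([3.0, 1.0, 2.0, 4.0])
    w = np.full(4, 0.25)  # uniform, normalised weights
    return weighted_quantile_faster(arr, w, 0.5)  # -> 2.0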
class PIConstructor:
def __init__(self):
pass
def PI(self, x, alpha):
raise NotImplementedError()
@classmethod
def eval(cls, x, y, PI, alpha=0.05, quiet=True, PI_kwargs={}, PI_list_kwargs={}):
if not quiet:
iter_ = tqdm.tqdm(enumerate(x), desc='eval_conformal', total=len(x))
else:
iter_ = enumerate(x)
res = []
for i, xi in iter_:
PI_params = utils.merge_dict_inline({k:v[i] for k,v in PI_list_kwargs.items()}, PI_kwargs)
lb, ub, yhat, extra = PI(xi, alpha=alpha, **PI_params)
res.append({'lo': lb, 'hi': ub, 'y': y[i], 'yhat': yhat, 'extra': extra, 'index': i})
if xi.shape[0] == 1: res[-1].update({"x": xi[0]})
return | pd.DataFrame(res) | pandas.DataFrame |
import argparse
import json
import pandas as pd
import numpy as np
from numpy.random.mtrand import RandomState
from shapely.geometry import Polygon
from shapely.ops import cascaded_union
from sklearn.model_selection import KFold
box_lon1, box_lat1 = 4.385218620300293, 51.85078428900754
box_lon2, box_lat2 = 4.404788017272949, 51.86644856213264
border_box_lon1, border_box_lat1 = 4.385776519775391, 51.850837307588726
border_box_lon2, border_box_lat2 = 4.407234191894531, 51.864938032294326
def iou(poly_true: Polygon, poly_pred: Polygon):
int_area = poly_pred.intersection(poly_true).area
polygons = [poly_pred, poly_true]
u = cascaded_union(polygons)
return float(int_area / u.area)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--csv', default="coordinates.csv")
args = parser.parse_args()
df = | pd.read_csv(args.csv) | pandas.read_csv |
import pandas as pd
import requests
import lxml.html
from datetime import datetime
from urllib.parse import urljoin
pd.set_option('display.max_colwidth', 50)
pd.set_option("display.expand_frame_repr", False)
#Urls
phishing = "https://www.scmagazine.com/topic/phishing"
patch_management = "https://www.scmagazine.com/topic/patch-management/"
ransomware = "https://www.scmagazine.com/ransomware"
software_dev = "https://sdtimes.com/"
cyware = "https://cyware.com/cyber-security-news-articles"
threatpost = "https://threatpost.com/"
hacker_news = "https://thehackernews.com/"
exfiltration = "https://www.bleepingcomputer.com/tag/data-exfiltration/"
podcast = "https://www.itworldcanada.com/podcasts#cyber-security-today"
fbi_flash = "https://www.fbi.gov/investigate/cyber/news"
urls = [
## Phishing
phishing,
## Patch Management
patch_management,
## Ransomware
ransomware, #! Weird site is broken for now
## SDLC
software_dev,
## General CyberSec News
cyware,
threatpost,
hacker_news,
fbi_flash,
## Exfiltration
exfiltration,
## CyberSec Podcast
podcast
]
#Converts each link in list to each own with different text
def add_htmllink(x):
#htmllink = [f"<a href={htmllink} target='_blank'>Link</a>" for htmllink in x]
htmllink = [f"<a href={htmllink} target='_blank'><button>Link</button></a>" for htmllink in x]
return htmllink
#Style Source Title
def style_source(y):
    source_title = f"<h1 style='color:#add8e6;font-size:20px;text-align:center;text-decoration: underline;'>{y}</h1>"
return source_title
## General CyberSec News
#To parse multiple sites but to note !# each site might have their own special way
for p in urls:
page = requests.get(p, headers={'User-Agent': 'Mozilla/5.0'})
    doc = lxml.html.fromstring(page.text) #Changing page.content to page.text fixed the encoding problem
#If checks the variable p for which website to scrape
#Phishing
if p == phishing:
p_title = doc.xpath('//h5[@class="ContentTeaser_title__3Gv3Q"]/text()')[:3] #limits to 3 posts
p_descrip = doc.xpath('//div/div[@class="ContentTeaser_summary__34nbw"]/p/text()')[:3]
p_links = doc.xpath('//a[@class="ContentTeaser_titleLink__30KhQ"]/@href')[:3]
p_titlesplit = [p_title.strip() for p_title in p_title] #if brackets added remove split
p_linkhtml = ['https://www.scmagazine.com' + p_links if p_links.startswith('/') else p_links for p_links in p_links]
ph = pd.DataFrame({
"Title": p_titlesplit,
"Description": p_descrip,
"Link": add_htmllink(p_linkhtml) #will probably need a for loop for each link
})
phrow = pd.DataFrame({
"Title": " ",
"Description": style_source("Phishing - SCMagazine"),
"Link": " "
}, index = [0])
ph = pd.concat([phrow, ph]).reset_index(drop = True)
#Patch Management
elif p == patch_management:
pm_title = doc.xpath('//h5[@class="ContentTeaser_title__3Gv3Q"]/text()')[:3]
pm_descrip = doc.xpath('//div/div[@class="ContentTeaser_summary__34nbw"]/p/text()')[:3]
pm_links = doc.xpath('//a[@class="ContentTeaser_titleLink__30KhQ"]/@href')[:3]
pm_titlesplit = [pm_title.strip() for pm_title in pm_title] #if brackets added remove split
pm_linkhtml = ['https://www.scmagazine.com' + pm_links if pm_links.startswith('/') else pm_links for pm_links in pm_links]
pm = pd.DataFrame({
"Title": pm_titlesplit,
"Description": pm_descrip,
"Link": add_htmllink(pm_linkhtml)
})
pmrow = pd.DataFrame({
"Title": " ",
"Description": style_source("Patch Management - SCMagazine"),
"Link": " "
}, index = [0])
pm = pd.concat([pmrow, pm]).reset_index(drop = True)
#Ransomware
elif p == ransomware:
rm_title = doc.xpath('//h5[@class="ContentTeaser_title__3Gv3Q"]/text()')[:3]
rm_descrip = doc.xpath('//div/div[@class="ContentTeaser_summary__34nbw"]/p/text()')[:3]
rm_links = doc.xpath('//a[@class="ContentTeaser_titleLink__30KhQ"]/@href')[:3]
rm_titlesplit = [rm_title.strip() for rm_title in rm_title] #if brackets added remove split
rm_linkhtml = ['https://www.scmagazine.com' + rm_links if rm_links.startswith('/') else rm_links for rm_links in rm_links]
rm = pd.DataFrame({
"Title": rm_titlesplit,
"Description": rm_descrip,
"Link": add_htmllink(rm_linkhtml)
})
rmrow = pd.DataFrame({
"Title": " ",
"Description": style_source("Ransomeware - SCMagazine"),
"Link": " "
}, index = [0])
rm = pd.concat([rmrow, rm]).reset_index(drop = True)
#SDLC
elif p == software_dev:
sdlc_title = doc.xpath('//div[@class="col-lg-8 col-md-7 col-sm-9 col-xs-12"]/h4//text()')[:3]
sdlc_descrip = doc.xpath('//div[@class="col-lg-8 col-md-7 col-sm-9 col-xs-12"]/p//text()')[:3]
sdlc_links = doc.xpath('//div[@class="col-lg-8 col-md-7 col-sm-9 col-xs-12"]/h4//@href')[:3] #! Broken
sdlc_titlesplit = [sdlc_title.strip() for sdlc_title in sdlc_title] #if brackets added remove split
sdlc = pd.DataFrame({
"Title": sdlc_titlesplit,
"Description": sdlc_descrip,
"Link": add_htmllink(sdlc_links)
})
sdlc_row = pd.DataFrame({
"Title": " ",
"Description": style_source("SDLC"),
"Link": " "
}, index = [0])
sdlc = pd.concat([sdlc_row, sdlc]).reset_index(drop = True)
#CyberSec News
elif p == cyware:
title = doc.xpath('//h1[@class="cy-card__title m-0 cursor-pointer pb-3"]/text()')[:5]
descrip = doc.xpath('//div[@class="cy-card__description"]/text()')[:5]
links = doc.xpath('//div[@class="cy-panel__body"]/a[not(contains(@href,"alerts"))]/@href')[:5]
#Cleaning up the data
titlestrip = [title.lstrip().rstrip() for title in title] #removes the new line and spaces from left and right
descripstrip = [descrip.lstrip().rstrip() for descrip in descrip] #removes the new line and spaces from left and right
linkhtml = ['https://cyware.com' + links if links.startswith('/') else links for links in links] #If it starts with a / add https://cyware.com/, broken links
#Create a dataframe for the data
gc = pd.DataFrame({
"Title": titlestrip,
"Description": descripstrip,
"Link": add_htmllink(linkhtml)
})
Siterow = pd.DataFrame({
"Title": " ",
"Description": style_source("Cyware"),
"Link": " "
}, index = [0])
gc = | pd.concat([Siterow, gc]) | pandas.concat |
from ehr_functions.features import occurrence
import pandas as pd
def test_nth_occurrence():
df = pd.DataFrame({
'PatientID': [1, 1, 2, 2],
'EncounterDate': ['01/01/2020', '01/05/2020', '01/01/2020', '01/02/2020'],
'Diagnosis1': ['A', 'F', 'D', 'C'],
'Diagnosis2': [None, 'B', 'B', 'A'],
'Diagnosis3': [None, None, 'C', 'A'],
})
df['EncounterDate'] = | pd.to_datetime(df['EncounterDate']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
import json
import os
import tarfile
import tempfile
from io import BytesIO
import numpy as np
import pandas as pd
import skimage.io as io
import warnings
from skimage.util import img_as_uint
from skimage.util import img_as_ubyte
import skimage.measure as measure
from pcnaDeep.tracker import track_mask
def relabel_trackID(label_table):
"""Relabel trackID in tracking table, starting from 1.
Args:
label_table (pandas.DataFrame): tracked object table.
Returns:
pandas.DataFrame: tracked object table with relabeled trackID.
"""
dic = {}
ori = list(np.unique(label_table['trackId']))
for i in range(1, len(ori) + 1):
dic[ori[i - 1]] = i
dic[0] = 0
for i in range(label_table.shape[0]):
label_table.loc[i, 'trackId'] = dic[label_table['trackId'][i]]
label_table.loc[i, 'parentTrackId'] = dic[label_table['parentTrackId'][i]]
label_table.loc[i, 'lineageId'] = dic[label_table['lineageId'][i]]
return label_table
def label_by_track(mask, label_table):
"""Label objects in mask with track ID
Args:
mask (numpy.ndarray): uint8 np array, output from main model.
label_table (pandas.DataFrame): track table.
Returns:
numpy.ndarray: uint8/16 dtype based on track count.
"""
assert mask.shape[0] == np.max(label_table['frame'] + 1)
if np.max(label_table['trackId']) * 2 > 254:
mask = mask.astype('uint16')
for i in np.unique(label_table['frame']):
sub_table = label_table[label_table['frame'] == i]
sl = mask[i, :, :].copy()
lbs = np.unique(sl).tolist()
if lbs[-1] + 1 != len(lbs):
            raise ValueError('Mask is not continuously labeled or is wrongly labeled.')
ori_labels = set(lbs) - {0}
untracked = list(ori_labels - set(list(sub_table['continuous_label'])))
# remove untracked
for j in untracked:
sl[mask[i, :, :] == j] = 0
# update tracked
for j in sub_table.index:
sl[mask[i, :, :] == sub_table.loc[j, 'continuous_label']] = sub_table.loc[j, 'trackId']
mask[i, :, :] = sl.copy()
return mask
def get_lineage_dict(label_table):
"""Deprecated
Generate lineage dictionary in Deepcell tracking format.
Args:
label_table (pandas.DataFrame): table processed.
Returns:
dict: lineage dictionary that fits Deepcell.
"""
out = {}
for i in list(np.unique(label_table['trackId'])):
i = int(i)
sub_table = label_table[label_table['trackId'] == i]
out[i] = {'capped': False, 'daughters': [], 'frame_div': None, 'frames': list(sub_table['frame']), 'label': i,
'parent': None}
if list(sub_table['parentTrackId'])[0] != 0:
out[i]['parent'] = list(sub_table['parentTrackId'])[0]
return out
def get_lineage_txt(label_table):
"""Generate txt table in Cell Tracking Challenge (CTC) format.
Args:
label_table (pandas.DataFrame): table processed, should not has gaped tracks.
Returns:
pandas.DataFrame: lineage table in .txt format that fits CTC.
"""
dic = {'id': [], 'appear': [], 'disappear': [], 'parent': []}
for i in np.unique(label_table['trackId']):
sub = label_table[label_table['trackId'] == i]
begin = np.min(sub['frame'])
end = np.max(sub['frame'])
parent = np.unique(sub['parentTrackId'])
dic['id'].append(i)
dic['appear'].append(int(begin))
dic['disappear'].append(int(end))
dic['parent'].append(int(parent))
return pd.DataFrame(dic)
def save_trks(filename, lineages, raw, tracked):
"""Deprecated
Copied from deepcell_tracking.utils, version 0.3.1. Author <NAME>.
Modification: changed trks to trk to fit caliban labeler.
Saves raw, tracked, and lineage data into one trk_file.
Args:
filename (str): full path to the final trk files.
lineages (dict): a list of dictionaries saved as a json.
raw (numpy.ndarray): 4D raw images data. THWC
tracked (numpy.ndarray): 4D annotated image data. THWC
Raises:
ValueError: filename does not end in ".trk".
"""
if not str(filename).lower().endswith('.trk'):
raise ValueError('filename must end with `.trk`. Found %s' % filename)
with tarfile.open(filename, 'w') as trks:
with tempfile.NamedTemporaryFile('w', delete=False) as lineages_file:
json.dump(lineages, lineages_file, indent=4)
lineages_file.flush()
lineages_file.close()
trks.add(lineages_file.name, 'lineage.json')
os.remove(lineages_file.name)
with tempfile.NamedTemporaryFile(delete=False) as raw_file:
np.save(raw_file, raw)
raw_file.flush()
raw_file.close()
trks.add(raw_file.name, 'raw.npy')
os.remove(raw_file.name)
with tempfile.NamedTemporaryFile(delete=False) as tracked_file:
np.save(tracked_file, tracked)
tracked_file.flush()
tracked_file.close()
trks.add(tracked_file.name, 'tracked.npy')
os.remove(tracked_file.name)
def load_trks(filename):
"""Deprecated
Copied from deepcell_tracking.utils, version 0.3.1. Author <NAME>en Lab
Load a trk/trks file.
Args:
filename (str): full path to the file including .trk/.trks.
Returns:
dict: A dictionary with raw, tracked, and lineage data.
"""
with tarfile.open(filename, 'r') as trks:
# numpy can't read these from disk...
array_file = BytesIO()
array_file.write(trks.extractfile('raw.npy').read())
array_file.seek(0)
raw = np.load(array_file)
array_file.close()
array_file = BytesIO()
array_file.write(trks.extractfile('tracked.npy').read())
array_file.seek(0)
tracked = np.load(array_file)
array_file.close()
# trks.extractfile opens a file in bytes mode, json can't use bytes.
_, file_extension = os.path.splitext(filename)
if file_extension == '.trks':
trk_data = trks.getmember('lineages.json')
lineages = json.loads(trks.extractfile(trk_data).read().decode())
# JSON only allows strings as keys, so convert them back to ints
for i, tracks in enumerate(lineages):
lineages[i] = {int(k): v for k, v in tracks.items()}
elif file_extension == '.trk':
trk_data = trks.getmember('lineage.json')
lineage = json.loads(trks.extractfile(trk_data).read().decode())
# JSON only allows strings as keys, so convert them back to ints
lineages = [{int(k): v for k, v in lineage.items()}]
return {'lineages': lineages, 'X': raw, 'y': tracked}
def lineage_dic2txt(lineage_dic):
"""Convert deepcell .trk lineage format to CTC txt format.
Args:
lineage_dic (list[dict]): Dictionary in Deepcell format extracted from .trk file.
Returns:
pandas.DataFrame: dictionary with three columns: track ID, appear frame, disappear frame.
"""
lineage_dic = lineage_dic[0]
dic = {'id': [], 'appear': [], 'disappear': []}
pars = {}
for d in lineage_dic.values():
i = d['label']
begin = np.min(d['frames'])
end = np.max(d['frames'])
dic['id'].append(i)
dic['appear'].append(int(begin))
dic['disappear'].append(int(end))
if d['daughters']:
for dg in d['daughters']:
pars[dg] = i
dic = pd.DataFrame(dic)
dic['parents'] = 0
# resolve parents
for dg in list(pars.keys()):
dic.loc[dic.index[dic['id'] == dg], 'parents'] = pars[dg]
return dic
def break_track(label_table):
"""Break tracks in a lineage table into single tracks, where
NO gaped tracks allowed. All gaps will be transferred into parent-daughter
relationship.
Args:
label_table (pandas.DataFrame): tracked object table to process.
Algorithm:
1. Rename raw parentTrackId to mtParTrk.
2. Initiate new parentTrackId column with 0.
3. Separate all tracks individually.
Notes:
In original lineage table, single track can be gaped, lineage only associates
mitosis tracks, not gaped tracks.
Returns:
pandas.DataFrame: processed tracked object table.
"""
# For parent track that has one daughter extrude into the parent frame,
# e.g. parent: t1-10; daughter1: t8-20; daughter2: t11-20.
# re-organize the track by trimming parent and add to daughter,
# i.e. parent: t1-7; daughter1: t8-20; daughter2: t8-10, t11-20
# If both daughter extrude, e.g. daughter2: t9-20, then trim parent directly
# to t1-8. Since this indicates faulty track, warning shown
# *** this should NOT usually happen
for l in np.unique(label_table['trackId']):
daugs = np.unique(label_table[label_table['parentTrackId'] == l]['trackId'])
if len(daugs) == 2:
daug1 = label_table[label_table['trackId'] == daugs[0]]['frame'].iloc[0]
daug2 = label_table[label_table['trackId'] == daugs[1]]['frame'].iloc[0]
par = label_table[label_table['trackId'] == l]
par_frame = par['frame'].iloc[-1]
if par_frame >= daug1 and par_frame >= daug2:
label_table.drop(par[(par['frame'] >= daug1) | (par['frame'] >= daug2)].index, inplace=True)
                raise UserWarning('Faulty mitosis, check parent: ' + str(l) +
', daughters: ' + str(daugs[0]) + '/' + str(daugs[1]))
elif par_frame >= daug1:
# migrate par to daug2
label_table.loc[par[par['frame'] >= daug1].index, 'trackId'] = daugs[1]
label_table.loc[par[par['frame'] >= daug1].index, 'parentTrackId'] = l
elif par_frame >= daug2:
# migrate par to daug1
label_table.loc[par[par['frame'] >= daug2].index, 'trackId'] = daugs[0]
label_table.loc[par[par['frame'] >= daug2].index, 'parentTrackId'] = l
label_table = label_table.sort_values(by=['trackId', 'frame'])
# break tracks individually
max_trackId = np.max(label_table['trackId'])
label_table['mtParTrk'] = label_table['parentTrackId']
label_table['parentTrackId'] = 0
label_table['ori_trackId'] = label_table['trackId']
new_table = pd.DataFrame()
for l in np.unique(label_table['trackId']):
tr = label_table[label_table['trackId'] == l].copy()
if np.max(tr['frame']) - np.min(tr['frame']) + 1 != tr.shape[0]:
sep, max_trackId = separate(list(tr['frame']).copy(), list(tr['mtParTrk']).copy(), l, base=max_trackId)
tr.loc[:, 'frame'] = sep['frame']
tr.loc[:, 'trackId'] = sep['trackId']
tr.loc[:, 'parentTrackId'] = sep['parentTrackId']
tr.loc[:, 'mtParTrk'] = sep['mtParTrk']
new_table = new_table.append(tr)
# For tracks that have mitosis parents, find new ID of their parents
for l in np.unique(new_table['trackId']):
tr = new_table[new_table['trackId'] == l].copy()
ori_par = list(tr['mtParTrk'])[0]
if ori_par != 0:
app = np.min(tr['frame'])
search = new_table[new_table['ori_trackId'] == ori_par]
new_par = search.iloc[np.argmin(abs(search['frame'] - app))]['trackId']
new_table.loc[tr.index, 'mtParTrk'] = new_par
for i in range(new_table.shape[0]):
new_table.loc[i, 'parentTrackId'] = np.max(
((new_table['parentTrackId'][i]), new_table['mtParTrk'][i])) # merge mitosis information in to parent
return new_table
def separate(frame_list, mtPar_list, ori_id, base):
"""For single gaped track, separate it into all complete tracks.
Args:
frame_list (list): frames list, length equals to label table.
mtPar_list (list): mitosis parent list, for solving mitosis relationship.
ori_id (int): original track ID.
base (int): base track ID, will assign new track ID sequentially from base + 1.
Returns:
dict: Dictionary of having following keys: frame, trackId, parentTrackId, mtParTrk.
"""
trackId = [ori_id for _ in range(len(frame_list))]
parentTrackId = [0 for _ in range(len(frame_list))]
for i in range(1, len(frame_list)):
if frame_list[i] - frame_list[i - 1] != 1:
trackId[i:] = [base + 1 for s in range(i, len(trackId))]
parentTrackId[i:] = [trackId[i - 1] for s in range(i, len(trackId))]
mtPar_list[i:] = [0 for s in range(i, len(trackId))]
base += 1
rt = {'frame': frame_list, 'trackId': trackId, 'parentTrackId': parentTrackId, 'mtParTrk': mtPar_list}
return rt, base
def save_seq(stack, out_dir, prefix, dig_num=3, dtype='uint16', base=0, img_format='.tif', keep_chn=True, sep='-'):
"""Save image stack and label sequentially.
Args:
stack (numpy array) : image stack in THW format (Time, Height, Width).
out_dir (str) : output directory.
prefix (str) : prefix of single slice, output will be prefix-000x.tif/png.
(see sep below for separator).
dig_num (int) : digit number (3 -> 00x) for labeling image sequentially.
dtype (numpy.dtype) : data type to save, either 'uint8' or 'uint16'.
base (int) : base number of the label (starting from).
img_format (str): image format, '.tif' or '.png', remind the dot.
keep_chn (bool): whether to keep full channel or not.
sep (str): separator between file name and id, default '-'.
"""
if len(stack.shape) == 4 and not keep_chn:
stack = stack[:, :, :, 0]
for i in range(stack.shape[0]):
fm = ("%0" + str(dig_num) + "d") % (i + base)
name = os.path.join(out_dir, prefix + sep + fm + img_format)
if dtype == 'uint16':
img = img_as_uint(stack[i, :])
elif dtype == 'uint8':
img = img_as_ubyte(stack[i, :])
else:
raise ValueError("Seq save only accepts uint8 or uint16 format.")
io.imsave(name, img)
return
def generate_calibanTrk(raw, mask, out_dir, dt_id, digit_num=3, displace=100, gap_fill=3, track=None,
render_phase=False):
"""Deprecated
Generate caliban .trk format for annotation from raw and ground truth mask.
Args:
raw (numpy.ndarray): raw image stack.
mask (numpy.ndarray): mask of the image, can be either.
out_dir (str): output directory.
digit_num (int): digit of ID in the output image prefix.
displace (int): maximum movement for tracking ground truth mask.
gap_fill (int): gap filling for tracking ground truth mask.
track (pandas.DataFrame): tracked object table, if None, will track the mask first.
render_phase (bool): whether to render cell cycle phase from the mask, will pass to `pcnaDeep.tracker.track_mask`.
Outputs:
File of deepcell caliban .trk format.
Returns:
pandas.DataFrame: processed tracked object table, no gaped tracks in the table (as required by .trk).
"""
fm = ("%0" + str(digit_num) + "d") % dt_id
if track is None:
track, mask = track_mask(mask, displace=displace, gap_fill=gap_fill, render_phase=render_phase)
track_new = relabel_trackID(track.copy())
track_new = break_track(track_new.copy())
tracked_mask = label_by_track(mask.copy(), track_new.copy())
dic = get_lineage_dict(track_new.copy())
if len(raw.shape) < 4:
raw = np.expand_dims(raw, axis=3)
save_trks(os.path.join(out_dir, fm + '.trk'), dic, raw, np.expand_dims(tracked_mask, axis=3))
return track_new
def findM(gt_cls, direction='begin'):
"""Find M exit/entry from ground truth classification.
The method assumes that all mitosis classification is continuous, therefore only suitable.
for processing classification ground truth. For processing prediction, use `pcnaDeep.refiner.deduce_transition`.
Args:
gt_cls (list): list of classifications.
direction (str): begin/end, search M from which terminal of the classification list.
Returns:
int: index of the mitosis entry/exit.
"""
# possible for parent end with 'G', but daughter must begin with 'M'
i = 0
if direction == 'begin':
if gt_cls[0] != 'M':
return None
while gt_cls[i] == 'M':
i += 1
if i == len(gt_cls):
break
return i - 1
else:
gt_cls = gt_cls[::-1]
if 'M' not in gt_cls:
return None
i = gt_cls.index('M')
while gt_cls[i] == 'M':
i += 1
if i == len(gt_cls):
break
return -i
def check_continuous_track(table):
    """Check if every track is continuous (no gap). Returns a list of track IDs for tracks that have gaps.
"""
out = []
for i in list(np.unique(table['trackId'])):
f = table[table['trackId'] == i]['frame'].tolist()
if f[-1] - f[0] != len(f) - 1:
out.append(i)
return out
def mergeTrkAndTrack(trk_path, table_path, return_mask=False):
"""Deprecated
Merge ground truth .trk and tracked table. Used to generate ground truth tracked table.
Args:
trk_path (str): path to deepcell-label Caliban .trk file.
table_path (str): path to tracked object table.
return_mask (bool): whether to return mask.
Returns:
pandas.DataFrame: tracked object table, trackId, parentTrackId, mtParTrk
corrected by ground truth.
numpy.ndarray: tracked mask. If parameter return_mask=True, will not return mask.
dict: standard table for initializing `pcnaDeep.resolver.Resolver`.
list: Mitosis daughter that does not has mitosis classification. This should not happen if
the annotation is correct.
"""
trk = load_trks(trk_path)
lin = trk['lineages'][0]
mask = trk['y'][:, :, :, 0]
del trk
table = | pd.read_csv(table_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""House Prices Isabel.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1crlL-Zf_EXl_hSIAIwKw17wb81Bvnqvg
"""
import pandas as pd
import numpy as np
from sklearn import neighbors, tree
from sklearn.linear_model import LinearRegression
from sklearn import datasets, linear_model, svm
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
from sklearn.gaussian_process import GaussianProcessRegressor
from scipy import stats
from scipy.stats import norm, skew
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style('darkgrid')
import warnings
warnings.filterwarnings("ignore")
train = | pd.read_csv("train.csv") | pandas.read_csv |
"""
v167 2022.01.18
automatic .csv to AWS-dynamoDB -- a python AWS-lambda-function
this lambda function will need these permissions:
AmazonDynamoDBFullAccess
AWSLambdaDynamoDBExecutionRole
AWSLambdaInvocation-DynamoDB
AWSLambdaBasicExecutionRole
AmazonS3FullAccess
AmazonS3ObjectLambdaExecutionRolePolicy
The idea is to automate the process of moving .csv files into dynamoDB,
starting with a set of clean .csv files
and ending with metadata files, files sent to a 'completed' folder,
and the data entered into a new dynamoDB table. (simplified example)
# example metadata_csv file, for making custom files:
```
column_name,AWS_column_dtype,pandas_column_dtype,example_item_list,mixed_datatype_flag_list,missing_data_flag_list,duplicate_data_flag_list
row_id,S,object,1,True,False,False
YearsExperience,N,float64,1.1,False,False,True
Salary,N,int64,39343,False,False,False
```
# Examples of input
input = {
directory_name = "YOUR_S3_DIRECTORY_NAME",
s3_bucket_name = "YOUR_BUCKET_NAME"
}
e.g.
{
"s3_bucket_name": "YOUR_BUCKET_NAME",
"target_directory": "YOUR_S3_DIRECTORY_NAME/OPTIONAL_SUBFOLDER_NAME"
}
or
{
"s3_bucket_name": "YOUR_BUCKET_NAME",
"target_directory": "YOUR_S3_DIRECTORY_NAME/OPTIONAL_SUBFOLDER_NAME",
"default_folder_for_completed_csv_files": "COMPLETED_FILES_FOLDER_NAME/"
}
or: if you are going from_to within an input file
{
"s3_bucket_name": "YOUR_BUCKET_NAME",
"target_directory": "YOUR_S3_DIRECTORY_NAME/OPTIONAL_SUBFOLDER_NAME",
"default_folder_for_completed_csv_files": "COMPLETED_FILES_FOLDER_NAME/",
"multi_part_or_split_csv_flag": "True"
}
or: if you are going from_to within an input file
{
"s3_bucket_name": "YOUR_BUCKET_NAME",
"target_directory": "YOUR_S3_DIRECTORY_NAME/OPTIONAL_SUBFOLDER_NAME",
"default_folder_for_completed_csv_files": "COMPLETED_FILES_FOLDER_NAME/",
"multi_part_or_split_csv_flag": "True",
"FROM_here_in_csv": 0,
"TO_here_in_csv": 4
}
or
{
"s3_bucket_name": "YOUR_BUCKET_NAME",
"target_directory": "YOUR_S3_DIRECTORY_NAME/OPTIONAL_SUBFOLDER_NAME/",
"multi_part_or_split_csv_flag": "True",
"FROM_here_in_csv": 0,
"TO_here_in_csv": 4
}
or
{
"s3_bucket_name": "YOUR_BUCKET_NAME",
"target_directory": "YOUR_S3_DIRECTORY_NAME/OPTIONAL_SUBFOLDER_NAME",
"default_folder_for_completed_csv_files": "COMPLETED_FILES_FOLDER_NAME/",
"multi_part_or_split_csv_flag": "True",
"FROM_here_in_csv": 0,
"TO_here_in_csv": 4,
"set_split_threshold_default_is_10k_rows": 5000
}
https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBMapper.DataTypes.html
https://www.tutorialspoint.com/dynamodb/dynamodb_data_types.htm
https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes
"""
"""# Rules / Instructions
## Cheat Sheet Instruction Summary
1. If you have not yet examined your files (file's metadata) yet,
you can use the tool to make your metadata files (so you can then look at them), by setting the "just_make_metadata_files_flag" to "True":
```
{
"s3_bucket_name": "YOUR_S3_BUCKET_NAME_HERE",
"target_directory": "YOUR_FOLDER_NAME_HERE/",
"just_make_metadata_files_flag": "True"
}
```
2. Put .csv files in the S3(AWS) folder.
3. Point the lambda function at the correct S3 bucket and your folder:
```
{
"s3_bucket_name": "YOUR_S3_BUCKET_NAME_HERE",
"target_directory": "YOUR_FOLDER_NAME_HERE/"
}
```
4. Run the lambda function (hit the "Go" button).
## Full Instructions
Instructions for using the .csv auto-load tool:
Please read and follow these instructions,
and please report any errors you receive.
1. input file(s) must be one or more .csv files (no other formats,
other files will be ignored (or might break the tool))
Do NOT put files into the tool that you do NOT want the tool to process.
2. input file(s) (.csv files) must be in a directory in an AWS-S3 folder(directory)
3. this tool is an AWS lambda function which is or operates like an api-endpoint
4. json-input: the json input for the lambda function(or endpoint),
which directs the tool to your .csv files,
must look like this:
```
{
"s3_bucket_name": "YOUR_S3_BUCKET_NAME_HERE",
"target_directory": "YOUR_FOLDER_NAME_HERE/"
}
```
You can also make or use more sub-folders (directories) to organize your files.
In this case combine all folders (the path) to the target directory
```
{
"s3_bucket_name": "YOUR_S3_BUCKET_NAME_HERE",
"target_directory": "YOUR_FOLDER_NAME_HERE/YOUR_SUB_FOLDER_NAME_HERE/"
}
```
Here is an example using all optional fields (to be explained below):
```
{
"s3_bucket_name": "YOUR_BUCKET_NAME",
"target_directory": "YOUR_S3_DIRECTORY_NAME/OPTIONAL_SUBFOLDER_NAME",
"default_folder_for_completed_csv_files": "COMPLETED_FILES_FOLDER_NAME/",
"multi_part_or_split_csv_flag": "True",
"FROM_here_in_csv": 0,
"TO_here_in_csv": 4,
"set_split_threshold_default_is_10k_rows": 5000,
"just_make_metadata_files_flag": False
}
```
But do not mix levels of directories: S3 is not a real file system, and any sub-folder inside your target folder will be seen as just more files in that main folder (not as files in a subfolder).
5. Table Name = File Name:
The csv-tool makes a data-table in an AWS-database from your .csv file (this is the overall goal).
Make the name of your .csv file the same as what you want the AWS database table to be called.
Naming Rules: The name of each file must be:
```
"Between 3 and 255 characters, containing only letters, numbers, underscores (_), hyphens (-), and periods (.)"
```
This is because the database table is given the same name as the .csv file.
Every table must have a unique name (so each .csv file must have a unique name).
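A minimal sketch of how such a name could be validated before upload (illustrative
only; the tool's own check may differ):
```
import re
name_is_ok = re.fullmatch(r"[A-Za-z0-9_.-]{3,255}", "my_table-2022.csv") is not None
```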
6. The (database) table must have a unique primary key. And the first column (of your .csv) must be that unique key. A unique row-ID number will work if there is no meaningful unique row key.
So you may need to add a unique row. There is a feature in the tool to add a unique row. The tool will exit after adding the row if this option is selected.
e.g.
```
{
"s3_bucket_name": "YOUR_BUCKET_NAME",
"target_directory": "YOUR_S3_DIRECTORY_NAME/OPTIONAL_SUBFOLDER_NAME",
"just_make_new_primary_key_first_column": "CSV_FILE_THAT_NEEDS_THE_ROW"
}
```
7. The tool will scan for 3 types of primary key errors and give you a warning to fix the file: missing data,
duplicate rows, and
mixed text/number data (e.g. text in a number column).
Finding a warning here halts the whole process, so not all files will have been checked.
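A minimal pandas sketch of those three checks on the first (primary-key) column
(illustrative only; the tool's internal implementation may differ):
```
key = df.iloc[:, 0]
has_missing = key.isna().any()
has_duplicates = key.duplicated().any()
has_mixed_types = key.dropna().map(type).nunique() > 1
```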
8. Completed files (fully check and moved into a table) will be moved into
a new directory called (by default) "default_folder_for_completed_csv_files/",
but you can pick a new destination in your endpoint-json if you want:
e.g.
```
"default_folder_for_completed_csv_files" : "THIS_FOLDER/"
```
Files not yet moved into AWS will remain in the original directory.
Please do NOT change the the destination folder to be INSIDE of your sub-folder.
e.g.
```
{
"s3_bucket_name": "YOUR_BUCKET_NAME",
"target_directory": "YOUR_S3_DIRECTORY_NAME/OPTIONAL_SUBFOLDER_NAME",
"default_folder_for_completed_csv_files": "COMPLETED_FILES_FOLDER_NAME/"
}
```
9. Please read the output of the function clearly
(to see if there was an error or if the process completed).
Some errors will regard your files and you can fix them.
Other errors may indicate updates needed for the tool.
Please report all errors we can understand this process well.
10. Please check the new data-tables in AWS to make sure they look as you want them to look.
11. From To:
The default mode is to put one data-csv file into one dynamoDB table,
however you can select from-to for which rows you want to select to upload.
This from-to will disable moving completed files.
From-to cannot be used along with split-multi files.
Starting at 1 or 0 has the same effect: starting from the beginning.
```
{
"s3_bucket_name": "YOUR_BUCKET_NAME",
"target_directory": "YOUR_FOLDER/OPTIONAL_SUB_FOLDER/",
"FROM_here_in_csv": 3,
"TO_here_in_csv": 7
}
```
12. Split Files & auto-split:
As with meta-data, file splitting can be automatic or manual. The automatic splitting does not require any additional steps, beyond re-running the function if it times out, to keep going through the files.
Sometimes csv files are very large and it is best to split them into pieces before uploading them (so that the tool does not time-out in the middle of a file).
This csv uploader tool is designed to work with (and includes a version of) this csv splitter:
https://github.com/lineality/split_csv_python_script
As a rule of thumb
files with more than 10,000 row should be split. The auto-splitter will do this automatically, though you custom set threshold, which is default set to 10,000 rows. this json choice is called "set_split_threshold_default_is_10k_rows"
If there are many parts and the tool times out, just keep running it until all the parts get completed and moved to the 'completed files" folder.
You can manually split the file yourself and put all the split files into the target direction,
hit GO (proverbially) and the tool will put them all into the same table.
Each part must be suffixed with _split__###.csv
This function also works to put two data-csv files into the SAME table BUT: be careful not to overwrite an existing table, and multiple component files (when putting multiple data-csv files into one dynamoDB table) must be given the same name and individually put into S3 and run separately.
Note: be careful about mixing split and many other non-split files together, as processing split-files will turn off the protection against over-writing an existing table.
Timing Out: The reason why large files must be split is that a lambda function has a maximum time (15min) for how long it can run. If a big .csv file takes more time than 15min, the process will crash in the middle and no progress can be made by re-running the tool. On the other hand, if the S3 folder contains many small files (that each take much less than 15 minutes to run) then running the lambda function over and over will gradually process all of the files. Note that there will still be an error returned when the lambda function times out.
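A minimal sketch of the kind of split the auto-splitter performs (illustrative only;
the bundled splitter linked above is the reference implementation, and the file name
here is hypothetical):
```
df = pd.read_csv("big_file.csv")
rows_per_part = 10000
for i, start in enumerate(range(0, len(df), rows_per_part), start=1):
    df.iloc[start:start + rows_per_part].to_csv(f"big_file_split__{i:03d}.csv", index=False)
```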
13. You will get an error if you try to use split-file and from-to at the same time.
14. just_make_metadata_files_flag:
If you want to use the tool to make your file-inspection meta_data files and stop there (so you can examine those meta_data files before proceeding), then turn on the just_make_metadata_files_flag (set it to "True").
```
{
"s3_bucket_name": "YOUR_BUCKET_NAME",
"target_directory": "YOUR_S3_DIRECTORY_NAME/OPTIONAL_SUBFOLDER_NAME",
"just_make_metadata_files_flag": "False"
}
```
# Workflow (How the Tool Works under the hood)
1. preemptively clear the /tmp/ directory in lambda-function
2. user inputs a folder (a target s3 directory, in which are .csv files)
3. scan S3 folder for .csv files (ignore non csv files)
4. make a list of .csv files NOT including "metadata_" at the start. We will later iterate through this list 3 times.
5. track the many forms of names keeping track of the root name, the lambda name, the old s3 name and the new s3 name, split names, table names, etc.
6. make a list of files that need meta-data files (there are several such related lists, and these are re-created after files are auto split if that happens)
#### Two steps need to happen before processing the files and iterating through the files to upload them to AWS datatables.
7. There is the option to just create meta-data files before trying to load them. In this case, each data file has a metadata file made which includes primary key type warnings for all columns.
8. Auto Splitting: In the case of file splitting, the previous file listing steps need to be repeated from scratch and a new list of files made before proceeding.
#### Iterating Though and Processing Files
9. iteration 1: iterate through list of data-csv files and check to see if there are any name collisions between file names and dynamoDB tables to be created (exit to error message if collision found) Note: extra logic for where from-to or multiple input files are used.
10. iteration 2 and auto-split: iterate through files_that_need_metadata_files_list of data-csv files to make a list of unpaired files: use pandas to create a table with AWS datatypes, and move that metadata_file to s3. The goals here is to allow users to upload custom metadata files, but to default to automating that process.
Plus auto-split
check each data csv file's shape to see if the file has more than 10,000 rows. If so: split/replace the file in S3.
11. iteration 3: when all data-csv files are paired with a metadata_ file: iterate through the list of all root files (see below)
#### The next steps are done for each (iterating through each) data file in the 3rd and last pass through the list of data-csv files:
12. The primary-key column/field is error-checked for 3 types of primary key errors and gives the user a warning to fix the file: missing data, duplicate rows, and mixed text/number data (e.g. text in a number column). Finding and outputting a warning here halts the whole process, so not all files will have been checked.
13. lambda creates a new dynamoDB table with a name the same as the .csv file. Note: extra logic to skip this for from-to or multi-file-to-one-table input.
14. lambda uses metadata_ file and data-csv file to load data into dynamoDB. Note: by default this is the whole file, but optionally a from_row and to_row can be specified by row number in csv
15. after file is loaded successfully into dynamoDB, data-csv and metadata_csv are moved to a new directory (folder) called 'transferred files' (or whatever the name ends up being) (this involves copying to the new location and then deleting the old file from S3). Note: this is skipped when using from-to or multi-file-to-one-table as the whole upload process is not completed in one step.
16. the aws Lambda /tmp/ copy of the file is deleted (to not overwhelm the fragile lambda-function)
#### These steps are done at the very end of the whole process after all files are processed (if there is no error that stops the process)
17. remove all files from lambda /tmp/ directory
18. output: list of tables created OR error message
"""
# Import Libraries and Packages for Python
import boto3
import datetime
import glob
import json
import io
import numpy as np
import os
import pandas as pd
import re
import time
###################
# Helper Functions
###################
# helper function
def make_new_primary_key_first_column(df):
# Create a new row containing row numbers:
df['Row_Number'] = np.arange(df.shape[0]) + 1
# Select new row to be moved to the front (made the first column)
new_first_column = df.pop('Row_Number')
# Move new row to the front (make it the first column)
df.insert(0, 'Row_Number', new_first_column)
return df
# helper function
def get_csv_from_S3_to_lambda_tmp(s3_client, s3_bucket_name, S3_file_name, lambda_tmp_file_name):
# Get .csv from S3 (note: not just readable text from it, but the whole file)
response = s3_client.get_object(Bucket = s3_bucket_name, Key = S3_file_name)
raw_csv = response['Body'].read().decode('utf-8')
# save file in /tmp/ directory because AWS requires
with open(lambda_tmp_file_name, 'w') as data:
data.write(raw_csv)
# helper function
def remove_split_from_name(name):
"""
if last part of name is _split__###
return: name - "split"
Else, return: name
"""
if name[-15:-7] == "_split__":
return name[:-15] + ".csv"
else:
return name
# helper function to make name
def make_new_names(name):
"""this splits the name into the next two split number
1 -> 1, 2
x*2-1, x*2
plus padding
"""
# look for if the name is already split
if name[-15:-7] == "_split__":
# extract old number
three_numbers = int( name[-7:-4] )
# new numbers
new_first_file_number = (three_numbers * 2) - 1
new_second_file_number = (three_numbers * 2)
# get number of digits
first_number_of_digits = len( str( new_first_file_number ) )
second_number_of_digits = len( str( new_second_file_number ) )
# check for size error
# for terminal
if (first_number_of_digits > 3) or (second_number_of_digits > 3):
print("split error, trying to split more than 999 times.")
return "split_error", "split_error"
name_root = remove_split_from_name( name )[:-4]
if first_number_of_digits == 3:
first_new_name = f'{name_root}_split__{new_first_file_number}.csv'
second_new_name = f'{name_root}_split__{new_first_file_number}.csv'
if first_number_of_digits == 2:
first_new_name = f'{name_root}_split__0{new_first_file_number}.csv'
if second_number_of_digits == 2:
second_new_name = f'{name_root}_split__0{new_second_file_number}.csv'
if first_number_of_digits == 1:
first_new_name = f'{name_root}_split__00{new_first_file_number}.csv'
if second_number_of_digits == 1:
second_new_name = f'{name_root}_split__00{new_second_file_number}.csv'
return first_new_name, second_new_name
else:
# if the file is original, leave the new name
# as _split__001 or _split__002
first_new_name = f'{name[:-4]}_split__001.csv'
second_new_name = f'{name[:-4]}_split__002.csv'
return first_new_name, second_new_name
# helper function
def combine_csv():
# glob all csv files
remaining_csv_list = glob.glob("*.csv")
# make new name: output .csv name
old_name = remaining_csv_list[0]
new_name = old_name[:-15] + ".csv"
# sort all files
remaining_csv_list.sort()
# open the first file
df = pd.read_csv(remaining_csv_list[0])
# # inspection
print("df shape (combine_csv 1): ", df.shape)
# remove first file from list
remaining_csv_list.pop(0)
# iterate through remaining files
for this_file in remaining_csv_list:
# print("and I combined THIS file", this_file, "!")
# load next file
df2 = | pd.read_csv(this_file) | pandas.read_csv |
#!/usr/bin/env python
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DialogFlow API Detect Intent Python sample with text inputs.
Examples:
python detect_intent_texts.py -h
python detect_intent_texts.py --project-id PROJECT_ID \
--session-id SESSION_ID \
"hello" "book a meeting room" "Mountain View"
python detect_intent_texts.py --project-id PROJECT_ID \
--session-id SESSION_ID \
"tomorrow" "10 AM" "2 hours" "10 people" "A" "yes"
"""
import argparse
import os
import uuid
from time import sleep
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "Chatbot-012b76b52f9f.json"
# [START dialogflow_detect_intent_text]
def detect_intent_texts(project_id, session_id, language_code):
"""Returns the result of detect intent with texts as inputs.
Using the same `session_id` between requests allows continuation
of the conversation."""
import dialogflow_v2 as dialogflow
session_client = dialogflow.SessionsClient()
session_id = '1234567890'
session = session_client.session_path(project_id, session_id)
print('Session path: {}\n'.format(session))
import pandas as pd
df = pd.read_csv('NLU_V2/200924v3_test_original_split_kfold1_nlu_1076sample_64intents.csv')
correct_count = 0
total_count = 0
true_labels = []
predictions = []
with open('log.txt', 'w') as log:
for index, row in df.iterrows():
text = str(row.text)
label = row.labels
if len(text) >= 256:
continue
true_labels.append(label)
intent = str(label)
total_count += 1
text_input = dialogflow.types.TextInput(
text=text, language_code=language_code)
query_input = dialogflow.types.QueryInput(text=text_input)
response = session_client.detect_intent(
session=session, query_input=query_input)
print('=' * 20)
# print(response)
intent_predicted = response.query_result.fulfillment_text
if intent_predicted.strip():
predictions.append(int(intent_predicted))
else:
predictions.append(-1)
if intent == intent_predicted:
correct_count += 1
else:
log.write('True:' + intent + '\n')
log.write('Predicted:' + intent_predicted + '\n\n')
print('Query text: {}'.format(response.query_result.query_text))
print('Query Entities: {0}'.format(response.query_result.parameters))
print('Detected intent: {} (confidence: {})\n'.format(
response.query_result.intent.display_name,
response.query_result.intent_detection_confidence))
print('Fulfillment text: {}\n'.format(
response.query_result.fulfillment_text))
print("Total count:{}, Correct count:{}".format(total_count, correct_count))
print("Accuracy:{}".format(correct_count/total_count))
pred_df = pd.Series(predictions)
true_df = | pd.Series(true_labels) | pandas.Series |
# -*- coding: utf-8 -*-
"""
dwx_analytics.py - Pythonic access to raw DARWIN analytics data via FTP
--
@author: <NAME> (www.darwinex.com)
Last Updated: October 17, 2019
Copyright (c) 2017-2019, Darwinex. All rights reserved.
Licensed under the BSD 3-Clause License, you may not use this file except
in compliance with the License.
You may obtain a copy of the License at:
https://opensource.org/licenses/BSD-3-Clause
"""
import gzip
import json, os
import pandas as pd
from tqdm import tqdm
from ftplib import FTP
from io import BytesIO
from matplotlib import pyplot as plt
import logging
logger = logging.getLogger()
class DWX_Darwin_Data_Analytics_API():
'''This API has the ability to download DARWIN data and analyze it.'''
def __init__(self, dwx_ftp_user, dwx_ftp_pass, dwx_ftp_hostname, dwx_ftp_port):
"""Initialize variables, setup byte buffer and FTP connection.
Parameters
----------
ftp_server : str
FTP server that houses raw DARWIN data
ftp_username : str
Your Darwinex username
ftp_password : str
Your FTP password (NOT your Darwinex password)
ftp_port : int
Port to connect to FTP server on.
--
"""
# Analytics Headers
self.analytics_headers = {'AVG_LEVERAGE': ['timestamp','periods','darwin_vs_eurusd_volatility'],
'ORDER_DIVERGENCE': ['timestamp','instrument','usd_volume','latency','divergence'],
'RETURN_DIVERGENCE': ['timestamp','quote','quote_after_avg_divergence'],
'MONTHLY_DIVERGENCE': ['timestamp','average_divergence','monthly_divergence'],
'DAILY_FIXED_DIVERGENCE': ['timestamp','profit_difference'],
'DAILY_REAL_DIVERGENCE': ['timestamp','profit_difference'],
'POSITIONS': ['timestamp','periods','array','total_pos_number','max_open_trades'],
'TRADES': ['timestamp','periods','array'],
'TRADE_CONSISTENCY': ['timestamp','periods','array'],
'ROTATION': ['timestamp','periods','daily_rotation']}
# Setup data container
self.retbuf = BytesIO()
# Setup data access mode (file or FTP)
self.mode = 0 # Default is file.
try:
self.server = FTP(dwx_ftp_hostname)
self.server.login(dwx_ftp_user, dwx_ftp_pass)
# 200+ codes signify success.
if str(self.server.lastresp).startswith('2'):
logger.warning('[KERNEL] FTP Connection Successful. Data will now be pulled from Darwinex FTP Server.')
self.mode = 1 # 1 = FTP, 0
logger.warning(f'[KERNEL] Last FTP Status Code: {self.server.lastresp} | Please consult https://en.wikipedia.org/wiki/List_of_FTP_server_return_codes for code definitions.')
except Exception as ex:
logger.warning(f'Exception: {ex}')
exit(-1)
##########################################################################
"""Parse a line containing a list. Only works for max one list or one list of lists."""
def parse_line(self, line):
for start, end in [['[[', ']]'], ['[', ']']]:
if start in line:
ls = line.split(start)
ls1 = ls[1].split(end)
return ls[0].split(',')[:-1] + [json.loads(start+ls1[0].replace("'", '"')+end)] + ls1[1].split(',')[1:]
return line.split(',')
def get_data_from_ftp(self, darwin, data_type):
"""Connect to FTP server and download requested data for DARWIN.
For example, darwin='PLF' and data_type='AVG_LEVERAGE' results in this
code retrieving the file 'PLF/AVG_LEVERAGE' from the FTP server.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
data_type : str
Must be a key in self.analytics_headers dictionary.
Returns
-------
df
Pandas DataFrame
--
"""
# Clear / reinitialize buffer
self.retbuf = BytesIO()
self.server.retrbinary(f"RETR {darwin}/{data_type}", self.retbuf.write)
self.retbuf.seek(0)
# Extract data from BytesIO object
ret = []
while True:
line = self.retbuf.readline()
if len(line) > 1:
ret.append(self.parse_line(line.strip().decode()))
else:
break
# Return as Dataframe
return pd.DataFrame(ret)
def get_data_from_file(self, darwin, data_type):
"""Read data from local file stored in path darwin/filename
For example, darwin='PLF' and data_type='AVG_LEVERAGE' results in this
code retrieving the file 'PLF/AVG_LEVERAGE' from the current directory.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
data_type : str
Must be a key in self.analytics_headers dictionary.
Returns
-------
df
Pandas DataFrame
--
"""
if self.mode == 0:
logger.warning(f'Retrieving data from file for DARWIN {darwin}...')
return pd.read_csv(f'{str(darwin).upper()}/{str(data_type).upper()}', header=None)
else:
logger.warning(f'Retrieving data from FTP Server for DARWIN {darwin}...')
return self.get_data_from_ftp(str(darwin).upper(), str(data_type).upper())
def save_data_to_csv(self, dataframe_to_save, which_path, filename):
# Save:
if which_path:
# It will save the data to the specified path:
dataframe_to_save.to_csv(which_path + filename + '.csv')
else:
# It will save the data in the working directory:
dataframe_to_save.to_csv(filename + '.csv')
##########################################################################
def get_analytics(self, darwin, data_type):
"""Get, index and prepare requested data.
For example, darwin='PLF' and data_type='AVG_LEVERAGE' results in:
- the code retrieving the file 'PLF/AVG_LEVERAGE'
- converting millisecond timestamps column to Pandas datetime
- Setting the above converted timestamps as the index
- Dropping the timestamp column itself.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
data_type : str
Must be a key in self.analytics_headers dictionary.
Returns
-------
df
Pandas DataFrame
--
"""
df = self.get_data_from_file(darwin, data_type)
df.columns = self.analytics_headers[data_type]
df.set_index(pd.to_datetime(df['timestamp'], unit='ms'), inplace=True)
df.drop(['timestamp'], axis=1, inplace=True)
return df
##########################################################################
def get_darwin_vs_eurusd_volatility(self, darwin, plot=True):
"""Get the evolution of the given DARWIN's volatility vs that of the EUR/USD.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
plot : bool
If true, produce a chart as defined in the method.
Returns
-------
df
Pandas DataFrame
--
"""
# Set required data type
data_type = 'AVG_LEVERAGE'
# Get raw data into pandas dataframe
df = self.get_analytics(darwin, data_type)
# DARWIN vs EURUSD volatility is a list. We need the last value
df.loc[:,self.analytics_headers[data_type][-1]] = \
df.loc[:,self.analytics_headers[data_type][-1]].apply(eval).apply(lambda x: x[-1])
if plot:
df['darwin_vs_eurusd_volatility'].plot(title=f'${darwin}: DARWIN vs EUR/USD Volatility',
figsize=(10,8))
# Return processed data
return df
##############################################################################
def get_order_divergence(self, darwin,
plot=True):
"""Get the evolution of the given DARWIN's replication latency and investor
divergence, per order executed by the trader.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
plot : bool
If true, produce a chart as defined in the method.
Returns
-------
df
Pandas DataFrame
--
"""
# Set required data type
data_type = 'ORDER_DIVERGENCE'
# Get raw data into pandas dataframe
df = self.get_analytics(darwin, data_type)
# Convert values to numeric
df[['latency','usd_volume','divergence']] = df[['latency','usd_volume','divergence']].apply(pd.to_numeric, errors='coerce')
# Plot
if plot:
fig = plt.figure(figsize=(10,12))
# 2x1 grid, first plot
ax1 = fig.add_subplot(211)
ax1.xaxis.set_label_text('Replication Latency (ms)')
# 2x1 grid, second plot
ax2 = fig.add_subplot(212)
ax2.xaxis.set_label_text('Investor Divergence')
# Plot Median Replication Latency by Instrument
df.groupby('instrument').latency.median()\
.sort_values(ascending=True).plot(kind='barh',\
title=f'${darwin} | Median Order Replication Latency (ms)',\
ax=ax1)
# Plot Median Investor Divergence by Instrument
df.groupby('instrument').divergence.median()\
.sort_values(ascending=True).plot(kind='barh',\
title=f'${darwin} | Median Investor Divergence per Order',\
ax=ax2)
fig.subplots_adjust(hspace=0.2)
# Return processed data
return df.dropna()
##########################################################################
def get_return_divergence(self, darwin, plot=True):
"""Get the evolution of the given DARWIN's Quote and Quote after applying
average investors' divergence.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
plot : bool
If true, produce a chart as defined in the method.
Returns
-------
df
Pandas DataFrame
--
"""
# Set required data type
data_type = 'RETURN_DIVERGENCE'
# Get raw data into pandas dataframe
df = self.get_analytics(darwin, data_type).apply(pd.to_numeric, errors='coerce')
if plot:
df.plot(title=f'${darwin} | Quote vs Quote with Average Divergence',
figsize=(10,8))
return df
##########################################################################
def get_monthly_divergence(self, darwin):
"""Get the evolution of the given DARWIN's average and monthly divergence.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
Returns
-------
df
Pandas DataFrame
--
"""
# Set required data type
data_type = 'MONTHLY_DIVERGENCE'
# Get raw data into pandas dataframe
df = self.get_analytics(darwin, data_type).apply(pd.to_numeric, errors='coerce')
return df
##########################################################################
def get_daily_fixed_divergence(self, darwin, plot=True):
"""Analyses the effect of applying a fixed divergence (10e-5) on the profit.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
plot : bool
If true, produce a chart as defined in the method.
Returns
-------
df
Pandas DataFrame
--
"""
# Set required data type
data_type = 'DAILY_FIXED_DIVERGENCE'
# Get raw data into pandas dataframe
df = self.get_analytics(darwin, data_type).apply(pd.to_numeric, errors='coerce')
if plot:
df.plot(title=f'${darwin} | Effect of 10e-5 Fixed Divergence on profit',
figsize=(10,8))
return df
##########################################################################
def get_daily_real_divergence(self, darwin, plot=True):
"""Analyse the effect of applying the investors' divergence on the profit.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
plot : bool
If true, produce a chart as defined in the method.
Returns
-------
df
Pandas DataFrame
--
"""
# Set required data type
data_type = 'DAILY_REAL_DIVERGENCE'
# Get raw data into pandas dataframe
df = self.get_analytics(darwin, data_type).apply(pd.to_numeric, errors='coerce')
if plot:
df.plot(title=f'${darwin} | Effect of Investor Divergence on profit',
figsize=(10,8))
return df
##########################################################################
def get_quotes_from_ftp(self,
darwin='PLF',
suffix='4.1',
monthly=True, # If set to False, month/year used.
month='01',
year='2019',
former_or_new='former'):
"""Download Quote data for any DARWIN directly via FTP.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
suffix : str
Reflects risk, '4.1' being for 10% VaR DARWIN assets.
monthly : bool
If set to True, month and year arguments are ignored, and ALL
data available for the DARWIN is downloaded.
month : str
Data on the FTP server has the following directory structure:
DARWIN_Symbol -> {year}-{month} -> *.csv.gz files
e.g. PLF/2019-01/PLF....csv.gz
Specifies month for {year}-{month} tuple as above.
year : str
Data on the FTP server has the following directory structure:
DARWIN_Symbol -> {year}-{month} -> *.csv.gz files
e.g. PLF/2019-01/PLF....csv.gz
Specifies year for {year}-{month} tuple as above.
former_or_new : str
Access the former var10 DARWIN data or new var6.5 DARWIN data.
Returns
-------
df
Pandas DataFrame containing Quotes, indexed by timeframe.
--
"""
if former_or_new == 'former':
quote_files = []
roots = []
if monthly:
tqdm.write(f'\n[KERNEL] Searching for Quote data for DARWIN (FORMER VAR_10) {darwin}, please wait..', end='')
self.server.retrlines(f'NLST {darwin}/_{darwin}_former_var10/quotes/', roots.append)
roots_pbar = tqdm(roots, position=0, leave=True)
for root in roots_pbar:
try:
roots_pbar.set_description("Getting filenames for month: %s" % root)
root_files = []
self.server.retrlines(f'NLST {darwin}/_{darwin}_former_var10/quotes/{root}', root_files.append)
# Finalize filenames
quote_files += [f'{darwin}/_{darwin}_former_var10/quotes/{root}/{root_file}'\
for root_file in root_files if '{}.{}'.format(darwin, suffix) in root_file]
except Exception as ex:
logger.warning(ex)
return
elif pd.to_numeric(month) > 0 and | pd.to_numeric(year) | pandas.to_numeric |
import os
from django.conf import settings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, preprocessing
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
companies_list = [
{'value':"AMBUJACEM", 'name':"<NAME>"},
{'value':"ASIANPAINT", 'name':"Asian Paints"},
{'value':"BANKBARODA", 'name':"Bank Of Baroda"},
{'value':"HDIL", 'name':"Housing Development & Infrastructure Ltd."},
{'value':"HEROMOTOCO", 'name':"Hero Motor Corporation"},
{'value':"HINDUNILVR", 'name':"Hindustan Unilever"},
{'value':"INFY", 'name':"Infosys"},
{'value':"ITC", 'name':"ITC"},
{'value':"MARUTI", 'name':"Maruti Suzuki Ltd."},
{'value':"TCS", 'name':"Tata Consultancy Services"},
]
company_namees = {
"AMBUJACEM":"<NAME>",
"ASIANPAINT" :"Asian Paints",
"BANKBARODA" :"Bank Of Baroda",
"HDIL" :"Housing Development & Infrastructure Ltd.",
"HEROMOTOCO" :"Hero Motor Corporation",
"HINDUNILVR" :"Hindustan Unilever",
"INFY" :"Infosys",
"ITC" :"ITC",
"MARUTI" :"Maruti Suzuki Ltd.",
"TCS" :"Tata Consultancy Services"
}
def predict(company):
#Enter the prediction here, you will be getting the company code as input Eg: TCS,INFY,HEROMOTOCO
'''
:param company: company code
:return: list
Output is a list with dictionaries
Eg:
[
{'day':"Tomorrow", 'value':"58.55"},
{'day':"5 days later", 'value':"58.55"},
{'day':"10 days later", 'value':"58.55"},
{'day':"15 days later", 'value':"58.55"},
.
.
.
.
]
'''
print("Company name"+company)
#dict1={'AMBUJACEM':'.csv','INFY':'infosys2.csv'}
FEATURES =['Close','X1','X2','X3','X4','X5','X6','X7','X8','X9','X10','X11','X12','X13','X14','X15','X16','X17','X18','X19','X20','X21','X22','X23','X24','M5','M10','M15','M20','One Day Momentum','Five Day Momentum','Ten Day Momentum','Fifteen Day Momentum','Twenty Day Momentum']
#df = pd.DataFrame.from_csv(dict1[company])
name=company+".csv"
#df=pd.DataFrame.from_csv(name)
df = pd.read_csv(settings.MEDIA_ROOT + name)
#test_size = 200
df=df.replace([np.inf,-np.inf],np.nan)
df=df.replace('#DIV/0!',np.nan)
df=df.dropna()
predictions=[]
list_index=['Next Day Price','5 Day Price','10 Day Price','15 Day Price','20 Day Price']
'''
for h in list_index:
if(h=='Next Day Price'):
X=np.array(df[FEATURES].values)
y=(df[h].values)
reg=linear_model.Lasso(alpha=0.1)
reg.fit(X[40:len(df)-1],y[40:len(df)-1])
price=reg.predict(X[len(df)-1])[0]
dict2={'day':"",'value':""}
print(X[len(df)-1])
dict2['day']=h
dict2['value']=price
predictions.append(dict2)
elif(h=='5 Day Price'):
X = np.array(df[FEATURES].values)
y = (df[h].values)
reg=linear_model.Lasso(alpha=0.1)
reg.fit(X[40:len(df)-5],y[40:len(df)-5])
price=reg.predict(X[len(df)-1])[0]
dict2={'day':"",'value':""}
print(X[len(df)-1])
dict2['day']=h
dict2['value']=price
predictions.append(dict2)
elif(h=='10 Day Price'):
X = np.array(df[FEATURES].values)
y = (df[h].values)
reg=linear_model.Lasso(alpha=0.1)
reg.fit(X[40:len(df)-10],y[40:len(df)-10])
price=reg.predict(X[len(df)-1])[0]
dict2={'day':"",'value':""}
print(X[len(df)-1])
dict2['day']=h
dict2['value']=price
predictions.append(dict2)
elif(h=='15 Day Price'):
X = np.array(df[FEATURES].values)
y = (df[h].values)
reg=linear_model.Lasso(alpha=0.1)
reg.fit(X[40:len(df)-15],y[40:len(df)-15])
price=reg.predict(X[len(df)-1])[0]
dict2={'day':"",'value':""}
print(X[len(df)-1])
dict2['day']=h
dict2['value']=price
predictions.append(dict2)
else:
X = np.array(df[FEATURES].values)
y = (df[h].values)
reg=linear_model.Lasso(alpha=0.1)
reg.fit(X[40:len(df)-20],y[40:len(df)-20])
price=reg.predict(X[len(df)-1])[0]
dict2={'day':"",'value':""}
print(X[len(df)-1])
dict2['day']=h
dict2['value']=price
predictions.append(dict2)
print(predictions)
'''
for h in list_index:
X = np.array(df[FEATURES].values)
y = (df[h].values)
reg=linear_model.Lasso(alpha=0.1)
reg.fit(X[40:len(df)-20],y[40:len(df)-20])
data = np.array(X[len(df)-1])
data = data.reshape(1, -1)
price=reg.predict(data)[0]
dict2={'day':"",'value':""}
print(X[len(df)-1])
dict2['day']=h
dict2['value']=price
predictions.append(dict2)
'''
predictions = [
{'day':"Tomorrow", 'value':"58.55"},
{'day':"5 days later", 'value':"58.55"},
{'day':"10 days later", 'value':"58.55"},
{'day':"15 days later", 'value':"58.55"},
]
'''
return predictions
def stock_prices(company):
#return the list of stock prices and dates(in a dict), list of dicts
# dict1={'AMB':'ambuja2.csv','INFY':'infosys2.csv'}
name=company+'.csv'
df = pd.DataFrame.from_csv(settings.MEDIA_ROOT + name)
#test_size = 200
#df=df.replace([np.inf,-np.inf],np.nan)
#df=df.replace('#DIV/0!',np.nan)
#df=df.dropna()
#print(df.keys())
#print(df['Date'][1])
prices=[]
for i in range(0,len(df)):
dict2={'day':"",'value':""}
dict2['day']=df['Date'][i]
dict2['value']=df['Close'][i]
prices.append(dict2)
# print(prices)
return prices
def company_latest(company):
#details = {'name': "<NAME>", 'price':"222.5", 'change': "+", 'change_price':"22.5"}
details={'name':"",'price':"",'change':"","change_price":"","stock_name":""}
name=company+".csv"
#df=pd.DataFrame.from_csv(name)
df = pd.read_csv(settings.MEDIA_ROOT + name)
#df=pd.DataFrame.from_csv("ambuja2.csv")
df1=df.tail(1)
x=df1.index[0]
print(x)
print(df['Date'][x])
p1=df['Close'][x]
p2=df['Close'][x-1]
diff=p1-p2
sign=""
if(p1-p2>=0):
sign="+"
elif(p1-p2<0):
sign="-"
details['name']=company_namees[company]
details['stock_name']=company+".NS"
details['price']=p1
details['change']=sign
details['change_price']=diff
return details
def plot(company):
#dict1={'AMB':'ambuja2.csv','INFY':'infosys2.csv'}
name=company+".csv"
df = | pd.read_csv(settings.MEDIA_ROOT + name) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 14 12:04:33 2018
@author: gurunath.lv
"""
try :
import base64
import datetime
import io
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
import plotly.graph_objs as go
import numpy as np
import pandas as pd
import json
# from tf_universal_sent_emb import get_similar_records
# from spacy_text_classifier_cnn import train_cnn_for_given_label,predict
import glob
import os
# from custom_classifier import customKNN,ParagraphVectors
# from dashboard import Dashboard
from flask import Flask
import flask
#import glob
from sklearn.pipeline import Pipeline
import pickle
from lime.lime_text import LimeTextExplainer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
except ImportError as e:
print("some packages are not installed -to use this textclf Annotator \
! please install relevant libraries",e)
server = Flask(__name__)
app=dash.Dash(name = __name__, server = server)
if not os.path.exists('tmp'):
os.mkdir('tmp')
DIRECTORY_PATH=r'tmp\\'
#app = dash.Dash()
app.scripts.config.serve_locally = True
app.config['suppress_callback_exceptions']=True
# custom_dush=Dashboard()
#prodapt=html.Div(html.Img(src='http://www.prodapt.com/wp-content/uploads/logo_prodapt.png')),
#vs=html.H1('vs')
#reinfer=html.Div(html.Img(src='https://d1qb2nb5cznatu.cloudfront.net/startups/i/703763-82fa920eed7d56e7cdcee1b1d9a30b14-medium_jpg.jpg?buster=1440002957')),
#logo=custom_dush.three_columns_grid(prodapt,vs,reinfer)
def transform_using_tfidf(text_series):
tfidf=TfidfVectorizer(stop_words='english')
array=tfidf.fit_transform(text_series.tolist()).toarray()
return array,tfidf
def similarity_measure(inp_sent,array,tfidf,top_n):
inp_vec=tfidf.transform([inp_sent]).toarray()
cs=cosine_similarity(inp_vec,array)
top_match_index=np.flip(np.argsort(cs,axis=1)[:,-top_n:],axis=1)
return top_match_index
def get_similar_records(inp_sent,total_text,top_n=10):
array,tfidf=transform_using_tfidf(total_text)
top_match_index=similarity_measure(inp_sent,array,tfidf,top_n)
return total_text.iloc[top_match_index.ravel()]
app.layout = html.Div([
# logo,
dcc.Upload(
id='upload-data',
children=html.Div([
'Drag and Drop or ',
html.A('Select Files')
]),
style={
'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'
},
# Allow multiple files to be uploaded
multiple=True
),
html.Div(id='output-data-upload'),
html.Div(dt.DataTable(rows=[{}]), style={'display': 'none'}),
dcc.Input(id='user-input-for-similarity',
value='Enter the sentence', type='text',
style={'width': '49%','align':'center'}),
html.Div(id='similar-docs'),
html.Br(),
html.H3('Training dataset'),
html.Div(id='output'),
html.Button('Train',id='train-button'),
html.Br(),
dcc.Input(id='user-input-for-prediction',
value='Enter the sentence to predict', type='text',
style={'width': '49%','align':'center'}),
html.H1(id='train-output'),
# html.Button('Del data',id='delete-button'),
html.H1(id='del-output'),
dcc.Graph(id='predict-output'),
html.Br(),
dcc.Link('Why ML made this Prediction !!!!', href='/explain'),
# html.Div([
# html.Pre(id='output', className='two columns'),
# html.Div(
# dcc.Graph(
# id='graph',
# style={
# 'overflow-x': 'wordwrap'
# }
# ),
# className='ten columns'
# )
# ], className='row')
])
def parse_contents(contents, file_name, date):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
global df
global filename
try:
if 'csv' in file_name:
# Assume that the user uploaded a CSV file
df = pd.read_csv(
io.StringIO(decoded.decode('utf8')))
elif 'xls' in file_name:
# Assume that the user uploaded an excel file
df = pd.read_excel(io.BytesIO(decoded))
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
"""
pass similar contents df not file uploaded df
To be filled
"""
# df.to_csv(r'{}'.format(filename),index=False)
filename=file_name
return html.Div([
html.H5(filename),
html.H6(datetime.datetime.fromtimestamp(date)),
# Use the DataTable prototype component:
# github.com/plotly/dash-table-experiments
dt.DataTable(rows=df.to_dict('records'),id='edit-table'),
html.Hr(), # horizontal line
# For debugging, display the raw contents provided by the web browser
# html.Div('Raw Content'),
# html.Pre(contents[0:200] + '...', style={
# 'whiteSpace': 'pre-wrap',
# 'wordBreak': 'break-all'
# })
])
@app.callback(
Output(component_id='similar-docs', component_property='children'),
[Input(component_id='user-input-for-similarity', component_property='value')])
def get_similar_docs(sent):
print('similar docs called ',sent)
# path=glob.glob(r'*.csv')
# df=pd.read_csv(path[0],encoding='ISO-8859-1')
global similar_df
similar_series=get_similar_records(sent,df[df.columns[0]])
similar_df=pd.DataFrame(columns=['Similar_sentences','labels'])
similar_df['Similar_sentences']=similar_series
print('check',similar_df.head())
# similar_df.to_csv
return html.Div(dt.DataTable(rows=similar_df.to_dict('records'),id='edit-table-similar'),)
def train_custom_classifier(similar_df,filename):
texts, labels =similar_df.iloc[:,0].values,similar_df.iloc[:,1].values
print(type(labels),type(texts),labels)
if glob.glob(r'{}{}_knn_centroid.pkl'.format(DIRECTORY_PATH,filename)):
dict_=pickle.load(open(glob.glob(r'{}*{}_knn_centroid.pkl'.format(DIRECTORY_PATH,filename))[0],'rb'))
pipe=Pipeline(steps=[('pv',ParagraphVectors(filename=filename)),('knn',customKNN(label_to_vect_dict=dict_))])
label_encoding=pickle.load(open(glob.glob(r'{}*{}_label_encoding.pkl'.format(DIRECTORY_PATH,filename))[0],'rb'))
for idx,lab in enumerate(set(labels)):
label_encoding[idx+len(label_encoding)]=lab
else:
label_encoding=dict()
pipe=Pipeline(steps=[('pv',ParagraphVectors(filename=filename)),('knn',customKNN())])
for idx,lab in enumerate(set(labels)):
label_encoding[idx]=lab
look_up=dict()
for k,v in label_encoding.items():
look_up[v]=k
pipe.fit(texts,pd.Series(labels).map(look_up))
dict_=pipe.named_steps.knn.get_centroid()
pickle.dump(dict_,open(r'{}{}_knn_centroid.pkl'.format(DIRECTORY_PATH,filename),'wb'))
pickle.dump(label_encoding,open(r'{}{}_label_encoding.pkl'.format(DIRECTORY_PATH,filename),'wb'))
#train_custom_classifier(user_story,filename)
def explain_prediction(sent,pipe,filename):
# vect=transform_inp_sent_to_vect(sent)
label_encoding=pickle.load(open(glob.glob(r'{}{}_label_encoding.pkl'.format(DIRECTORY_PATH,filename))[0],'rb'))
labels=list(label_encoding.values())
explainer = LimeTextExplainer(class_names=labels)
exp = explainer.explain_instance(sent, pipe.predict_proba,labels=labels)
return exp.save_to_file(r'{}explanation.html'.format(DIRECTORY_PATH))
def predict_custom_classifier(sent_list,filename):
print('inside fn',filename)
dict_=pickle.load(open(glob.glob(r'{}{}_knn_centroid.pkl'.format(DIRECTORY_PATH,filename))[0],'rb'))
pipe=Pipeline(steps=[('pv',ParagraphVectors(filename=filename)),('knn',customKNN(label_to_vect_dict=dict_))])
label_encoding=pickle.load(open(glob.glob(r'{}{}_label_encoding.pkl'.format(DIRECTORY_PATH,filename))[0],'rb'))
# pred_dict=label_encoding.copy()
pred=[]
for sent in sent_list:
explain_prediction(sent,pipe,filename)
pred.append(pipe.predict_proba(sent))
return pred,list(label_encoding.values())
@app.callback(Output('output-data-upload', 'children'),
[Input('upload-data', 'contents'),
Input('upload-data', 'filename'),
Input('upload-data', 'last_modified')])
def update_output(list_of_contents, list_of_names, list_of_dates):
if list_of_contents is not None:
children = [
parse_contents(c, n, d) for c, n, d in
zip(list_of_contents, list_of_names, list_of_dates)]
return children
def prediction_bar_chart(xaxis,labels):
trace1 = go.Bar(
x = xaxis,
y = labels,
text= labels,
marker=dict(
color='rgb(158,202,225)',
line=dict(
color='rgb(8,48,107)',
width=1.5,
)
),
orientation='h',
)
layout = go.Layout(
title='probabilities',
xaxis=dict(title='probability',),
# yaxis=dict(title='priority')
)
fig=go.Figure(data=[trace1],layout=layout)
return fig
@app.callback(
Output('train-output', 'children'),
[Input('train-button', 'n_clicks')])
def call_spacy_model_training(n_clicks):
# print('button clicks:',n_clicks)
if n_clicks!=None:
# path=glob.glob(r'train.csv')
# dff=pd.read_csv(path[0],encoding='ISO-8859-1')
# filename=path[0].split('.')[0].split('_')[0]
# filename=path[0].split('\\')[-1].split('_train_data')[0]
train_custom_classifier(similar_df,filename)
return 'custom classifier trained'
@app.server.route('/explain')
def upload_file():
return flask.send_from_directory(DIRECTORY_PATH,'explanation.html')
#@<EMAIL>.callback(
# Output('del-output', 'children'),
# [Input('delete-button', 'n_clicks')])
#def delete_data_tmp_folder(n):
# print('delete:',n)
# path=glob.glob(r'D:\Testing_frameworks\Testcase-Vmops\Insight\src\features\tmp\*.csv')
# os.remove(path[0])
# return 'data deleted'
@app.callback(
Output('predict-output', 'figure'),
[Input('user-input-for-prediction', component_property='value')])
def predict_cat(sent):
print(sent)
# path=glob.glob(r'D:\Testing_frameworks\Testcase-Vmops\Insight\src\features\tmp\*.csv')
# filename=path[0].split('\\')[-1].split('.')[0]
pred,labels=predict_custom_classifier([sent],filename)
# print(dict_)
print(pred,labels)
return prediction_bar_chart(list(np.absolute(pred[0].ravel())),labels) #json.dumps(predict([sent],filename)[0],indent=2)
def generate_table(dataframe, max_rows=10):
return (
# Header
[html.Tr([html.Th(col) for col in dataframe.columns])] +
# Body
[html.Tr([
html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
]) for i in range(min(len(dataframe), max_rows))]
)
@app.callback(
Output('output', 'children'),
[Input('edit-table-similar', 'rows')])
def update_selected_row_indices(rows):
# path=glob.glob(r'D:\Testing_frameworks\Testcase-Vmops\Insight\src\features\tmp\*.csv')
# filename=path[0].split('\\')[-1].split('.')[0]
df= | pd.DataFrame(rows) | pandas.DataFrame |
import pandas as pd
from itertools import chain
from pgmpy.models import BayesianModel
from pgmpy.models import DynamicBayesianNetwork as DBN
from pgmpy.inference import DBNInference
from pgmpy.estimators import ParameterEstimator
from pgmpy.factors.discrete import TabularCPD
from sklearn.preprocessing import KBinsDiscretizer
import numpy as np
import json
# ------------------------------------------------------------------------------------------------------
# File Handling
def load_model(model_name, path='../../Models/'):
'''
Inputs:
model_name: model name (string).
path: path to model (optional).
1 - Load model from path.
2 - Check if model is valid.
Returns: PGMPY model object.
'''
import pickle
pickle_in = open(f"{path}{model_name}.pickle", "rb")
model = pickle.load(pickle_in)
if model.check_model():
print("Model Loaded and Checked!")
return model
'''def build_porcentagem(data):
date=data['DATA']
date=date.iloc[12:len(date)]
date = date.reset_index(drop=True)
data=data.drop(['DATA'],axis=1)
dados=[]
for linha in range(0,len(data)-12):
valores=[]
for coluna in data:
#x = dataframe.loc[linha][coluna]
x = ((data.iloc[linha + 12][coluna] * 100) / data.iloc[linha][coluna])-100
valores.append('%.2f'%float(x))
dados.append(valores)
dataset=pd.DataFrame(dados,columns=data.columns)
dataset=pd.concat([date,dataset],axis=1)
return dataset'''
def build_porcentagem(mes,data):
month=['January','February','March','April','May','June','July','August','September','October','November','December']
date=data['DATA']
date= | pd.DatetimeIndex(date) | pandas.DatetimeIndex |
import torch
import numpy as np
import scipy as sp
import pandas as pd
import scanpy as sc
from sklearn.model_selection import train_test_split
#from sklearn.preprocessing import scale
class GeneCountData(torch.utils.data.Dataset):
"""Dataset of GeneCounts for DCA"""
def __init__(self, path='data/francesconi/francesconi_withDropout.csv', device='cpu',
transpose=True, check_count=False, test_split=True, loginput=True,
norminput=True, filter_min_counts=True, first_col_names=True):
"""
Args:
"""
adata = read_dataset(path,
transpose=transpose, # assume gene x cell by default
check_counts=check_count,
test_split=True,
first_col_names=first_col_names)
adata = normalize(adata,
filter_min_counts=filter_min_counts, #TODO: set True whennot testing
size_factors=True,
logtrans_input=loginput,
normalize_input=norminput)
self.adata = adata
self.data = torch.from_numpy(np.array(adata.X)).to(device)
self.size_factors = torch.from_numpy(np.array(adata.obs.size_factors)).to(device)
self.target = torch.from_numpy(np.array(adata.raw.X)).to(device)
self.gene_num = self.data.shape[1]
if test_split:
adata = adata[adata.obs.dca_split == 'train']
train_idx, test_idx = train_test_split(np.arange(adata.n_obs), test_size=0.1, random_state=42)
spl = pd.Series(['train'] * adata.n_obs)
spl.iloc[test_idx] = 'test'
adata.obs['dca_split'] = spl.values
self.val_data = torch.from_numpy(np.array(adata[adata.obs.dca_split == 'test'].X)).to(device)
self.val_target = torch.from_numpy(np.array(adata[adata.obs.dca_split == 'test'].raw.X)).to(device)
self.val_size_factors = torch.from_numpy(np.array(adata[adata.obs.dca_split == 'test'].obs.size_factors)).to(device)
self.train_data = torch.from_numpy(np.array(adata[adata.obs.dca_split == 'train'].X)).to(device)
self.train_target = torch.from_numpy(np.array(adata[adata.obs.dca_split == 'train'].raw.X)).to(device)
self.train_size_factors = torch.from_numpy(np.array(adata[adata.obs.dca_split == 'train'].obs.size_factors)).to(device)
self.train = 0
self.val = 1
self.test = 2
self.mode = self.test
def set_mode(self, mode):
if mode == self.train:
self.mode = self.train
elif mode == self.val:
self.mode = self.val
elif mode == self.test:
self.mode = self.test
def __len__(self):
if self.mode == self.train:
return self.train_data.shape[0]
elif self.mode == self.val:
return self.val_data.shape[0]
else:
return self.data.shape[0]
def __getitem__(self, idx):
if self.mode == self.train:
data = self.train_data[idx]
target = self.train_target[idx]
size_factors = self.train_size_factors[idx]
elif self.mode == self.val:
data = self.val_data[idx]
target = self.val_target[idx]
size_factors = self.val_size_factors[idx]
else:
data = self.data[idx]
target = self.target[idx]
size_factors = self.size_factors[idx]
return data, target, size_factors
def read_dataset(adata, transpose=False, test_split=False, copy=False, check_counts=True, first_col_names=True):
if isinstance(adata, sc.AnnData):
if copy:
adata = adata.copy()
elif isinstance(adata, str):
adata = sc.read(adata, first_column_names=first_col_names)
else:
raise NotImplementedError
if check_counts:
# check if observations are unnormalized using first 10
X_subset = adata.X[:10]
norm_error = 'Make sure that the dataset (adata.X) contains unnormalized count data.'
if sp.sparse.issparse(X_subset):
assert (X_subset.astype(int) != X_subset).nnz == 0, norm_error
else:
assert np.all(X_subset.astype(int) == X_subset), norm_error
if transpose: adata = adata.transpose()
if test_split:
train_idx, test_idx = train_test_split(np.arange(adata.n_obs), test_size=0.1, random_state=42)
spl = | pd.Series(['train'] * adata.n_obs) | pandas.Series |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
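        # partition the shared Ops fixtures into datetime-like indexes and the rest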
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
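        # calendar components and boolean calendar flags are exposed as index properties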
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
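        # .asobject gives an object-dtype Index of Timestamps (NaT preserved) and .tolist() matches it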
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
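        # min/max/argmin/argmax on monotonic and non-monotonic indexes per tz;
        # empty or all-NaT indexes return NaT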
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
                # min/max of an empty or all-NaT index returns NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
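        # np.min/max/argmin/argmax dispatch to the index; the unsupported 'out' argument raises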
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
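        # rounding to a fixed frequency works for the index and its scalar elements;
        # unknown or non-fixed frequencies raise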
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
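        # repeating a date_range multiplies its length and drops the freq, naive or tz-aware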
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
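        # Index.repeat and np.repeat agree; the 'axis' keyword is rejected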
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
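        # repr/str/unicode output for empty, tz-naive and tz-aware indexes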
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
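        # repr of a Series wrapping each flavour of DatetimeIndex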
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
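        # .resolution reports the time unit implied by the index frequency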
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
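        # union against adjacent, overlapping and empty indexes preserves the tz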
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
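        # adding offsets, timedeltas or integers shifts the index (also in place);
        # adding a Timestamp is not allowed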
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
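        # set difference against disjoint, overlapping and empty indexes per tz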
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
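# Comparisons against pd.NaT are elementwise False (only != is elementwise
# True); the next test pins that down for both datetime64 and object dtype.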
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values: the n-th element is repeated n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
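# Same Ops battery for TimedeltaIndex: conversion, min/max, representation,
# offset/int arithmetic, sorting, indexing, take, shift and NaT handling.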
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days'))
self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values: the n-th element is repeated n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S']:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
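# PeriodIndex counterpart: properties, min/max, representation, summary,
# resolution, set ops, and frequency-checked arithmetic (IncompatibleFrequency).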
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
'2000-01-01 09:08'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from jsonschema import validate as js_validate
import warnings
import uuid
import time as ttime
import pandas as pd
from ..utils import sanitize_np, apply_to_dict_recursively
class DatumNotFound(Exception):
pass
def doc_or_uid_to_uid(doc_or_uid):
"""Given Document or uid return the uid
Parameters
----------
doc_or_uid : dict or str
If str, then assume uid and pass through, if not, return
the 'uid' field
Returns
-------
uid : str
A string version of the uid of the given document
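Examples
--------
A minimal sketch (the uid value is made up):
>>> doc_or_uid_to_uid('6f3ee9a1')
'6f3ee9a1'
>>> doc_or_uid_to_uid({'uid': '6f3ee9a1'})
'6f3ee9a1'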
"""
if not isinstance(doc_or_uid, six.string_types):
doc_or_uid = doc_or_uid['uid']
return doc_or_uid
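# Cache-aside lookup: on a miss the datum document is fetched from ``col`` and,
# as a side effect, every datum sharing the same resource is pre-loaded into
# ``datum_cache`` so later reads from that resource avoid extra round trips.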
def _get_datum_from_datum_id(col, datum_id, datum_cache, logger):
try:
datum = datum_cache[datum_id]
except KeyError:
# find the current document
edoc = col.find_one({'datum_id': datum_id})
if edoc is None:
raise DatumNotFound(
"No datum found with datum_id {!r}".format(datum_id))
# save it for later
datum = dict(edoc)
res = edoc['resource']
count = 0
for dd in col.find({'resource': res}):
count += 1
d_id = dd['datum_id']
if d_id not in datum_cache:
datum_cache[d_id] = dict(dd)
if count > datum_cache.max_size:
logger.warn("More datum in a resource than your "
"datum cache can hold.")
datum.pop('_id', None)
return datum
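# ``retrieve`` turns a datum_id into data: fetch the datum document, look up the
# handler registered for its resource, and call it with the stored datum_kwargs.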
def retrieve(col, datum_id, datum_cache, get_spec_handler, logger):
datum = _get_datum_from_datum_id(col, datum_id, datum_cache, logger)
handler = get_spec_handler(datum['resource'])
return handler(**datum['datum_kwargs'])
def resource_given_datum_id(col, datum_id, datum_cache, logger):
datum_id = doc_or_uid_to_uid(datum_id)
datum = _get_datum_from_datum_id(col, datum_id, datum_cache, logger)
res = datum['resource']
return res
def resource_given_uid(col, resource):
uid = doc_or_uid_to_uid(resource)
ret = col.find_one({'uid': uid})
ret.pop('_id', None)
ret['id'] = ret['uid']
return ret
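# Registers one datum document per (datum_id, datum_kwargs) pair for a single
# resource; documents are generated lazily and passed straight to ``col.insert``.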
def bulk_insert_datum(col, resource, datum_ids,
datum_kwarg_list):
resource_id = doc_or_uid_to_uid(resource)
def datum_factory():
for d_id, d_kwargs in zip(datum_ids, datum_kwarg_list):
datum = dict(resource=resource_id,
datum_id=str(d_id),
datum_kwargs=dict(d_kwargs))
apply_to_dict_recursively(datum, sanitize_np)
yield datum
col.insert(datum_factory())
def bulk_register_datum_table(datum_col,
resource_uid,
dkwargs_table,
validate):
if validate:
# validation of bulk datum kwargs is not implemented
raise NotImplementedError("validate=True is not supported")
d_ids = [str(uuid.uuid4()) for j in range(len(dkwargs_table))]
dkwargs_table = pd.DataFrame(dkwargs_table)
from linearmodels.compat.statsmodels import Summary
from itertools import product
import struct
from typing import Optional
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
import scipy.sparse as sp
from scipy.sparse import csc_matrix
from linearmodels.iv._utility import annihilate
from linearmodels.iv.absorbing import (
_VARIABLE_CACHE,
AbsorbingLS,
AbsorbingRegressor,
Interaction,
category_continuous_interaction,
category_interaction,
category_product,
clear_cache,
)
from linearmodels.iv.model import _OLS
from linearmodels.iv.results import AbsorbingLSResults, OLSResults
from linearmodels.panel.utility import (
AbsorbingEffectError,
AbsorbingEffectWarning,
dummy_matrix,
)
from linearmodels.shared.exceptions import MissingValueWarning
from linearmodels.shared.utility import AttrDict
NOBS = 100
pytestmark = pytest.mark.filterwarnings(
"ignore:the matrix subclass:PendingDeprecationWarning"
)
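# Fingerprints arrays for the variable-cache checks below: uses xxhash.xxh64
# when available and falls back to hashlib.sha256.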
class Hasher(object):
@property
def hash_func(self):
try:
import xxhash
return xxhash.xxh64()
except ImportError:
import hashlib
return hashlib.sha256()
def single(self, value):
h = self.hash_func
h.update(np.ascontiguousarray(value))
return h.hexdigest()
hasher = Hasher()
@pytest.fixture(scope="function")
def random_gen(request):
return np.random.RandomState(12345678)
def random_cat(ncat, size, frame=False, rs=None):
if rs is None:
rs = np.random.RandomState()
series = pd.Series(pd.Categorical(rs.randint(0, ncat, size)))
if frame:
return pd.DataFrame(series)
return series
def random_cont(size, rs=None):
if rs is None:
rs = np.random.RandomState()
series = pd.Series(rs.standard_normal(size))
return pd.DataFrame(series)
@pytest.fixture(scope="module", params=[1, 2, 3])
def cat(request):
rs = np.random.RandomState(0)
return pd.DataFrame(
{str(i): random_cat(4, NOBS, rs=rs) for i in range(request.param)}
)
@pytest.fixture(scope="module", params=[1, 2])
def cont(request):
rs = np.random.RandomState(0)
return pd.DataFrame(
{
"cont" + str(i): pd.Series(rs.standard_normal(NOBS))
for i in range(request.param)
}
)
@pytest.fixture(scope="module", params=[True, False])
def weights(request):
if not request.param:
return None
rs = np.random.RandomState(0)
return rs.chisquare(10, NOBS) / 10.0
@pytest.fixture(scope="module", params=[0, 1, 2])
def interact(request):
if not request.param:
return None
rs = np.random.RandomState(0)
interactions = []
for _ in range(request.param):
cat = random_cat(4, 100, frame=True, rs=rs)
cont = random_cont(100, rs=rs)
interactions.append(Interaction(cat, cont))
return interactions
def generate_data(
k=3,
const=True,
nfactors=1,
factor_density=10,
nobs=2000,
cont_interactions=1,
factor_format="interaction",
singleton_interaction=False,
weighted=False,
ncont=0,
):
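"""Simulate data for AbsorbingLS tests (a sketch of the test DGP).

y is built from ``k`` standard-normal regressors (plus an optional constant)
and a noise term; ``nfactors`` categorical fixed effects and ``ncont``
continuous columns form the absorbed set, while ``cont_interactions``
category/continuous interactions are returned separately. Returns an AttrDict
with y, x, absorb, interactions and (optionally) weights.
"""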
rs = np.random.RandomState(1234567890)
density = [factor_density] * max(nfactors, cont_interactions)
x = rs.standard_normal((nobs, k))
if const:
x = np.column_stack([np.ones(nobs), x])
e = rs.standard_normal(nobs)
y = x.sum(1) + e
factors = []
for i in range(nfactors):
ncat = nobs // density[min(i, len(density) - 1)]
fact = rs.randint(ncat, size=nobs)
effects = rs.standard_normal(ncat)
y += effects[fact]
factors.append(pd.Series(pd.Categorical(fact)))
for i in range(ncont):
cont = rs.standard_normal(size=nobs)
factors.append(pd.Series(cont))
if factors:
factors = pd.concat(factors, axis=1)
if factor_format == "interaction":
if nfactors and ncont:
factors = Interaction(
factors.iloc[:, :nfactors], factors.iloc[:, nfactors:]
)
elif nfactors:
factors = Interaction(factors, None)
else:
factors = Interaction(None, factors)
else:
factors = None
interactions = []
for i in range(cont_interactions):
ncat = nobs // density[min(i, len(density) - 1)]
fact = rs.randint(ncat, size=nobs)
effects = rs.standard_normal(nobs)
y += effects
df = pd.DataFrame(
pd.Series(pd.Categorical(fact)), columns=["fact{0}".format(i)]
)
df_eff = pd.DataFrame(effects[:, None], columns=["effect_{0}".format(i)])
interactions.append(Interaction(df, df_eff))
if factor_format == "pandas":
for i, interact in enumerate(interactions):
interactions[i] = pd.concat([interact.cat, interact.cont], axis=1)
interactions = interactions if interactions else None
if interactions and singleton_interaction:
interactions = interactions[0]
if weighted:
weights = pd.DataFrame(rs.chisquare(10, size=(nobs, 1)) / 10)
else:
weights = None
return AttrDict(
y=y, x=x, absorb=factors, interactions=interactions, weights=weights
)
# Parameter permutations exercised below: k in (0, 3), const in (False, True),
# nfactors in (1, 2, 0), cont_interactions in (0, 1), formats, weighting, etc.
# generate_data defaults for reference: k=3, const=True, nfactors=1,
# factor_density=10, nobs=2000, cont_interactions=1,
# factor_format="interaction", singleton_interaction=False
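# For illustration, one element of ``configs`` produced by the product() call
# below is the 10-tuple
#     (3, True, 1, 10, 2000, 1, "interaction", False, False, 0)
# which generate_data(*config) unpacks as k=3, const=True, nfactors=1,
# factor_density=10, nobs=2000, cont_interactions=1,
# factor_format="interaction", singleton_interaction=False, weighted=False,
# ncont=0.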
configs = product(
[0, 3], # k
[False, True], # constant
[1, 2, 0], # factors
[10], # density
[2000], # nobs
[0, 1], # cont interactions
["interaction", "pandas"], # format
[False, True], # singleton
[False, True], # weighted
[0, 1], # ncont
)
data_configs = [c for c in configs if (c[2] or c[5] or c[9])]
id_str = (
"k: {0}, const: {1}, nfactors: {2}, density: {3}, nobs: {4}, "
"cont_interacts: {5}, format:{6}, singleton:{7}, weighted: {8}, ncont: {9}"
)
data_ids = [id_str.format(*config) for config in configs]
@pytest.fixture(scope="module", params=data_configs, ids=data_ids)
def data(request):
return generate_data(*request.param)
configs_ols = product(
[0, 3], # k
[False, True], # constant
[1, 2, 0], # factors
[50], # density
[500], # nobs
[0, 1], # cont interactions
["interaction"], # format
[False], # singleton
[False, True], # weighted
[0, 1], # ncont
)
configs_ols_data = [c for c in configs_ols if (c[0] or c[1])]
id_str = (
"k: {0}, const: {1}, nfactors: {2}, density: {3}, nobs: {4}, "
"cont_interacts: {5}, format:{6}, singleton:{7}, weighted: {8}, ncont: {9}"
)
ids_ols_data = [id_str.format(*config) for config in configs_ols]
@pytest.fixture(scope="module", params=configs_ols_data, ids=ids_ols_data)
def ols_data(request):
return generate_data(*request.param)
@pytest.mark.smoke
def test_smoke(data):
mod = AbsorbingLS(
data.y,
data.x,
absorb=data.absorb,
interactions=data.interactions,
weights=data.weights,
)
res = mod.fit()
assert isinstance(res.summary, Summary)
assert isinstance(str(res.summary), str)
def test_absorbing_exceptions(random_gen):
with pytest.raises(TypeError):
absorbed = random_gen.standard_normal((NOBS, 2))
assert isinstance(absorbed, np.ndarray)
AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
absorb=absorbed,
)
with pytest.raises(ValueError):
AbsorbingLS(
random_gen.standard_normal(NOBS), random_gen.standard_normal((NOBS - 1, 2))
)
with pytest.raises(ValueError):
AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
absorb=pd.DataFrame(random_gen.standard_normal((NOBS - 1, 1))),
)
with pytest.raises(ValueError):
AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
interactions=random_cat(10, NOBS - 1, frame=True, rs=random_gen),
)
mod = AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
interactions=random_cat(10, NOBS, frame=True, rs=random_gen),
)
with pytest.raises(RuntimeError):
assert isinstance(mod.absorbed_dependent, pd.DataFrame)
with pytest.raises(RuntimeError):
assert isinstance(mod.absorbed_exog, pd.DataFrame)
with pytest.raises(TypeError):
interactions = random_gen.randint(0, 10, size=(NOBS, 2))
assert isinstance(interactions, np.ndarray)
AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
interactions=interactions,
)
def test_clear_cache():
_VARIABLE_CACHE["key"] = {"a": np.empty(100)}
clear_cache()
assert len(_VARIABLE_CACHE) == 0
def test_category_product(cat):
prod = category_product(cat)
if cat.shape[1] == 1:
assert_series_equal(prod, cat.iloc[:, 0], check_names=False)
else:
alt = cat.iloc[:, 0].astype("int64")
for i in range(1, cat.shape[1]):
alt += 10 ** (4 * i) * cat.iloc[:, i].astype("int64")
alt = pd.Categorical(alt)
alt = pd.Series(alt)
df = pd.DataFrame([prod.cat.codes, alt.cat.codes], index=["cat_prod", "alt"]).T
g = df.groupby("cat_prod").alt
assert (g.nunique() == 1).all()
g = df.groupby("alt").cat_prod
assert (g.nunique() == 1).all()
def test_category_product_too_large(random_gen):
dfc = {}
for i in range(20):
dfc[str(i)] = random_cat(10, 1000)
cat = pd.DataFrame(dfc)
with pytest.raises(ValueError):
category_product(cat)
def test_category_product_not_cat(random_gen):
cat = pd.DataFrame(
{str(i): pd.Series(random_gen.randint(0, 10, 1000)) for i in range(3)}
)
with pytest.raises(TypeError):
category_product(cat)
def test_category_interaction():
c = pd.Series(pd.Categorical([0, 0, 0, 1, 1, 1]))
actual = category_interaction(c, precondition=False).A
expected = np.zeros((6, 2))
expected[:3, 0] = 1.0
expected[3:, 1] = 1.0
assert_allclose(actual, expected)
actual = category_interaction(c, precondition=True).A
cond = np.sqrt((expected**2).sum(0))
expected /= cond
assert_allclose(actual, expected)
def test_category_continuous_interaction():
c = pd.Series(pd.Categorical([0, 0, 0, 1, 1, 1]))
v = pd.Series(np.arange(6.0))
actual = category_continuous_interaction(c, v, precondition=False)
expected = np.zeros((6, 2))
expected[:3, 0] = v[:3]
expected[3:, 1] = v[3:]
assert_allclose(actual.A, expected)
actual = category_continuous_interaction(c, v, precondition=True)
cond = np.sqrt((expected**2).sum(0))
expected /= cond
assert_allclose(actual.A, expected)
def test_category_continuous_interaction_interwoven():
c = pd.Series(pd.Categorical([0, 1, 0, 1, 0, 1]))
v = pd.Series(np.arange(6.0))
actual = category_continuous_interaction(c, v, precondition=False)
expected = np.zeros((6, 2))
expected[::2, 0] = v[::2]
expected[1::2, 1] = v[1::2]
assert_allclose(actual.A, expected)
def test_interaction_cat_only(cat):
interact = Interaction(cat=cat)
assert interact.nobs == cat.shape[0]
assert_frame_equal(cat, interact.cat)
expected = category_interaction(category_product(cat), precondition=False)
actual = interact.sparse
assert isinstance(actual, csc_matrix)
assert_allclose(expected.A, actual.A)
def test_interaction_cont_only(cont):
interact = Interaction(cont=cont)
assert interact.nobs == cont.shape[0]
assert_frame_equal(cont, interact.cont)
expected = cont.to_numpy()
actual = interact.sparse
assert isinstance(actual, csc_matrix)
assert_allclose(expected, actual.A)
def test_interaction_cat_cont(cat, cont):
interact = Interaction(cat=cat, cont=cont)
assert interact.nobs == cat.shape[0]
assert_frame_equal(cat, interact.cat)
assert_frame_equal(cont, interact.cont)
base = category_interaction(category_product(cat), precondition=False).A
expected = []
for i in range(cont.shape[1]):
element = base.copy()
element[np.where(element)] = cont.iloc[:, i].to_numpy()
expected.append(element)
expected = np.column_stack(expected)
actual = interact.sparse
assert isinstance(actual, csc_matrix)
assert_allclose(expected, interact.sparse.A)
def test_interaction_from_frame(cat, cont):
base = Interaction(cat=cat, cont=cont)
interact = Interaction.from_frame(pd.concat([cat, cont], axis=1))
assert_allclose(base.sparse.A, interact.sparse.A)
def test_interaction_cat_bad_nobs():
with pytest.raises(ValueError):
Interaction()
with pytest.raises(ValueError):
Interaction(cat=np.empty((100, 0)), cont=np.empty((100, 0)))
def test_empty_interaction():
interact = Interaction(nobs=100)
assert isinstance(interact.sparse, csc_matrix)
assert interact.sparse.shape == (100, 0)
def test_interaction_cat_cont_convert(cat, cont):
base = Interaction(cat, cont)
interact = Interaction(cat.to_numpy(), cont)
assert_allclose(base.sparse.A, interact.sparse.A)
def test_absorbing_regressors(cat, cont, interact, weights):
areg = AbsorbingRegressor(
cat=cat, cont=cont, interactions=interact, weights=weights
)
rank = areg.approx_rank
expected_rank = 0
expected = []
for i, col in enumerate(cat):
expected_rank += pd.Series(cat[col].cat.codes).nunique() - (i > 0)
expected.append(dummy_matrix(cat, precondition=False)[0])
expected_rank += cont.shape[1]
expected.append(csc_matrix(cont))
if interact is not None:
for inter in interact:
interact_mat = inter.sparse
expected_rank += interact_mat.shape[1]
expected.append(interact_mat)
expected = sp.hstack(expected, format="csc")
if weights is not None:
expected = (sp.diags(np.sqrt(weights)).dot(expected)).asformat("csc")
actual = areg.regressors
assert expected.shape == actual.shape
assert_array_equal(expected.indptr, actual.indptr)
assert_array_equal(expected.indices, actual.indices)
assert_allclose(expected.A, actual.A)
assert expected_rank == rank
def test_absorbing_regressors_hash(cat, cont, interact, weights):
areg = AbsorbingRegressor(
cat=cat, cont=cont, interactions=interact, weights=weights
)
# Build hash
hashes = []
for col in cat:
hashes.append((hasher.single(cat[col].cat.codes.to_numpy().data),))
for col in cont:
hashes.append((hasher.single(cont[col].to_numpy().data),))
hashes = sorted(hashes)
if interact is not None:
for inter in interact:
hashes.extend(inter.hash)
if weights is not None:
hashes.append((hasher.single(weights.data),))
hashes = tuple(sorted(hashes))
assert hashes == areg.hash
def test_empty_absorbing_regressor():
areg = AbsorbingRegressor()
assert areg.regressors.shape == (0, 0)
assert areg.hash == tuple()
def test_against_ols(ols_data):
mod = AbsorbingLS(
ols_data.y,
ols_data.x,
absorb=ols_data.absorb,
interactions=ols_data.interactions,
weights=ols_data.weights,
)
res = mod.fit()
absorb = []
has_dummy = False
if ols_data.absorb is not None:
absorb.append(ols_data.absorb.cont.to_numpy())
if ols_data.absorb.cat.shape[1] > 0:
dummies = dummy_matrix(ols_data.absorb.cat, precondition=False)[0]
assert isinstance(dummies, sp.csc_matrix)
absorb.append(dummies.A)
has_dummy = ols_data.absorb.cat.shape[1] > 0
if ols_data.interactions is not None:
for interact in ols_data.interactions:
absorb.append(interact.sparse.A)
_x = ols_data.x
if absorb:
absorb = np.column_stack(absorb)
if np.any(np.ptp(_x, 0) == 0) and has_dummy:
if ols_data.weights is None:
absorb = annihilate(absorb, np.ones((absorb.shape[0], 1)))
else:
root_w = np.sqrt(mod.weights.ndarray)
wabsorb = annihilate(root_w * absorb, root_w)
absorb = (1.0 / root_w) * wabsorb
rank = np.linalg.matrix_rank(absorb)
if rank < absorb.shape[1]:
a, b = np.linalg.eig(absorb.T @ absorb)
order = np.argsort(a)[::-1]
a, b = a[order], b[:, order]
z = absorb @ b
absorb = z[:, :rank]
_x = np.column_stack([_x, absorb])
ols_mod = _OLS(ols_data.y, _x, weights=ols_data.weights)
ols_res = ols_mod.fit()
assert_results_equal(ols_res, res)
def test_cache():
gen = generate_data(
2, True, 2, factor_format="pandas", ncont=0, cont_interactions=1
)
first = len(_VARIABLE_CACHE)
mod = AbsorbingLS(
gen.y, gen.x, absorb=gen.absorb.iloc[:, :1], interactions=gen.interactions
)
mod.fit()
second = len(_VARIABLE_CACHE)
mod = AbsorbingLS(gen.y, gen.x, absorb=gen.absorb, interactions=gen.interactions)
mod.fit()
third = len(_VARIABLE_CACHE)
assert third - second == 1
assert second - first == 1
mod = AbsorbingLS(gen.y, gen.x, absorb=gen.absorb, interactions=gen.interactions)
mod.fit()
fourth = len(_VARIABLE_CACHE)
assert fourth - third == 0
def test_instruments():
gen = generate_data(
2, True, 2, factor_format="pandas", ncont=0, cont_interactions=1
)
mod = AbsorbingLS(
gen.y, gen.x, absorb=gen.absorb.iloc[:, :1], interactions=gen.interactions
)
assert mod.instruments.shape[1] == 0
def assert_results_equal(
o_res: OLSResults, a_res: AbsorbingLSResults, k: Optional[int] = None
) -> None:
if k is None:
k = a_res.params.shape[0]
attrs = [v for v in dir(o_res) if not v.startswith("_")]
callables = ["conf_int"]
skip = [
"summary",
"test_linear_constraint",
"predict",
"model",
"f_statistic",
"wald_test",
"method",
"kappa",
]
for attr in attrs:
if attr in skip:
continue
left = getattr(o_res, attr)
right = getattr(a_res, attr)
if attr in callables:
left = left()
right = right()
if isinstance(left, np.ndarray):
raise NotImplementedError
elif isinstance(left, pd.DataFrame):
if attr == "conf_int":
left = left.iloc[:k]
elif attr == "cov":
left = left.iloc[:k, :k]
assert_allclose(left, right, rtol=2e-4, atol=1e-6)
elif isinstance(left, pd.Series):
assert_allclose(left.iloc[:k], right.iloc[:k], rtol=1e-5)
else:
if isinstance(left, float):
assert_allclose(left, right, atol=1e-10)
else:
assert left == right
assert isinstance(a_res.summary, Summary)
assert isinstance(str(a_res.summary), str)
assert isinstance(a_res.absorbed_effects, pd.DataFrame)
tol = 1e-4 if (8 * struct.calcsize("P")) < 64 else 0.0
assert a_res.absorbed_rsquared <= (a_res.rsquared + tol)
def test_center_cov_arg():
gen = generate_data(
2, True, 2, factor_format="pandas", ncont=0, cont_interactions=1
)
mod = AbsorbingLS(gen.y, gen.x, absorb=gen.absorb, interactions=gen.interactions)
res = mod.fit(center=True)
assert "center" not in res.cov_config
def test_drop_missing():
gen = generate_data(
2, True, 2, factor_format="pandas", ncont=0, cont_interactions=1
)
gen.y[::53] = np.nan
gen.x[::79] = np.nan
with pytest.warns(MissingValueWarning):
AbsorbingLS(gen.y, gen.x, absorb=gen.absorb, interactions=gen.interactions)
gen = generate_data(
2, True, 2, factor_format="pandas", ncont=0, cont_interactions=1
)
for col in gen.absorb:
gen.absorb[col] = gen.absorb[col].astype("int64").astype("object")
col_iloc = gen.absorb.columns.get_loc(col)
gen.absorb.iloc[::91, col_iloc] = np.nan
gen.absorb[col] = pd.Categorical(gen.absorb[col].to_numpy())
with pytest.warns(MissingValueWarning):
AbsorbingLS(gen.y, gen.x, absorb=gen.absorb, interactions=gen.interactions)
def test_drop_absorb(random_gen):
absorb = random_gen.randint(0, 10, size=1000)
x = random_gen.standard_normal((1000, 3))
y = random_gen.standard_normal((1000))
dfd = {f"x{i}": pd.Series(x[:, i]) for i in range(3)}
dfd.update({"c": pd.Series(absorb, dtype="category"), "y": pd.Series(y)})
df = pd.DataFrame(dfd)
y = df.y
x = df.iloc[:, :3]
x = pd.concat([x, pd.get_dummies(df.c).iloc[:, :2]], axis=1)
mod = AbsorbingLS(y, x, absorb=df[["c"]], drop_absorbed=True)
with pytest.warns(AbsorbingEffectWarning):
res = mod.fit()
assert len(res.params) == 3
assert all(f"x{i}" in res.params for i in range(3))
assert isinstance(str(res.summary), str)
mod = AbsorbingLS(y, x, absorb=df[["c"]])
with pytest.raises(AbsorbingEffectError):
mod.fit()
mod = AbsorbingLS(y, x.iloc[:, -2:], absorb=df[["c"]])
with pytest.raises(AbsorbingEffectError):
mod.fit()
def test_fully_absorb(random_gen):
absorb = random_gen.randint(0, 10, size=1000)
x = random_gen.standard_normal((1000, 3))
y = random_gen.standard_normal((1000))
dfd = {f"x{i}": pd.Series(x[:, i]) for i in range(3)}
dfd.update({"c": pd.Series(absorb, dtype="category"), "y": pd.Series(y)})
df = pd.DataFrame(dfd)
y = df.y
x = pd.get_dummies(df.c, drop_first=False)
mod = AbsorbingLS(y, x, absorb=df[["c"]], drop_absorbed=True)
with pytest.raises(ValueError, match="All columns in exog"):
mod.fit()
def test_lsmr_options(random_gen):
absorb = random_gen.randint(0, 10, size=1000)
x = random_gen.standard_normal((1000, 3))
y = random_gen.standard_normal((1000))
dfd = {f"x{i}": pd.Series(x[:, i]) for i in range(3)}
dfd.update({"c": pd.Series(absorb, dtype="category"), "y": pd.Series(y)})
    df = pd.DataFrame(dfd)  # api: pandas.DataFrame
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
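        # Worked example drawn from the assertions below: factorizing
        # [1, 2, 1, nan] with na_sentinel=20 gives codes [0, 1, 0, 20], i.e.
        # the sentinel may lie outside the valid positions of the uniques.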
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
        # arrays that include the NA default for that type, but it isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Series(Categorical(1).from_codes(vals, cats))
St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
        result = algos.value_counts(factor)  # api: pandas.core.algorithms.value_counts
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Implement DataFrame public API as Pandas does.
Almost all docstrings for public and magic methods should be inherited from Pandas
for better maintainability, so some error codes are ignored in the pydocstyle check:
- D101: missing docstring in class
- D102: missing docstring in public method
- D105: missing docstring in magic method
Documentation is added manually for methods that are not present in pandas.
"""
import pandas
from pandas.core.common import apply_if_callable
from pandas.core.dtypes.common import (
infer_dtype_from_object,
is_dict_like,
is_list_like,
is_numeric_dtype,
)
from pandas.core.indexes.api import ensure_index_from_sequences
from pandas.util._validators import validate_bool_kwarg
from pandas.io.formats.printing import pprint_thing
from pandas._libs.lib import no_default
from pandas._typing import Label
import itertools
import functools
import numpy as np
import sys
from typing import Optional, Sequence, Tuple, Union, Mapping
import warnings
from modin.error_message import ErrorMessage
from modin.utils import _inherit_docstrings, to_pandas, hashable
from modin.config import IsExperimental
from .utils import (
from_pandas,
from_non_pandas,
)
from .iterator import PartitionIterator
from .series import Series
from .base import BasePandasDataset, _ATTRS_NO_LOOKUP
from .groupby import DataFrameGroupBy
from .accessor import CachedAccessor, SparseFrameAccessor
@_inherit_docstrings(pandas.DataFrame, excluded=[pandas.DataFrame.__init__])
class DataFrame(BasePandasDataset):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""
Distributed DataFrame object backed by Pandas dataframes.
Parameters
----------
data: NumPy ndarray (structured or homogeneous) or dict:
Dict can contain Series, arrays, constants, or list-like
objects.
index: pandas.Index, list, ObjectID
The row index for this DataFrame.
columns: pandas.Index
The column names for this DataFrame, in pandas Index object.
dtype: Data type to force.
Only a single dtype is allowed. If None, infer
copy: bool
Copy data from inputs. Only affects DataFrame / 2d ndarray input.
query_compiler: query_compiler
A query compiler object to manage distributed computation.
"""
if isinstance(data, (DataFrame, Series)):
self._query_compiler = data._query_compiler.copy()
if index is not None and any(i not in data.index for i in index):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if isinstance(data, Series):
# We set the column name if it is not in the provided Series
if data.name is None:
self.columns = [0] if columns is None else columns
# If the columns provided are not in the named Series, pandas clears
# the DataFrame and sets columns to the columns provided.
elif columns is not None and data.name not in columns:
self._query_compiler = from_pandas(
DataFrame(columns=columns)
)._query_compiler
if index is not None:
self._query_compiler = data.loc[index]._query_compiler
elif columns is None and index is None:
data._add_sibling(self)
else:
if columns is not None and any(i not in data.columns for i in columns):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if index is None:
index = slice(None)
if columns is None:
columns = slice(None)
self._query_compiler = data.loc[index, columns]._query_compiler
# Check type of data and use appropriate constructor
elif query_compiler is None:
distributed_frame = from_non_pandas(data, index, columns, dtype)
if distributed_frame is not None:
self._query_compiler = distributed_frame._query_compiler
return
warnings.warn(
"Distributing {} object. This may take some time.".format(type(data))
)
if is_list_like(data) and not is_dict_like(data):
old_dtype = getattr(data, "dtype", None)
values = [
obj._to_pandas() if isinstance(obj, Series) else obj for obj in data
]
if isinstance(data, np.ndarray):
data = np.array(values, dtype=old_dtype)
else:
try:
data = type(data)(values, dtype=old_dtype)
except TypeError:
data = values
elif is_dict_like(data) and not isinstance(
data, (pandas.Series, Series, pandas.DataFrame, DataFrame)
):
data = {
k: v._to_pandas() if isinstance(v, Series) else v
for k, v in data.items()
}
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
def __repr__(self):
from pandas.io.formats import console
num_rows = pandas.get_option("display.max_rows") or 10
num_cols = pandas.get_option("display.max_columns") or 20
if pandas.get_option("display.max_columns") is None and pandas.get_option(
"display.expand_frame_repr"
):
width, _ = console.get_console_size()
width = min(width, len(self.columns))
col_counter = 0
i = 0
while col_counter < width:
col_counter += len(str(self.columns[i])) + 1
i += 1
num_cols = i
i = len(self.columns) - 1
col_counter = 0
while col_counter < width:
col_counter += len(str(self.columns[i])) + 1
i -= 1
num_cols += len(self.columns) - i
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self): # pragma: no cover
num_rows = pandas.get_option("max_rows") or 60
num_cols = pandas.get_option("max_columns") or 20
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
result = self._build_repr_df(num_rows, num_cols)._repr_html_()
if len(self.index) > num_rows or len(self.columns) > num_cols:
# We split so that we insert our correct dataframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</div>".format(
len(self.index), len(self.columns)
)
else:
return result
def _get_columns(self):
"""
Get the columns for this DataFrame.
Returns
-------
The union of all indexes across the partitions.
"""
return self._query_compiler.columns
def _set_columns(self, new_columns):
"""
Set the columns for this DataFrame.
Parameters
----------
new_columns: The new index to set this
"""
self._query_compiler.columns = new_columns
columns = property(_get_columns, _set_columns)
@property
def ndim(self):
# DataFrames have an invariant that requires they be 2 dimensions.
return 2
def drop_duplicates(
self, subset=None, keep="first", inplace=False, ignore_index=False
):
return super(DataFrame, self).drop_duplicates(
subset=subset, keep=keep, inplace=inplace
)
@property
def dtypes(self):
return self._query_compiler.dtypes
def duplicated(self, subset=None, keep="first"):
import hashlib
df = self[subset] if subset is not None else self
# if the number of columns we are checking for duplicates is larger than 1, we must
# hash them to generate a single value that can be compared across rows.
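        # For example, the row (1, "a") is reduced to md5(str((1, "a"))), so a
        # multi-column row can be compared with a single equality check below.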
if len(df.columns) > 1:
hashed = df.apply(
lambda s: hashlib.new("md5", str(tuple(s)).encode()).hexdigest(), axis=1
).to_frame()
else:
hashed = df
duplicates = hashed.apply(lambda s: s.duplicated(keep=keep)).squeeze(axis=1)
# remove Series name which was assigned automatically by .apply
duplicates.name = None
return duplicates
@property
def empty(self):
return len(self.columns) == 0 or len(self.index) == 0
@property
def axes(self):
return [self.index, self.columns]
@property
def shape(self):
return len(self.index), len(self.columns)
def add_prefix(self, prefix):
return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def applymap(self, func):
if not callable(func):
raise ValueError("'{0}' object is not callable".format(type(func)))
ErrorMessage.non_verified_udf()
return DataFrame(query_compiler=self._query_compiler.applymap(func))
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
axis = self._get_axis_number(axis)
query_compiler = super(DataFrame, self).apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
if not isinstance(query_compiler, type(self._query_compiler)):
return query_compiler
# This is the simplest way to determine the return type, but there are checks
# in pandas that verify that some results are created. This is a challenge for
# empty DataFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which means that the return type won't change from
# type(self), so we catch that error and use `type(self).__name__` for the return
# type.
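        # Illustration of the probe: an empty pandas object built from this
        # frame's index (axis=0) or columns (axis=1) is given the same `func`;
        # whether pandas returns a Series or a DataFrame tells us which Modin
        # constructor to wrap the query compiler in, without touching the
        # distributed data.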
try:
if axis == 0:
init_kwargs = {"index": self.index}
else:
init_kwargs = {"columns": self.columns}
return_type = type(
getattr(pandas, type(self).__name__)(**init_kwargs).apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
).__name__
except Exception:
return_type = type(self).__name__
if return_type not in ["DataFrame", "Series"]:
return query_compiler.to_pandas().squeeze()
else:
result = getattr(sys.modules[self.__module__], return_type)(
query_compiler=query_compiler
)
if isinstance(result, Series):
                if (axis == 0 and result.name == self.index[0]) or result.name == 0:
                    result.name = None
                elif (axis == 1 and result.name == self.columns[0]) or result.name == 0:
result.name = None
return result
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze: bool = no_default,
observed=False,
dropna: bool = True,
):
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
axis = self._get_axis_number(axis)
idx_name = None
# Drop here indicates whether or not to drop the data column before doing the
# groupby. The typical pandas behavior is to drop when the data came from this
# dataframe. When a string, Series directly from this dataframe, or list of
# strings is passed in, the data used for the groupby is dropped before the
# groupby takes place.
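        # Illustration: df.groupby("a") and df.groupby(df["a"]) both drop
        # column "a" from the grouped data, while grouping by an unrelated
        # external Series keeps every column of df.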
drop = False
if (
not isinstance(by, (pandas.Series, Series))
and is_list_like(by)
and len(by) == 1
):
by = by[0]
if callable(by):
by = self.index.map(by)
elif isinstance(by, str):
drop = by in self.columns
idx_name = by
if (
self._query_compiler.has_multiindex(axis=axis)
and by in self.axes[axis].names
or hasattr(self.axes[axis], "name")
and self.axes[axis].name == by
):
# In this case we pass the string value of the name through to the
# partitions. This is more efficient than broadcasting the values.
pass
else:
by = self.__getitem__(by)._query_compiler
elif isinstance(by, Series):
drop = by._parent is self
idx_name = by.name
by = by._query_compiler
elif is_list_like(by):
# fastpath for multi column groupby
if (
not isinstance(by, Series)
and axis == 0
and all(
(
(isinstance(o, str) and (o in self))
or (isinstance(o, Series) and (o._parent is self))
)
for o in by
)
):
# We can just revert Series back to names because the parent is
# this dataframe:
by = [o.name if isinstance(o, Series) else o for o in by]
by = self.__getitem__(by)._query_compiler
drop = True
else:
mismatch = len(by) != len(self.axes[axis])
if mismatch and all(
isinstance(obj, str)
and (
obj in self
or (hasattr(self.index, "names") and obj in self.index.names)
)
for obj in by
):
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch and any(
isinstance(obj, str) and obj not in self.columns for obj in by
):
names = [o.name if isinstance(o, Series) else o for o in by]
raise KeyError(next(x for x in names if x not in self))
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
drop=drop,
dropna=dropna,
)
def keys(self):
return self.columns
def transpose(self, copy=False, *args):
return DataFrame(query_compiler=self._query_compiler.transpose(*args))
T = property(transpose)
def add(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"add",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
def append(self, other, ignore_index=False, verify_integrity=False, sort=False):
if sort is False:
warnings.warn(
"Due to https://github.com/pandas-dev/pandas/issues/35092, "
"Pandas ignores sort=False; Modin correctly does not sort."
)
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is not None:
# other must have the same index name as self, otherwise
# index name will be reset
name = other.name
# We must transpose here because a Series becomes a new row, and the
# structure of the query compiler is currently columnar
other = other._query_compiler.transpose()
other.index = pandas.Index([name], name=self.index.name)
else:
# See note above about transpose
other = other._query_compiler.transpose()
elif isinstance(other, list):
if not all(isinstance(o, BasePandasDataset) for o in other):
other = DataFrame(pandas.DataFrame(other))._query_compiler
else:
other = [obj._query_compiler for obj in other]
else:
other = other._query_compiler
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
appended_index = (
self.index.append(other.index)
if not isinstance(other, list)
else self.index.append([o.index for o in other])
)
is_valid = next((False for idx in appended_index.duplicated() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".format(
appended_index[appended_index.duplicated()]
)
)
query_compiler = self._query_compiler.concat(
0, other, ignore_index=ignore_index, sort=sort
)
return DataFrame(query_compiler=query_compiler)
def assign(self, **kwargs):
df = self.copy()
for k, v in kwargs.items():
if callable(v):
df[k] = v(df)
else:
df[k] = v
return df
def boxplot(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
backend=None,
**kwargs,
):
return to_pandas(self).boxplot(
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
backend=backend,
**kwargs,
)
def combine(self, other, func, fill_value=None, overwrite=True):
return super(DataFrame, self).combine(
other, func, fill_value=fill_value, overwrite=overwrite
)
def compare(
self,
other: "DataFrame",
align_axis: Union[str, int] = 1,
keep_shape: bool = False,
keep_equal: bool = False,
) -> "DataFrame":
return self._default_to_pandas(
pandas.DataFrame.compare,
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
def corr(self, method="pearson", min_periods=1):
return self.__constructor__(
query_compiler=self._query_compiler.corr(
method=method,
min_periods=min_periods,
)
)
def corrwith(self, other, axis=0, drop=False, method="pearson"):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.corrwith, other, axis=axis, drop=drop, method=method
)
def cov(self, min_periods=None, ddof: Optional[int] = 1):
numeric_df = self.drop(
columns=[
i for i in self.dtypes.index if not is_numeric_dtype(self.dtypes[i])
]
)
is_notna = True
if all(numeric_df.notna().all()):
if min_periods is not None and min_periods > len(numeric_df):
result = np.empty((numeric_df.shape[1], numeric_df.shape[1]))
result.fill(np.nan)
return numeric_df.__constructor__(result)
else:
cols = numeric_df.columns
idx = cols.copy()
numeric_df = numeric_df.astype(dtype="float64")
denom = 1.0 / (len(numeric_df) - ddof)
means = numeric_df.mean(axis=0)
result = numeric_df - means
result = result.T._query_compiler.conj().dot(result._query_compiler)
else:
result = numeric_df._query_compiler.cov(min_periods=min_periods)
is_notna = False
if is_notna:
result = numeric_df.__constructor__(
query_compiler=result, index=idx, columns=cols
)
result *= denom
else:
result = numeric_df.__constructor__(query_compiler=result)
return result
def dot(self, other):
if isinstance(other, BasePandasDataset):
common = self.columns.union(other.index)
if len(common) > len(self.columns) or len(common) > len(other.index):
raise ValueError("Matrices are not aligned")
qc = other.reindex(index=common)._query_compiler
if isinstance(other, DataFrame):
return self.__constructor__(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=False, squeeze_other=False
)
)
else:
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=False, squeeze_other=True
)
)
other = np.asarray(other)
if self.shape[1] != other.shape[0]:
raise ValueError(
"Dot product shape mismatch, {} vs {}".format(self.shape, other.shape)
)
if len(other.shape) > 1:
return self.__constructor__(
query_compiler=self._query_compiler.dot(other, squeeze_self=False)
)
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(other, squeeze_self=False)
)
def eq(self, other, axis="columns", level=None):
return self._binary_op(
"eq", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def equals(self, other):
if isinstance(other, pandas.DataFrame):
# Copy into a Modin DataFrame to simplify logic below
other = DataFrame(other)
return (
self.index.equals(other.index)
and self.columns.equals(other.columns)
and self.eq(other).all().all()
)
def explode(self, column: Union[str, Tuple], ignore_index: bool = False):
return self._default_to_pandas(
pandas.DataFrame.explode, column, ignore_index=ignore_index
)
def eval(self, expr, inplace=False, **kwargs):
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
return_type = type(
pandas.DataFrame(columns=self.columns)
.astype(self.dtypes)
.eval(expr, **kwargs)
).__name__
if return_type == type(self).__name__:
return self._create_or_update_from_compiler(new_query_compiler, inplace)
else:
if inplace:
raise ValueError("Cannot operate inplace if there is no assignment")
return getattr(sys.modules[self.__module__], return_type)(
query_compiler=new_query_compiler
)
def floordiv(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"floordiv",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
@classmethod
def from_dict(
cls, data, orient="columns", dtype=None, columns=None
): # pragma: no cover
ErrorMessage.default_to_pandas("`from_dict`")
return from_pandas(
pandas.DataFrame.from_dict(
data, orient=orient, dtype=dtype, columns=columns
)
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
): # pragma: no cover
ErrorMessage.default_to_pandas("`from_records`")
return from_pandas(
pandas.DataFrame.from_records(
data,
index=index,
exclude=exclude,
columns=columns,
coerce_float=coerce_float,
nrows=nrows,
)
)
def ge(self, other, axis="columns", level=None):
return self._binary_op(
"ge", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def gt(self, other, axis="columns", level=None):
return self._binary_op(
"gt", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def hist(
self,
column=None,
by=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
sharex=False,
sharey=False,
figsize=None,
layout=None,
bins=10,
**kwds,
): # pragma: no cover
return self._default_to_pandas(
pandas.DataFrame.hist,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
bins=bins,
**kwds,
)
def info(
self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
):
def put_str(src, output_len=None, spaces=2):
src = str(src)
return src.ljust(output_len if output_len else len(src)) + " " * spaces
def format_size(num):
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
return f"{num:3.1f} {x}"
num /= 1024.0
return f"{num:3.1f} PB"
output = []
type_line = str(type(self))
index_line = self.index._summary()
columns = self.columns
columns_len = len(columns)
dtypes = self.dtypes
dtypes_line = f"dtypes: {', '.join(['{}({})'.format(dtype, count) for dtype, count in dtypes.value_counts().items()])}"
if max_cols is None:
max_cols = 100
exceeds_info_cols = columns_len > max_cols
if buf is None:
buf = sys.stdout
if null_counts is None:
null_counts = not exceeds_info_cols
if verbose is None:
verbose = not exceeds_info_cols
if null_counts and verbose:
            # We're going to take items from `non_null_count` in a loop, which
            # is slow on a Modin Series, so we convert it once with
            # `_to_pandas()` here for speed.
non_null_count = self.count()._to_pandas()
if memory_usage is None:
memory_usage = True
def get_header(spaces=2):
output = []
head_label = " # "
column_label = "Column"
null_label = "Non-Null Count"
dtype_label = "Dtype"
non_null_label = " non-null"
delimiter = "-"
lengths = {}
lengths["head"] = max(len(head_label), len(pprint_thing(len(columns))))
lengths["column"] = max(
len(column_label), max(len(pprint_thing(col)) for col in columns)
)
lengths["dtype"] = len(dtype_label)
dtype_spaces = (
max(lengths["dtype"], max(len(pprint_thing(dtype)) for dtype in dtypes))
- lengths["dtype"]
)
header = put_str(head_label, lengths["head"]) + put_str(
column_label, lengths["column"]
)
if null_counts:
lengths["null"] = max(
len(null_label),
max(len(pprint_thing(x)) for x in non_null_count)
+ len(non_null_label),
)
header += put_str(null_label, lengths["null"])
header += put_str(dtype_label, lengths["dtype"], spaces=dtype_spaces)
output.append(header)
delimiters = put_str(delimiter * lengths["head"]) + put_str(
delimiter * lengths["column"]
)
if null_counts:
delimiters += put_str(delimiter * lengths["null"])
delimiters += put_str(delimiter * lengths["dtype"], spaces=dtype_spaces)
output.append(delimiters)
return output, lengths
output.extend([type_line, index_line])
def verbose_repr(output):
columns_line = f"Data columns (total {len(columns)} columns):"
header, lengths = get_header()
output.extend([columns_line, *header])
for i, col in enumerate(columns):
i, col, dtype = map(pprint_thing, [i, col, dtypes[col]])
to_append = put_str(" {}".format(i), lengths["head"]) + put_str(
col, lengths["column"]
)
if null_counts:
non_null = pprint_thing(non_null_count[col])
import logging as logger
import re
import regex
import unicodedata
from abc import abstractmethod
from collections import defaultdict
import pandas as pd
import nltk
# noinspection PyPackageRequirements
from iso639 import languages
from langdetect import detect, DetectorFactory
from nltk.corpus import stopwords
# noinspection PyPackageRequirements
from spellchecker import SpellChecker
from textstat import textstat
from langdetect.lang_detect_exception import LangDetectException
from shift_detector.precalculations.precalculation import Precalculation
from shift_detector.precalculations.text_precalculation import TokenizeIntoLowerWordsPrecalculation
from shift_detector.utils import ucb_list
from shift_detector.utils.column_management import ColumnType
from shift_detector.utils.text_metadata_utils import most_common_n_to_string_frequency, \
most_common_n_to_string_alphabetically, delimiters
class GenericTextMetadata(Precalculation):
def __eq__(self, other):
return isinstance(other, self.__class__)
def __hash__(self):
return hash(self.__class__)
def __lt__(self, other):
return self.metadata_name() < other.metadata_name()
def __le__(self, other):
return self.metadata_name() <= other.metadata_name()
def __gt__(self, other):
return self.metadata_name() > other.metadata_name()
def __ge__(self, other):
return self.metadata_name() >= other.metadata_name()
@staticmethod
@abstractmethod
def metadata_name() -> str:
raise NotImplementedError
@abstractmethod
def metadata_return_type(self) -> ColumnType:
raise NotImplementedError
@abstractmethod
def metadata_function(self, text):
raise NotImplementedError
def process(self, store):
metadata1 = pd.DataFrame()
metadata2 = pd.DataFrame()
df1, df2 = store[ColumnType.text]
columns = store.column_names(ColumnType.text)
for column in columns:
clean1 = df1[column].dropna()
clean2 = df2[column].dropna()
logger.info(self.metadata_name() + ' analysis for ' + column)
metadata1[column] = [self.metadata_function(text) for text in clean1]
metadata2[column] = [self.metadata_function(text) for text in clean2]
return metadata1, metadata2
class GenericTextMetadataWithTokenizing(GenericTextMetadata):
@staticmethod
@abstractmethod
def metadata_name() -> str:
raise NotImplementedError
@abstractmethod
def metadata_return_type(self) -> ColumnType:
raise NotImplementedError
@abstractmethod
def metadata_function(self, words):
raise NotImplementedError
def process(self, store):
metadata1 = pd.DataFrame()
metadata2 = pd.DataFrame()
df1, df2 = store[TokenizeIntoLowerWordsPrecalculation()]
for column in df1.columns:
logger.info(self.metadata_name() + ' analysis for ' + column)
metadata1[column] = [self.metadata_function(words) for words in df1[column]]
metadata2[column] = [self.metadata_function(words) for words in df2[column]]
return metadata1, metadata2
class GenericTextMetadataWithTokenizingAndLanguage(GenericTextMetadata):
def __init__(self, language='en', infer_language=False):
self.language = language
self.infer_language = infer_language
@staticmethod
@abstractmethod
def metadata_name() -> str:
raise NotImplementedError
@abstractmethod
def metadata_return_type(self) -> ColumnType:
raise NotImplementedError
@abstractmethod
def metadata_function(self, language, words):
raise NotImplementedError
def process(self, store):
metadata1 = pd.DataFrame()
metadata2 = pd.DataFrame()
df1, df2 = store[TokenizeIntoLowerWordsPrecalculation()]
columns = store.column_names(ColumnType.text)
if self.infer_language:
lang1, lang2 = store[LanguageMetadata()]
for column in columns:
logger.info(self.metadata_name() + ' analysis for ' + column)
temp_column1 = []
temp_column2 = []
for i in range(len(df1)):
if self.infer_language:
temp_column1.append(self.metadata_function(lang1[column][i], df1[column][i]))
temp_column2.append(self.metadata_function(lang2[column][i], df2[column][i]))
else:
temp_column1.append(self.metadata_function(self.language, df1[column][i]))
temp_column2.append(self.metadata_function(self.language, df2[column][i]))
metadata1[column] = temp_column1
metadata2[column] = temp_column2
return metadata1, metadata2
class GenericTextMetadataWithLanguage(GenericTextMetadata):
def __init__(self, language='en', infer_language=False):
self.language = language
self.infer_language = infer_language
@staticmethod
@abstractmethod
def metadata_name() -> str:
raise NotImplementedError
@abstractmethod
def metadata_return_type(self) -> ColumnType:
raise NotImplementedError
@abstractmethod
def metadata_function(self, language, text):
raise NotImplementedError
def process(self, store):
metadata1 = pd.DataFrame()
metadata2 = pd.DataFrame()
df1, df2 = store[ColumnType.text]
columns = store.column_names(ColumnType.text)
if self.infer_language:
lang1, lang2 = store[LanguageMetadata()]
for column in columns:
logger.info(self.metadata_name() + ' analysis for ' + column)
temp_column1 = []
temp_column2 = []
for i in range(len(df1)):
if self.infer_language:
temp_column1.append(self.metadata_function(lang1[column][i], df1[column][i]))
temp_column2.append(self.metadata_function(lang2[column][i], df2[column][i]))
else:
temp_column1.append(self.metadata_function(self.language, df1[column][i]))
temp_column2.append(self.metadata_function(self.language, df2[column][i]))
metadata1[column] = temp_column1
metadata2[column] = temp_column2
return metadata1, metadata2
class NumCharsMetadata(GenericTextMetadata):
@staticmethod
def metadata_name() -> str:
return 'num_chars'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, text):
if not isinstance(text, str):
return float('nan')
return len(text)
class RatioUppercaseLettersMetadata(GenericTextMetadata):
@staticmethod
def metadata_name() -> str:
return 'ratio_upper'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, text):
if not isinstance(text, str):
return float('nan')
if text == "":
return 0
alpha = sum(1 for c in text if c.isalpha())
if alpha == 0:
    return 0
upper = sum(1 for c in text if c.isupper())
return upper / alpha
class UnicodeCategoriesMetadata(GenericTextMetadata):
@staticmethod
def metadata_name() -> str:
return 'unicode_categories'
def metadata_return_type(self) -> ColumnType:
return ColumnType.categorical
@staticmethod
def unicode_category_histogram(text):
if not isinstance(text, str):
return float('nan')
characters = defaultdict(int)
for c in text:
category = unicodedata.category(c)
characters[category] += 1
return characters
def metadata_function(self, text):
return most_common_n_to_string_frequency(self.unicode_category_histogram(text), 5)
class UnicodeBlocksMetadata(GenericTextMetadata):
@staticmethod
def metadata_name() -> str:
return 'unicode_blocks'
def metadata_return_type(self) -> ColumnType:
return ColumnType.categorical
@staticmethod
def unicode_block_histogram(text):
if not isinstance(text, str):
return float('nan')
characters = defaultdict(int)
for c in text:
block = ucb_list.block(c)
characters[block] += 1
return characters
def metadata_function(self, text):
return most_common_n_to_string_frequency(self.unicode_block_histogram(text), 5)
class NumWordsMetadata(GenericTextMetadataWithTokenizing):
@staticmethod
def metadata_name() -> str:
return 'num_words'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, words):
if not isinstance(words, list):
return float('nan')
return len(words)
class DistinctWordsRatioMetadata(GenericTextMetadataWithTokenizing):
@staticmethod
def metadata_name() -> str:
return 'distinct_words'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, words):
if not isinstance(words, list):
return float('nan')
distinct_words = set()
if len(words) == 0:
return 0.0
for word in words:
if word not in distinct_words:
distinct_words.add(word)
return len(distinct_words) / len(words)
class UniqueWordsRatioMetadata(GenericTextMetadataWithTokenizing):
@staticmethod
def metadata_name() -> str:
return 'unique_words'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, words):
if not isinstance(words, list):
return float('nan')
if len(words) == 0:
return 0.0
seen_once = set()
seen_often = set()
for word in words:
if word not in seen_often:
if word not in seen_once:
seen_once.add(word)
else:
seen_once.remove(word)
seen_often.add(word)
return len(seen_once) / len(words)
class UnknownWordRatioMetadata(GenericTextMetadataWithTokenizingAndLanguage):
@staticmethod
def metadata_name() -> str:
return 'unknown_word_ratio'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, language, words):
# pyspellchecker supports multiple languages including English, Spanish, German, French, and Portuguese
if not isinstance(words, list):
return float('nan')
try:
spell = SpellChecker(language)
except ValueError as error:
return float('nan')
if len(words) == 0:
return 0.0
misspelled = spell.unknown(words)
return len(misspelled) / len(words)
class StopwordRatioMetadata(GenericTextMetadataWithTokenizingAndLanguage):
@staticmethod
def metadata_name() -> str:
return 'stopword_ratio'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, language, words):
# not working for every language
if not isinstance(words, list):
return float('nan')
stopword_count = 0
try:
stopwords_for_language_lower = stopwords.words(languages.get(part1=language).name.lower())
if len(words) == 0:
return 0.0
for word in words:
if word in stopwords_for_language_lower:
stopword_count += 1
return stopword_count / len(words)
except OSError as error:
return float('nan')
class DelimiterTypeMetadata(GenericTextMetadata):
@staticmethod
def metadata_name() -> str:
return 'delimiter_type'
def metadata_return_type(self) -> ColumnType:
return ColumnType.categorical
def metadata_function(self, text):
if not isinstance(text, str):
return float('nan')
for key, value in delimiters.items():
if regex.compile(value).search(text):
return key
return 'no delimiter'
class NumPartsMetadata(GenericTextMetadata):
# Calculates the delimiter of the text and then splits the text by its delimiter
# to calculate the number of parts in the text
@staticmethod
def metadata_name() -> str:
return 'num_parts'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, text):
if not isinstance(text, str):
return float('nan')
delimiter = DelimiterTypeMetadata().metadata_function(text)
for key, value in delimiters.items():
if key == delimiter:
return len(regex.split(regex.compile(value), text))
return 0
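# Illustrative sketch (added for clarity, not part of the original module): how the two
# delimiter-based metadata classes are typically combined on a raw text value. The exact
# delimiter key returned depends on the `delimiters` mapping in text_metadata_utils, so
# no specific output value is assumed here; `_demo_delimiter_metadata` is a hypothetical helper.
def _demo_delimiter_metadata(text="first part, second part, third part"):
    delimiter_type = DelimiterTypeMetadata().metadata_function(text)
    num_parts = NumPartsMetadata().metadata_function(text)
    return delimiter_type, num_parts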
class LanguagePerParagraph(GenericTextMetadata):
# Depending on the text's delimiter, splits the text into parts and detects the language of each part.
# Returns a string with the languages, sorted by their frequency
def __init__(self, seed=0):
self.seed = seed
@staticmethod
def metadata_name() -> str:
return 'language'
def metadata_return_type(self) -> ColumnType:
return ColumnType.categorical
@staticmethod
def detect_languages(text):
if not isinstance(text, str) or len(text) == 0:
return float('nan')
if DelimiterTypeMetadata().metadata_function(text) == 'HTML':
parts = re.split(r'<\s*br\s*/?\s*>', text)
else:
parts = re.split(r'[\n\r]+', text)
parts = [x.strip() for x in parts if x.strip()]
detected_languages = defaultdict(int)
for part in parts:
try:
lang = detect(part)
detected_languages[lang] += 1
except LangDetectException:
continue
if detected_languages == {}:
return float('nan')
return detected_languages
def metadata_function(self, text):
DetectorFactory.seed = self.seed
return most_common_n_to_string_alphabetically(self.detect_languages(text), 3)
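# Illustrative sketch (added, not part of the original module): LanguagePerParagraph on a
# mixed-language, line-delimited text. The result comes from langdetect, so the exact
# histogram is model-dependent and not asserted here; `_demo_language_per_paragraph` is a
# hypothetical helper name.
def _demo_language_per_paragraph():
    text = "This paragraph is written in English.\nDieser Absatz ist auf Deutsch geschrieben."
    return LanguagePerParagraph().metadata_function(text)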
class LanguageMetadata(GenericTextMetadata):
def __init__(self, seed=0):
self.seed = seed
@staticmethod
def metadata_name() -> str:
return 'language'
def metadata_return_type(self) -> ColumnType:
return ColumnType.categorical
def metadata_function(self, text):
if not isinstance(text, str):
return float('nan')
DetectorFactory.seed = self.seed
return detect(text)
class ComplexityMetadata(GenericTextMetadataWithLanguage):
@staticmethod
def metadata_name() -> str:
return 'complexity'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, language, text):
if not isinstance(text, str) or language != 'en':
return float('nan')
return textstat.text_standard(text, True)
class PartOfSpeechMetadata(GenericTextMetadataWithLanguage):
@staticmethod
def metadata_name() -> str:
return 'part_of_speech_tags'
def metadata_return_type(self) -> ColumnType:
return ColumnType.categorical
@staticmethod
def tag_histogram(text):
tokenized_text = nltk.word_tokenize(text)
tagged_text = nltk.pos_tag(tokenized_text)
simplified_tagged_text = [(word, nltk.map_tag('en-ptb', 'universal', tag)) for word, tag in tagged_text]
tagdict = defaultdict(int)
for word in simplified_tagged_text:
tagdict[word[1]] += 1
return tagdict
def metadata_function(self, language, text):
if not isinstance(text, str) or language != 'en':
return float('nan')
return most_common_n_to_string_frequency(self.tag_histogram(text), 5)
class TextMetadata(Precalculation):
def __init__(self, text_metadata_types=None, language='en', infer_language=False):
if text_metadata_types is None:
self.text_metadata_types = frozenset([NumCharsMetadata(), NumWordsMetadata(), DistinctWordsRatioMetadata()])
else:
self.text_metadata_types = frozenset(text_metadata_types)
if infer_language or language != 'en':
for mdtype in self.text_metadata_types:
if isinstance(mdtype, GenericTextMetadataWithTokenizingAndLanguage):
mdtype.language = language
mdtype.infer_language = infer_language
def __eq__(self, other):
return isinstance(other, self.__class__) and self.text_metadata_types == other.text_metadata_types
def __hash__(self):
return hash((self.__class__, self.text_metadata_types))
def process(self, store):
columns = store.column_names(ColumnType.text)
metadata_names = sorted([mdtype.metadata_name() for mdtype in self.text_metadata_types])
index = pd.MultiIndex.from_product([columns, metadata_names], names=['column', 'metadata'])
"""title
https://adventofcode.com/2021/day/19
"""
import numpy as np
import pandas as pd
import itertools
import re
SMALL_INPUT = open('small_input.txt').read()
ORIENTATIONS = """
x, y, z
x, z,-y
x,-y,-z
x,-z, y
y,-x, z
y, z, x
y, x,-z
y,-z,-x
z, y,-x
z,-x,-y
z,-y, x
z, x, y
-x, y,-z
-x, z, y
-x,-y, z
-x,-z,-y
-y,-x,-z
-y, z,-x
-y, x, z
-y,-z, x
-z, y, x
-z,-x, y
-z,-y,-x
-z, x,-y
"""
ORIENTATIONS = re.findall(r'(.)(.),(.)(.),(.)(.)', ORIENTATIONS)
def parse(data):
result = {}
scanners = data.strip().split('\n\n')
for i, s in enumerate(scanners):
coords = []
for row in re.findall(r'(-?\d+),(-?\d+),(-?\d+)', s):
coords.append(list(map(int, row)))
coords.sort()
a = np.array(coords)
result[i] = a
return result
def get_axis(a, sign, axis):
axis_index = 'xyz'.find(axis)
sign = -1 if sign == '-' else 1
return sign * a[:, axis_index]
def get_orientations(scanner):
for xsig, xax, ysig, yax, zsig, zax in ORIENTATIONS:
b = np.zeros(scanner.shape, scanner.dtype)
b[:, 0] = get_axis(scanner, xsig, xax)
b[:, 1] = get_axis(scanner, ysig, yax)
b[:, 2] = get_axis(scanner, zsig, zax)
yield b
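# Quick sanity sketch (added, not part of the original solution): every scanner yields
# exactly 24 candidate orientations, one per row of the ORIENTATIONS table above.
# `_demo_orientation_count` is a hypothetical helper name.
def _demo_orientation_count():
    scanner = np.array([[1, 2, 3], [4, 5, 6]])
    return len(list(get_orientations(scanner)))  # expected to be 24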
def match(s1, s2):
for origin1 in s1[-10:]: # one of these has to match because they are sorted
for origin2 in s2:
translation = origin2 - origin1
s2_trans = s2 - translation
merged = np.vstack([s1, s2_trans])
uni = np.unique(merged, axis=0)
overlap = merged.shape[0] - uni.shape[0]
if overlap >= 12:
s2_trans = pd.DataFrame(s2_trans)
#Import modules
import os
import pandas as pd
import numpy as np
from pandas import DatetimeIndex
import dask
import scipy
from scipy.optimize import minimize, LinearConstraint
import time
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import pickle
#Define Column Name
indexName = 'date'
indexExpiry = 'optionExpiry'
indexTenor = 'underlyingTerm'
indexStrike = 'Strike'
indexRelStrike = 'RelativeStrike'
def getTTMFromCoordinates(dfList):
return dfList[1].applymap(lambda x : x[0])
def getMoneynessFromCoordinates(dfList):
return dfList[1].applymap(lambda x : x[1])
def readfile(file):
print("file")
print(file)
def iterateOnFolderContent(folderName):
for elt in os.scandir(folderName):
if os.DirEntry.is_dir(elt):
print("Folder")
print(elt)
iterateOnFolderContent(elt)
else :
readfile(elt)
def parseTerm(stringTerm):
if 'M' == stringTerm[-1]:
return float(stringTerm[:-1])/12
elif 'Y' == stringTerm[-1]:
return float(stringTerm[:-1])
else :
raise Exception("Can not parse term")
def parseTenor(row):
return [parseTerm(row['underlyingTerm']), parseTerm(row['optionExpiry'])]
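# Illustrative note (added): parseTerm maps tenor strings to year fractions,
# e.g. parseTerm("6M") == 0.5 and parseTerm("2Y") == 2.0; parseTenor applies this to
# both the underlying term and the option expiry of a row.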
def smileFromSkew(skew):
atmVol = skew['A']
#smile = atmVol + skew[skewShift]
#return smile#.append(skew.drop(smile.index))
return atmVol + skew.drop('A')
def parseStrike(relStrike):
if relStrike.name[3] == 'A':
return relStrike['forward']
if "+" in relStrike.name[3]:
shift = int(relStrike.name[3].split("+")[1])
return relStrike['forward'] + shift/1000
if "-" in relStrike.name[3]:
shift = int(relStrike.name[3].split("-")[1])
return relStrike['forward'] - shift/1000
raise Exception(' Can not parse Strike ')
# Intersection of the non-date index levels (grid coordinates) across all dates in the history
def intersectionGrid(grid) :
nbDates = grid.index.get_level_values(0).unique().shape[0]
if nbDates <= 1:
return grid.index.droplevel(0)
else :
midDate = grid.index.get_level_values(0).unique()[int(nbDates/2)]
g1 = grid[grid.index.get_level_values(0) < midDate]
g2 = grid[grid.index.get_level_values(0) >= midDate]
return intersectionGrid(g1).intersection(intersectionGrid(g2))
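# Minimal usage sketch (added; toy index, not part of the original module): intersectionGrid
# keeps only the non-date index keys that are present on every date of the history.
# `_demo_intersection_grid` is a hypothetical helper name.
def _demo_intersection_grid():
    idx = pd.MultiIndex.from_tuples(
        [("2020-01-01", "1Y", "3M"), ("2020-01-01", "2Y", "3M"), ("2020-01-02", "1Y", "3M")],
        names=["date", "Tenor", "Expiry"])
    grid = pd.DataFrame({"vol": [0.20, 0.25, 0.21]}, index=idx)
    return intersectionGrid(grid)  # only the ("1Y", "3M") point appears on both dates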
def splitTrainTestDataRandomly(gridHistory, trainingSetPercentage):
nbDates = gridHistory.index.get_level_values(0).unique().shape[0]
trainingDates = np.random.choice(gridHistory.index.get_level_values(0).unique(),
replace=False,
size=int(nbDates * trainingSetPercentage))
trainingData = gridHistory.loc[pd.IndexSlice[trainingDates,:,:], :]
testingData = gridHistory.drop(trainingData.index)
trainingData.index = trainingData.index.droplevel([1,2])
testingData.index = testingData.index.droplevel([1,2])
return trainingData, testingData
def splitTrainTestDataChronologically(gridHistory, trainingSetPercentage):
firstTestingDate = int(gridHistory.index.get_level_values(0).unique().shape[0]
* trainingSetPercentage)
trainingDates = gridHistory.index.get_level_values(0).unique()[:firstTestingDate]
trainingData = gridHistory.loc[pd.IndexSlice[trainingDates,:,:], :]
testingData = gridHistory.drop(trainingData.index)
trainingData.index = trainingData.index.droplevel([1,2])
testingData.index = testingData.index.droplevel([1,2])
return trainingData, testingData
def sampleBatchOfDays(dataSet, nbDrawn):
trainingDates = np.random.choice(dataSet.index.get_level_values(0).unique(),
replace=False,
size=nbDrawn)
return dataSet.loc[trainingDates, :]
def splitHistory(history, colName):
return pd.pivot_table(history,
values = colName,
index = history.index.names,
columns=['Expiry','Tenor'])
def extractDataFromCSV(dataSetPath):
#Read csv file
data = pd.read_csv(dataSetPath)
import rdflib
from datetime import datetime
from nanopub import Nanopublication
import logging
import sys
import pandas as pd
import configparser
import hashlib
from .autonomic.update_change_service import UpdateChangeService
from whyis.namespace import whyis, prov, sio
class Interpreter(UpdateChangeService):
kb = ":"
cb_fn = None
timeline_fn = None
data_fn = None
prefix_fn = "prefixes.txt"
prefixes = {}
studyRef = None
unit_code_list = []
unit_uri_list = []
unit_label_list = []
explicit_entry_list = []
virtual_entry_list = []
explicit_entry_tuples = []
virtual_entry_tuples = []
cb_tuple = {}
timeline_tuple = {}
config = configparser.ConfigParser()
def __init__(self, config_fn=None): # prefixes should be
if config_fn is not None:
try:
self.config.read(config_fn)
except Exception as e:
logging.exception("Error: Unable to open configuration file: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Prefixes', 'prefixes'):
self.prefix_fn = self.config.get('Prefixes', 'prefixes')
# prefix_file = open(self.prefix_fn,"r")
# self.prefixes = prefix_file.readlines()
prefix_file = pd.read_csv(self.prefix_fn, dtype=object)
try:
for row in prefix_file.itertuples():
self.prefixes[row.prefix] = row.url
except Exception as e:
logging.exception("Error: Something went wrong when trying to read the Prefix File: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Prefixes', 'base_uri'):
self.kb = self.config.get('Prefixes', 'base_uri')
if self.config.has_option('Source Files', 'dictionary'):
dm_fn = self.config.get('Source Files', 'dictionary')
try:
dm_file = pd.read_csv(dm_fn, dtype=object)
try: # Populate virtual and explicit entry lists
for row in dm_file.itertuples():
if pd.isnull(row.Column):
logging.exception("Error: The SDD must have a column named 'Column'")
sys.exit(1)
if row.Column.startswith("??"):
self.virtual_entry_list.append(row)
else:
self.explicit_entry_list.append(row)
except Exception as e:
logging.exception(
"Error: Something went wrong when trying to read the Dictionary Mapping File: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
except Exception as e:
logging.exception("Error: The specified Dictionary Mapping file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'codebook'):
self.cb_fn = self.config.get('Source Files', 'codebook')
if self.cb_fn is not None:
try:
cb_file = pd.read_csv(self.cb_fn, dtype=object)
try:
inner_tuple_list = []
for row in cb_file.itertuples():
if (pd.notnull(row.Column) and row.Column not in self.cb_tuple):
inner_tuple_list = []
inner_tuple = {}
inner_tuple["Code"] = row.Code
if pd.notnull(row.Label):
inner_tuple["Label"] = row.Label
if pd.notnull(row.Class):
inner_tuple["Class"] = row.Class
if "Resource" in row and pd.notnull(row.Resource):
inner_tuple["Resource"] = row.Resource
inner_tuple_list.append(inner_tuple)
self.cb_tuple[row.Column] = inner_tuple_list
except Exception as e:
logging.warning("Warning: Unable to process Codebook file: ")
if hasattr(e, 'message'):
logging.warning(e.message)
else:
logging.warning(e)
except Exception as e:
logging.exception("Error: The specified Codebook file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'timeline'):
self.timeline_fn = self.config.get('Source Files', 'timeline')
if self.timeline_fn is not None:
try:
timeline_file = pd.read_csv(self.timeline_fn, dtype=object)
try:
inner_tuple_list = []
for row in timeline_file.itertuples():
if pd.notnull(row.Name) and row.Name not in self.timeline_tuple:
inner_tuple_list = []
inner_tuple = {}
inner_tuple["Type"] = row.Type
if pd.notnull(row.Label):
inner_tuple["Label"] = row.Label
if pd.notnull(row.Start):
inner_tuple["Start"] = row.Start
if pd.notnull(row.End):
inner_tuple["End"] = row.End
if pd.notnull(row.Unit):
inner_tuple["Unit"] = row.Unit
if pd.notnull(row.inRelationTo):
inner_tuple["inRelationTo"] = row.inRelationTo
inner_tuple_list.append(inner_tuple)
self.timeline_tuple[row.Name] = inner_tuple_list
except Exception as e:
logging.warning("Warning: Unable to process Timeline file: ")
if hasattr(e, 'message'):
logging.warning(e.message)
else:
logging.warning(e)
except Exception as e:
logging.exception("Error: The specified Timeline file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'code_mappings'):
cmap_fn = self.config.get('Source Files', 'code_mappings')
code_mappings_reader = pd.read_csv(cmap_fn)
for code_row in code_mappings_reader.itertuples():
if pd.notnull(code_row.code):
self.unit_code_list.append(code_row.code)
if pd.notnull(code_row.uri):
self.unit_uri_list.append(code_row.uri)
if pd.notnull(code_row.label):
self.unit_label_list.append(code_row.label)
if self.config.has_option('Source Files', 'data_file'):
self.data_fn = self.config.get('Source Files', 'data_file')
def getInputClass(self):
return whyis.SemanticDataDictionary
def getOutputClass(self):
return whyis.SemanticDataDictionaryInterpretation
def get_query(self):
return '''SELECT ?s WHERE { ?s ?p ?o .} LIMIT 1\n'''
def process(self, i, o):
print("Processing SDD...")
self.app.db.store.nsBindings = {}
npub = Nanopublication(store=o.graph.store)
# prefixes={}
# prefixes.update(self.prefixes)
# prefixes.update(self.app.NS.prefixes)
self.writeVirtualEntryNano(npub)
self.writeExplicitEntryNano(npub)
self.interpretData(npub)
def parseString(self, input_string, delim):
my_list = input_string.split(delim)
my_list = [element.strip() for element in my_list]
return my_list
def rdflibConverter(self, input_word):
if "http" in input_word:
return rdflib.term.URIRef(input_word)
if ':' in input_word:
word_list = input_word.split(":")
term = self.prefixes[word_list[0]] + word_list[1]
return rdflib.term.URIRef(term)
return rdflib.Literal(input_word, datatype=rdflib.XSD.string)
def codeMapper(self, input_word):
unitVal = input_word
for unit_label in self.unit_label_list:
if unit_label == input_word:
unit_index = self.unit_label_list.index(unit_label)
unitVal = self.unit_uri_list[unit_index]
for unit_code in self.unit_code_list:
if unit_code == input_word:
unit_index = self.unit_code_list.index(unit_code)
unitVal = self.unit_uri_list[unit_index]
return unitVal
def convertVirtualToKGEntry(self, *args):
if args[0][:2] == "??":
if self.studyRef is not None:
if args[0] == self.studyRef:
return self.prefixes[self.kb] + args[0][2:]
if len(args) == 2:
return self.prefixes[self.kb] + args[0][2:] + "-" + args[1]
return self.prefixes[self.kb] + args[0][2:]
if ':' not in args[0]:
# Check for entry in column list
for item in self.explicit_entry_list:
if args[0] == item.Column:
if len(args) == 2:
return self.prefixes[self.kb] + args[0].replace(" ", "_").replace(",", "").replace("(",
"").replace(
")", "").replace("/", "-").replace("\\", "-") + "-" + args[1]
return self.prefixes[self.kb] + args[0].replace(" ", "_").replace(",", "").replace("(", "").replace(
")", "").replace("/", "-").replace("\\", "-")
return '"' + args[0] + "\"^^xsd:string"
return args[0]
def checkVirtual(self, input_word):
try:
if input_word[:2] == "??":
return True
return False
except Exception as e:
logging.exception("Something went wrong in Interpreter.checkVirtual(): ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
def isfloat(self, value):
try:
float(value)
return True
except ValueError:
return False
def writeVirtualEntryNano(self, nanopub):
for item in self.virtual_entry_list:
virtual_tuple = {}
term = rdflib.term.URIRef(self.prefixes[self.kb] + str(item.Column[2:]))
nanopub.assertion.add((term, rdflib.RDF.type, rdflib.OWL.Class))
nanopub.assertion.add(
(term, rdflib.RDFS.label, rdflib.Literal(str(item.Column[2:]), datatype=rdflib.XSD.string)))
# Set the rdf:type of the virtual row to either the Attribute or Entity value (or else owl:Individual)
if (pd.notnull(item.Entity)) and (pd.isnull(item.Attribute)):
if ',' in item.Entity:
entities = self.parseString(item.Entity, ',')
for entity in entities:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(entity))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Entity))))
virtual_tuple["Column"] = item.Column
virtual_tuple["Entity"] = self.codeMapper(item.Entity)
if virtual_tuple["Entity"] == "hasco:Study":
self.studyRef = item.Column
virtual_tuple["Study"] = item.Column
elif (pd.isnull(item.Entity)) and (pd.notnull(item.Attribute)):
if ',' in item.Attribute:
attributes = self.parseString(item.Attribute, ',')
for attribute in attributes:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(attribute))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Attribute))))
virtual_tuple["Column"] = item.Column
virtual_tuple["Attribute"] = self.codeMapper(item.Attribute)
else:
logging.warning(
"Warning: Virtual entry not assigned an Entity or Attribute value, or was assigned both.")
virtual_tuple["Column"] = item.Column
# If there is a value in the inRelationTo column ...
if pd.notnull(item.inRelationTo):
virtual_tuple["inRelationTo"] = item.inRelationTo
# If there is a value in the Relation column but not the Role column ...
if (pd.notnull(item.Relation)) and (pd.isnull(item.Role)):
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
virtual_tuple["Relation"] = item.Relation
# If there is a value in the Role column but not the Relation column ...
elif (pd.isnull(item.Relation)) and (pd.notnull(item.Role)):
role = rdflib.BNode()
nanopub.assertion.add(
(role, rdflib.RDF.type, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role))))
nanopub.assertion.add(
(role, sio.inRelationTo, self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
nanopub.assertion.add((term, sio.hasRole, role))
virtual_tuple["Role"] = item.Role
# If there is a value in the Role and Relation columns ...
elif (pd.notnull(item.Relation)) and (pd.notnull(item.Role)):
virtual_tuple["Relation"] = item.Relation
virtual_tuple["Role"] = item.Role
nanopub.assertion.add(
(term, sio.hasRole, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role))))
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
nanopub.provenance.add((term, prov.generatedAtTime, rdflib.Literal(
"{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month,
datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(
datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z",
datatype=rdflib.XSD.dateTime)))
if pd.notnull(item.wasDerivedFrom):
if ',' in item.wasDerivedFrom:
derivedFromTerms = self.parseString(item.wasDerivedFrom, ',')
for derivedFromTerm in derivedFromTerms:
nanopub.provenance.add((term, prov.wasDerivedFrom,
self.rdflibConverter(self.convertVirtualToKGEntry(derivedFromTerm))))
else:
nanopub.provenance.add((term, prov.wasDerivedFrom,
self.rdflibConverter(self.convertVirtualToKGEntry(item.wasDerivedFrom))))
virtual_tuple["wasDerivedFrom"] = item.wasDerivedFrom
if pd.notnull(item.wasGeneratedBy):
if ',' in item.wasGeneratedBy:
generatedByTerms = self.parseString(item.wasGeneratedBy, ',')
for generatedByTerm in generatedByTerms:
nanopub.provenance.add((term, prov.wasGeneratedBy,
self.rdflibConverter(self.convertVirtualToKGEntry(generatedByTerm))))
else:
nanopub.provenance.add((term, prov.wasGeneratedBy,
self.rdflibConverter(self.convertVirtualToKGEntry(item.wasGeneratedBy))))
virtual_tuple["wasGeneratedBy"] = item.wasGeneratedBy
self.virtual_entry_tuples.append(virtual_tuple)
if self.timeline_fn is not None:
for key in self.timeline_tuple:
tl_term = self.rdflibConverter(self.convertVirtualToKGEntry(key))
nanopub.assertion.add((tl_term, rdflib.RDF.type, rdflib.OWL.Class))
for timeEntry in self.timeline_tuple[key]:
if 'Type' in timeEntry:
nanopub.assertion.add(
(tl_term, rdflib.RDFS.subClassOf, self.rdflibConverter(timeEntry['Type'])))
if 'Label' in timeEntry:
nanopub.assertion.add((tl_term, rdflib.RDFS.label,
rdflib.Literal(str(timeEntry['Label']), datatype=rdflib.XSD.string)))
if 'Start' in timeEntry and 'End' in timeEntry and timeEntry['Start'] == timeEntry['End']:
nanopub.assertion.add((tl_term, sio.hasValue, self.rdflibConverter(str(timeEntry['Start']))))
if 'Start' in timeEntry:
start_time = rdflib.BNode()
nanopub.assertion.add((start_time, sio.hasValue, self.rdflibConverter(str(timeEntry['Start']))))
nanopub.assertion.add((tl_term, sio.hasStartTime, start_time))
if 'End' in timeEntry:
end_time = rdflib.BNode()
nanopub.assertion.add((end_time, sio.hasValue, self.rdflibConverter(str(timeEntry['End']))))
nanopub.assertion.add((tl_term, sio.hasEndTime, end_time))
if 'Unit' in timeEntry:
nanopub.assertion.add(
(tl_term, sio.hasUnit, self.rdflibConverter(self.codeMapper(timeEntry['Unit']))))
if 'inRelationTo' in timeEntry:
nanopub.assertion.add((tl_term, sio.inRelationTo, self.rdflibConverter(
self.convertVirtualToKGEntry(timeEntry['inRelationTo']))))
nanopub.provenance.add((tl_term, prov.generatedAtTime, rdflib.Literal(
"{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month,
datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(
datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z",
datatype=rdflib.XSD.dateTime)))
def writeExplicitEntryNano(self, nanopub):
for item in self.explicit_entry_list:
explicit_entry_tuple = {}
term = rdflib.term.URIRef(self.prefixes[self.kb] + str(
item.Column.replace(" ", "_").replace(",", "").replace("(", "").replace(")", "").replace("/",
"-").replace(
"\\", "-")))
nanopub.assertion.add((term, rdflib.RDF.type, rdflib.OWL.Class))
if pd.notnull(item.Attribute):
if ',' in item.Attribute:
attributes = self.parseString(item.Attribute, ',')
for attribute in attributes:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(attribute))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Attribute))))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Attribute"] = self.codeMapper(item.Attribute)
elif pd.notnull(item.Entity):
if ',' in item.Entity:
entities = self.parseString(item.Entity, ',')
for entity in entities:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(entity))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Entity))))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Entity"] = self.codeMapper(item.Entity)
else:
nanopub.assertion.add((term, rdflib.RDFS.subClassOf, sio.Attribute))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Attribute"] = self.codeMapper("sio:Attribute")
logging.warning("Warning: Explicit entry not assigned an Attribute or Entity value.")
if pd.notnull(item.attributeOf):
nanopub.assertion.add(
(term, sio.isAttributeOf, self.rdflibConverter(self.convertVirtualToKGEntry(item.attributeOf))))
explicit_entry_tuple["isAttributeOf"] = self.convertVirtualToKGEntry(item.attributeOf)
else:
logging.warning("Warning: Explicit entry not assigned an isAttributeOf value.")
if pd.notnull(item.Unit):
nanopub.assertion.add(
(term, sio.hasUnit, self.rdflibConverter(self.convertVirtualToKGEntry(self.codeMapper(item.Unit)))))
explicit_entry_tuple["Unit"] = self.convertVirtualToKGEntry(self.codeMapper(item.Unit))
if pd.notnull(item.Time):
nanopub.assertion.add(
(term, sio.existsAt, self.rdflibConverter(self.convertVirtualToKGEntry(item.Time))))
explicit_entry_tuple["Time"] = item.Time
if pd.notnull(item.inRelationTo):
explicit_entry_tuple["inRelationTo"] = item.inRelationTo
# If there is a value in the Relation column but not the Role column ...
if (pd.notnull(item.Relation)) and (pd.isnull(item.Role)):
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
explicit_entry_tuple["Relation"] = item.Relation
# If there is a value in the Role column but not the Relation column ...
elif (pd.isnull(item.Relation)) and (pd.notnull(item.Role)):
""" I/O functions of the aecg package: tools for annotated ECG HL7 XML files
This module implements helper functions to parse and read annotated
electrocardiogram (ECG) stored in XML files following HL7
specification.
See authors, license and disclaimer at the top level directory of this project.
"""
# Imports =====================================================================
from typing import Dict, Tuple
from lxml import etree
from aecg import validate_xpath, new_validation_row, VALICOLS, \
TIME_CODES, SEQUENCE_CODES, \
Aecg, AecgLead, AecgAnnotationSet
import copy
import logging
import pandas as pd
import re
import zipfile
# Python logging ==============================================================
logger = logging.getLogger(__name__)
def parse_annotations(xml_filename: str,
zip_filename: str,
aecg_doc: etree._ElementTree,
aecgannset: AecgAnnotationSet,
path_prefix: str,
annsset_xmlnode_path: str,
valgroup: str = "RHYTHM",
log_validation: bool = False) -> Tuple[
AecgAnnotationSet, pd.DataFrame]:
"""Parses `aecg_doc` XML document and extracts annotations
Args:
xml_filename (str): Filename of the aECG XML file.
zip_filename (str): Filename of the zip file containing the aECG XML file.
If '', then xml file is not stored in a zip file.
aecg_doc (etree._ElementTree): XML document of the aECG XML file.
aecgannset (AecgAnnotationSet): Annotation set to which append found
annotations.
path_prefix (str): Prefix of xml path from which start searching for
annotations.
annsset_xmlnode_path (str): Path to xml node of the annotation set
containing the annotations.
valgroup (str, optional): Indicates whether to search annotations in
rhythm or derived waveform. Defaults to "RHYTHM".
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Tuple[AecgAnnotationSet, pd.DataFrame]: Annotation set updated with
found annotations and dataframe with results of validation.
"""
anngrpid = 0
# Annotations stored within a beat
beatnodes = aecg_doc.xpath((
path_prefix +
"/component/annotation/code[@code=\'MDC_ECG_BEAT\']").replace(
'/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'})
beatnum = 0
valpd = pd.DataFrame()
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {len(beatnodes)} annotated beats found')
for beatnode in beatnodes:
for rel_path in ["../component/annotation/"
"code[contains(@code, \"MDC_ECG_\")]"]:
annsnodes = beatnode.xpath(rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotation code
valrow2 = validate_xpath(
annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename, valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame(
[valrow2], columns=VALICOLS), ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
# Annotations type
valrow2 = validate_xpath(
annsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path + \
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
subannsnodes = annsnode.xpath(
rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
else:
subannsnodes += [annsnode]
# Exclude annotations reporting interval values only
subannsnodes = [
sa for sa in subannsnodes
if not sa.get("code").startswith("MDC_ECG_TIME_PD_")]
for subannsnode in subannsnodes:
# Annotations type
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
rel_path3 = "../support/supportingROI/component/"\
"boundary/value"
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/"\
"boundary/code"
roinodes = subannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(
roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
# Annotations type
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path +\
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/" \
"boundary/code"
roinodes = annsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4],
columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
if log_validation:
valpd = valpd.append(
| pd.DataFrame([valrow2], columns=VALICOLS) | pandas.DataFrame |
# Import data; the test set does not have labels
import pandas as pd
import numpy as np
train = pd.read_csv("./data/train.csv")
test = pd.read_csv("./data/test.csv")
print(train.head())
#data cleaning
##empty data
print(np.sum(np.array(train.isnull()==True), axis=0))
print(np.sum(np.array(test.isnull()==True), axis=0))
##fill null data
train = train.fillna(" ")
test = test.fillna(" ")
print(np.sum(np.array(train.isnull()==True), axis=0))
print(np.sum(np.array(test.isnull()==True), axis=0))
# Check labels: 1 means spam, 0 means not spam
print(train['spam'].unique())
#merge email content and subject into the feature
X_train = train['subject'] + ' ' + train['email']
y_train = train['spam']
X_test = test['subject'] + ' ' + test['email']
#transfer txt into tokens ids
from keras.preprocessing.text import Tokenizer
max_words = 300
tokenizer = Tokenizer(num_words=max_words, lower=True, split=' ')
# Only assign ids to the 300 most frequent words; all other words are ignored
tokenizer.fit_on_texts(list(X_train) + list(X_test))  # fit the tokenizer
X_train_tokens = tokenizer.texts_to_sequences(X_train)
X_test_tokens = tokenizer.texts_to_sequences(X_test)
#make sequence get same length
# Token sequences have different lengths, so pad them to a fixed length
maxlen = 100
from keras.preprocessing import sequence
X_train_tokens_pad = sequence.pad_sequences(X_train_tokens, maxlen=maxlen,padding='post')
X_test_tokens_pad = sequence.pad_sequences(X_test_tokens, maxlen=maxlen,padding='post')
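# After padding, X_train_tokens_pad and X_test_tokens_pad are integer matrices of shape
# (num_samples, maxlen), i.e. (len(X_train), 100) and (len(X_test), 100).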
#build model
embeddings_dim = 30 # dimension of the word embedding vectors
from keras.models import Model, Sequential
from keras.layers import Embedding, LSTM, GRU, SimpleRNN, Dense
model = Sequential()
model.add(Embedding(input_dim=max_words, # Size of the vocabulary
output_dim=embeddings_dim, # embedding dimension
input_length=maxlen))
model.add(SimpleRNN(units=64)) # can be swapped for GRU or LSTM
model.add(Dense(units=1, activation='sigmoid'))
model.summary()
#training
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy']) # configure the model
history = model.fit(X_train_tokens_pad, y_train,
batch_size=128, epochs=10, validation_split=0.2)
model.save("email_cat_lstm.h5") # 保存训练好的模型
#plot training graph
from matplotlib import pyplot as plt
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.show()
#test
pred_prob = model.predict(X_test_tokens_pad).squeeze()
pred_class = np.asarray(pred_prob > 0.5).astype(np.int32)
id = test['id']
output = pd.DataFrame({'id': id, 'Class': pred_class})
import datetime
import json
from typing import Union
import pandas as pd
from wkz.gis.geo import get_location_name
def _get_daytime_name(date: datetime.datetime) -> str:
hour = date.hour
if (hour > 4) and (hour <= 8):
return "Early Morning"
elif (hour > 8) and (hour <= 12):
return "Morning"
elif (hour > 12) and (hour <= 16):
return "Noon"
elif (hour > 16) and (hour <= 20):
return "Evening"
elif (hour > 20) and (hour <= 24):
return "Night"
elif hour <= 4:
return "Late Night"
def _get_sport_name(sport_name: str) -> str:
if sport_name == "unknown":
return "Sport"
else:
return sport_name.capitalize()
def _get_coordinate_not_null(coordinates: Union[str, list]):
try:
if isinstance(coordinates, str):
coordinate = pd.Series(json.loads(coordinates), dtype=float).dropna().iloc[0]
elif isinstance(coordinates, list):
coordinate = pd.Series(coordinates, dtype=float).dropna().iloc[0]
"""
Utils for doing data analysis
"""
import pandas as pd
import numpy as np
def impute_empty_years(
yearly_stats: pd.DataFrame, min_year: int = None, max_year: int = None
) -> pd.DataFrame:
"""
Imputes zero values for years without data
Args:
yearly_stats: A dataframe with a 'year' column and other columns with data
min_year: Lower bound for years to keep; can be smaller than yearly_stats.year.min()
max_year: Higher bound for years; can be larger than yearly_stats.year.max()
Returns:
A data frame with imputed 0s for years with no data
"""
min_year, max_year = set_def_min_max_years(yearly_stats, min_year, max_year)
return (
pd.DataFrame(data={"year": range(min_year, max_year + 1)})
.merge(yearly_stats, how="left")
.fillna(0)
.astype(yearly_stats.dtypes)
)
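# Minimal usage sketch (added; illustrative data only, not part of the original module):
# years missing from a yearly aggregation are filled in with zero rows between min_year
# and max_year. `_demo_impute_empty_years` is a hypothetical helper name.
def _demo_impute_empty_years():
    yearly = pd.DataFrame({"year": [2019, 2021], "no_of_projects": [3, 5]})
    return impute_empty_years(yearly, min_year=2018, max_year=2022)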
def set_def_min_max_years(df: pd.DataFrame, min_year: int, max_year: int) -> (int, int):
"""Set the default values for min and max years"""
if min_year is None:
min_year = df.year.min()
if max_year is None:
max_year = df.year.max()
return min_year, max_year
### GtR specific utils
def gtr_deduplicate_projects(gtr_docs: pd.DataFrame) -> pd.DataFrame:
"""
Deduplicates projects that have the same title and description. This can be used
to report the overall funding amount of the whole project when it has
received funding in separate installments across different years.
Args:
gtr_docs: A dataframe with columns with GtR project 'title', 'description',
'amount' (research funding) and other data
Returns:
A dataframe where projects with the exact same title and description have
been merged and their funding has been summed up
"""
gtr_docs_summed_amounts = (
gtr_docs.groupby(["title", "description"])
.agg(amount=("amount", "sum"))
.reset_index()
)
# Add the summed up amounts to the project and keep the earliest instance
# of the duplicates
return (
gtr_docs.drop("amount", axis=1)
.merge(gtr_docs_summed_amounts, on=["title", "description"], how="left")
.sort_values("start")
.drop_duplicates(["title", "description"], keep="first")
.reset_index(drop=True)
# Restore previous column order
)[gtr_docs.columns]
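# Minimal usage sketch (added; toy data, not part of the original module): two funding
# instalments of the same project collapse into a single row whose amount is the sum of
# both instalments. `_demo_gtr_deduplicate_projects` is a hypothetical helper name.
def _demo_gtr_deduplicate_projects():
    gtr_docs = pd.DataFrame({
        "project_id": ["p1", "p1-extension"],
        "title": ["Same project", "Same project"],
        "description": ["Same description", "Same description"],
        "start": ["2019-01-01", "2020-01-01"],
        "amount": [100_000, 50_000],
    })
    return gtr_deduplicate_projects(gtr_docs)  # one row with amount == 150_000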
def gtr_funding_per_year(
gtr_docs: pd.DataFrame, min_year: int = None, max_year: int = None
) -> pd.DataFrame:
"""
Given a table with projects and their funding, return an aggregation by year
Args:
gtr_docs: A dataframe with columns for 'start', 'project_id' and 'amount'
(research funding) among other project data
min_year: Lower bound for years to keep
max_year: Higher bound for years to keep
Returns:
A dataframe with the following columns:
'year',
'no_of_projects' - number of new projects in a given year,
'amount_total' - total amount of research funding in a given year,
'amount_median' - median project funding in a given year
"""
# Convert project start dates to years
gtr_docs = gtr_docs.copy()
gtr_docs["year"] = pd.to_datetime(gtr_docs.start).dt.year
# Set min and max years for aggregation
min_year, max_year = set_def_min_max_years(gtr_docs, min_year, max_year)
# Group by year
return (
gtr_docs.groupby("year")
.agg(
# Number of new projects in a given year
no_of_projects=("project_id", "count"),
# Total amount of research funding in a given year
amount_total=("amount", "sum"),
# Median project funding in a given year
amount_median=("amount", np.median),
)
.reset_index()
# Limit results between min and max years
.query(f"year>={min_year}")
.query(f"year<={max_year}")
# Convert to thousands
.assign(
amount_total=lambda x: x.amount_total / 1000,
amount_median=lambda x: x.amount_median / 1000,
)
# Add zero values for years without data
.pipe(impute_empty_years, min_year=min_year, max_year=max_year)
)
def gtr_get_all_timeseries(
gtr_docs: pd.DataFrame, min_year: int = None, max_year: int = None
) -> pd.DataFrame:
"""
Calculates all typical time series from a list of GtR projects and return
as one combined table
Args:
gtr_docs: A dataframe with columns for 'start', 'project_id' and 'amount'
(research funding) among other project data
min_year: Lower bound for years to keep
max_year: Higher bound for years to keep
Returns:
Dataframe with columns for 'year', 'no_of_projects', 'amount_total' and
'amount_median'
"""
# Deduplicate projects. This is used to report the number of new projects
# started each year, accounting for cases where the same project has received
# additional funding in later years
gtr_docs_dedup = gtr_deduplicate_projects(gtr_docs)
# Number of new projects per year
time_series_projects = gtr_funding_per_year(gtr_docs_dedup, min_year, max_year)[
["year", "no_of_projects"]
]
# Amount of research funding per year (note: here we use the non-deduplicated table,
# to account for additional funding for projects that might have started in earlier years)
time_series_funding = gtr_funding_per_year(gtr_docs, min_year, max_year)
# Join up both tables
time_series_funding["no_of_projects"] = time_series_projects["no_of_projects"]
return time_series_funding
### Crunchbase specific utils
def cb_orgs_founded_per_year(
cb_orgs: pd.DataFrame, min_year: int = None, max_year: int = None
) -> pd.DataFrame:
"""
Calculates the number of Crunchbase organisations founded in a given year
Args:
cb_orgs: A dataframe with columns for 'id' and 'founded_on' among other data
min_year: Lower bound for years to keep
max_year: Higher bound for years to keep
Returns:
A dataframe with the following columns:
'year',
'no_of_orgs_founded' - number of new organisations founded in a given year
"""
# Remove orgs that don't have year when they were founded
cb_orgs = cb_orgs[~cb_orgs.founded_on.isnull()].copy()
# Convert dates to years
cb_orgs["year"] = pd.to_datetime(cb_orgs.founded_on).dt.year
# Set min and max years for aggregation
min_year, max_year = set_def_min_max_years(cb_orgs, min_year, max_year)
# Group by year
return (
cb_orgs.groupby("year")
.agg(no_of_orgs_founded=("id", "count"))
.reset_index()
.pipe(impute_empty_years, min_year=min_year, max_year=max_year)
)
def cb_investments_per_year(
cb_funding_rounds: pd.DataFrame, min_year: int = None, max_year: int = None
) -> pd.DataFrame:
"""
Aggregates the raised investment amount and number of deals across all orgs
Args:
cb_funding_rounds: A dataframe with columns for 'funding_round_id', 'raised_amount_usd'
'raised_amount_gbp' and 'announced_on' among other data
min_year: Lower bound for years to keep
max_year: Higher bound for years to keep
Returns:
A dataframe with the following columns:
'year',
'no_of_rounds' - number of funding rounds (deals) in a given year
'raised_amount_usd_total' - total raised investment (USD) in a given year
'raised_amount_gbp_total' - total raised investment (GBP) in a given year
"""
# Convert dates to years
cb_funding_rounds["year"] = pd.to_datetime(cb_funding_rounds.announced_on).dt.year
# Set min and max years for aggregation
min_year, max_year = set_def_min_max_years(cb_funding_rounds, min_year, max_year)
# Group by year
return (
cb_funding_rounds.groupby("year")
.agg(
no_of_rounds=("funding_round_id", "count"),
raised_amount_usd_total=("raised_amount_usd", "sum"),
raised_amount_gbp_total=("raised_amount_gbp", "sum"),
)
.reset_index()
.query(f"year>={min_year}")
.query(f"year<={max_year}")
.pipe(impute_empty_years, min_year=min_year, max_year=max_year)
)
def cb_get_all_timeseries(
cb_orgs: pd.DataFrame,
cb_funding_rounds: pd.DataFrame,
min_year: int = None,
max_year: int = None,
) -> pd.DataFrame:
"""
    Calculates all typical time series from Crunchbase organisations and funding
    rounds and returns them as one combined table
Args:
cb_orgs: A dataframe with columns for 'id' and 'founded_on' among other data
cb_funding_rounds: A dataframe with columns for 'funding_round_id', 'raised_amount_usd'
'raised_amount_gbp' and 'announced_on' among other data
min_year: Lower bound for years to keep
max_year: Higher bound for years to keep
Returns:
A dataframe with the following columns:
'year',
'no_of_rounds' - number of funding rounds (deals) in a given year
'raised_amount_usd_total' - total raised investment (USD) in a given year
'raised_amount_gbp_total' - total raised investment (GBP) in a given year
'no_of_orgs_founded' - number of new organisations founded in a given year
"""
# Number of new companies per year
time_series_orgs_founded = cb_orgs_founded_per_year(cb_orgs, min_year, max_year)
# Amount of raised investment per year
time_series_investment = cb_investments_per_year(
cb_funding_rounds, min_year, max_year
)
# Join up both tables
time_series_investment["no_of_orgs_founded"] = time_series_orgs_founded[
"no_of_orgs_founded"
]
return time_series_investment
### Time series trends
def moving_average(
timeseries_df: pd.DataFrame, window: int = 3, replace_columns: bool = False
) -> pd.DataFrame:
"""
Calculates rolling mean of yearly timeseries (not centered)
Args:
timeseries_df: Should have a 'year' column and at least one other data column
window: Window of the rolling mean
        replace_columns: If True, the moving average values replace the original columns;
            otherwise a new set of columns is created with the name pattern
            `{column_name}_sma{window}` where sma stands for 'simple moving average'
Returns:
Dataframe with moving average values
"""
# Rolling mean
df_ma = timeseries_df.rolling(window, min_periods=1).mean().drop("year", axis=1)
# Create new renamed columns
if not replace_columns:
column_names = timeseries_df.drop("year", axis=1).columns
new_column_names = ["{}_sma{}".format(s, window) for s in column_names]
df_ma = df_ma.rename(columns=dict(zip(column_names, new_column_names)))
return pd.concat([timeseries_df, df_ma], axis=1)
else:
return pd.concat([timeseries_df[["year"]], df_ma], axis=1)
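# Usage sketch (illustrative data): with the default replace_columns=False a smoothed
# '<column>_sma<window>' column is appended next to the original one.
#   ts = pd.DataFrame({"year": [2018, 2019, 2020], "amount_total": [10.0, 12.0, 9.0]})
#   moving_average(ts, window=2)                         # adds 'amount_total_sma2'
#   moving_average(ts, window=2, replace_columns=True)   # keeps only 'year' + smoothed values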
def magnitude(time_series: pd.DataFrame, year_start: int, year_end: int) -> pd.Series:
"""
Calculates signals' magnitude (i.e. mean across year_start and year_end)
Args:
time_series: A dataframe with a columns for 'year' and other data
year_start: First year of the trend window
year_end: Last year of the trend window
Returns:
Series with magnitude estimates for all data columns
"""
magnitude = time_series.set_index("year").loc[year_start:year_end, :].mean()
return magnitude
def percentage_change(initial_value, new_value):
"""Calculates percentage change from first_value to second_value"""
return (new_value - initial_value) / initial_value * 100
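# Worked example: percentage_change(50, 75) == (75 - 50) / 50 * 100 == 50.0 (i.e. +50%),
# while percentage_change(80, 60) == -25.0.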
def smoothed_growth(
time_series: pd.DataFrame, year_start: int, year_end: int, window: int = 3
) -> pd.Series:
"""Calculates a growth estimate by using smoothed (rolling mean) time series
Args:
time_series: A dataframe with a columns for 'year' and other data
year_start: First year of the trend window
year_end: Last year of the trend window
        window: Moving average window size (in years) for the smoothed growth estimate
Returns:
Series with smoothed growth estimates for all data columns
"""
# Smooth timeseries
ma_df = moving_average(time_series, window, replace_columns=True).set_index("year")
# Percentage change
return percentage_change(
initial_value=ma_df.loc[year_start, :], new_value=ma_df.loc[year_end, :]
)
def estimate_magnitude_growth(
time_series: pd.DataFrame, year_start: int, year_end: int, window: int = 3
) -> pd.DataFrame:
"""
Calculates signals' magnitude, estimates their growth and returns a combined dataframe
Args:
time_series: A dataframe with a columns for 'year' and other data
year_start: First year of the trend window
year_end: Last year of the trend window
        window: Moving average window size (in years) for the smoothed growth estimate
Returns:
Dataframe with magnitude and growth trend estimates; magnitude is in
absolute units (e.g. GBP 1000s if analysing research funding) whereas
        growth is expressed as a percentage
"""
magnitude_df = magnitude(time_series, year_start, year_end)
growth_df = smoothed_growth(time_series, year_start, year_end, window)
combined_df = (
        pd.DataFrame([magnitude_df, growth_df], index=["magnitude", "growth"])
import sys
import pickle
import pandas as pd
import numpy as np
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import classification_report, accuracy_score
def load_data(database_filepath):
"""
Function to load data from sqlite
Input:
        - database_filepath: The path to the sqlite DB
    Output:
- X: messages
        - y: OneHotEncoded categories
- Names of categories
"""
# Read data from sql to a DataFrame
df = pd.read_sql_table("MLStaging", "sqlite:///"+database_filepath)
# Separate X and y columns
# X - independent
# y - dependent
X = df["message"]
y = df.drop(["categories", "message", "original", "genre", "id"], axis=1)
print(y.columns)
return X, y, list(y.columns)
def tokenize(text):
"""
Function to tokenize the text
Input:
- text: The sentence that needs to be tokenized
Output:
- Lemmatized, lower char token array
"""
# Word tokenization
tokens = word_tokenize(text)
# Initializing Lemmatizer
lemmatizer = WordNetLemmatizer()
# Lemmatizing, lower case conversion and trimming the words for extra space
clean_tokens = [lemmatizer.lemmatize(i).lower().strip() for i in tokens]
return clean_tokens
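# Usage sketch (hypothetical message): tokens are word-tokenized, lemmatized with the
# default noun POS, lower-cased and stripped, e.g.
#   tokenize("We need water and food in the shelters")
#   -> ['we', 'need', 'water', 'and', 'food', 'in', 'the', 'shelter']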
def build_model():
"""
Function to build the ML model
Input:
-
    Output:
- GridSearchCV object
"""
    # Forming Pipeline
pipeline = Pipeline([
('count_vectorizer', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier()))
])
# Initializing parameters for Grid search
parameters = {
'clf__estimator__n_estimators': [10, 50, 100]
}
# GridSearch Object with pipeline and parameters
cv = GridSearchCV(pipeline, param_grid=parameters)
return cv
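# Usage sketch (assumes X, y come from load_data and the usual train/test split):
#   X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2)
#   model = build_model()
#   model.fit(X_train, Y_train)          # grid-searches clf__estimator__n_estimators
#   evaluate_model(model, X_test, Y_test, category_names)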
def evaluate_model(model, X_test, Y_test, category_names):
"""
Function to evaluate model
"""
# Predict results of X_test
y_pred = model.predict(X_test)
# Converting both y_pred and Y_test into DataFrames
    y_pred = pd.DataFrame(y_pred, columns=category_names)
import argparse
import sys
import time
from multiprocessing import Pool
import numpy as np
import pandas as pd
from terminaltables import *
from dataset import VideoDataSet
from ops.utils import temporal_nms
sys.path.append('./anet_toolkit/Evaluation')
import os
import pdb
import pickle
from anet_toolkit.Evaluation.eval_detection import \
compute_average_precision_detection
from ops.utils import get_configs, softmax
# options
parser = argparse.ArgumentParser(
description="Evaluate detection performance metrics")
parser.add_argument('dataset', type=str, choices=['thumos14', 'muses'])
parser.add_argument('detection_pickles', type=str, nargs='+')
parser.add_argument('--nms_threshold', type=float, default=0.4)
parser.add_argument('--no_regression', default=False, action="store_true")
parser.add_argument('-j', '--ap_workers', type=int, default=16)
parser.add_argument('--top_k', type=int, default=None)
parser.add_argument('--cls_scores', type=str, nargs='+')
parser.add_argument('--reg_scores', type=str, default=None)
parser.add_argument('--cls_top_k', type=int, default=1)
parser.add_argument('--cfg', default='data/dataset_cfg.yml')
parser.add_argument('--score_weights', type=float, default=None, nargs='+')
parser.add_argument('--min_length', type=float, default=None, help='minimum duration of proposals in seconds')
parser.add_argument('--one_iou', action='store_true')
parser.add_argument('--no_comp', action='store_true')
args = parser.parse_args()
configs = get_configs(args.dataset, args.cfg)
dataset_configs = configs['dataset_configs']
model_configs = configs["model_configs"]
num_class = model_configs['num_class']
nms_threshold = args.nms_threshold if args.nms_threshold else configs['evaluation']['nms_threshold']
top_k = args.top_k if args.top_k else configs['evaluation']['top_k']
print('---'*10)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("initiating evaluation of detection results {}".format(args.detection_pickles))
print('top_k={}'.format(top_k))
sys.stdout.flush()
score_pickle_list = []
for pc in args.detection_pickles:
score_pickle_list.append(pickle.load(open(pc, 'rb')))
if args.score_weights:
weights = np.array(args.score_weights) / sum(args.score_weights)
else:
weights = [1.0/len(score_pickle_list) for _ in score_pickle_list]
def merge_scores(vid):
def merge_part(arrs, index, weights):
if arrs[0][index] is not None:
return np.sum([a[index] * w for a, w in zip(arrs, weights)], axis=0)
else:
return None
arrays = [pc[vid] for pc in score_pickle_list]
act_weights = weights
comp_weights = weights
reg_weights = weights
rel_props = score_pickle_list[0][vid][0]
return rel_props, \
merge_part(arrays, 1, act_weights), \
merge_part(arrays, 2, comp_weights), \
merge_part(arrays, 3, reg_weights)
print('Merge detection scores from {} sources...'.format(len(score_pickle_list)))
detection_scores = {k: merge_scores(k) for k in score_pickle_list[0]}
print('Done.')
if 'deploy_prop_file' in dataset_configs:
prop_file = dataset_configs['deploy_prop_file']
else:
prop_file = dataset_configs['test_prop_file']
if 'deploy_online_slice' in dataset_configs:
online_slice = dataset_configs['deploy_online_slice']
else:
online_slice = dataset_configs.get('online_slice', False)
dataset = VideoDataSet(dataset_configs,
prop_file=prop_file,
ft_path=dataset_configs['train_ft_path'],
test_mode=True)
from functools import reduce
gt_lens = np.array(reduce(lambda x,y: x+y, [[(x.end_frame-x.start_frame)/6 for x in v.gt] for v in dataset.video_list]))
# pdb.set_trace()
dataset_detections = [dict() for i in range(num_class)]
def merge_all_vid_scores(pickle_list):
def merge_op(arrs, index, weights):
if arrs[0][index] is not None:
return np.sum([a[index] * w for a, w in zip(arrs, weights)], axis=0)
else:
return None
out_score_dict = {}
for vid in pickle_list[0]:
arrays = [pc[vid] for pc in pickle_list]
act_weights = weights
comp_weights = weights
reg_weights = weights
rel_props = pickle_list[0][vid][0]
out_score_dict[vid] = [rel_props, \
merge_op(arrays, 1, act_weights), \
merge_op(arrays, 2, comp_weights), \
merge_op(arrays, 3, reg_weights)]
return out_score_dict
if args.cls_scores:
print('Using classifier scores from {}'.format(args.cls_scores))
cls_score_pickle_list = []
for pc in args.cls_scores:
cls_score_pickle_list.append(pickle.load(open(pc, 'rb')))
cls_score_dict = merge_all_vid_scores(cls_score_pickle_list)
# cls_score_pc = pickle.load(open(args.cls_scores, 'rb'), encoding='bytes')
# cls_score_dict = cls_score_pc
# cls_score_dict = {os.path.splitext(os.path.basename(k.decode('utf-8')))[0]:v for k, v in cls_score_pc.items()}
else:
cls_score_dict = None
if args.reg_scores:
print('Using regression scores from {}'.format(args.reg_scores))
reg_score_dict = pickle.load(open(args.reg_scores, 'rb'))
else:
reg_score_dict = None
# generate detection results
def gen_detection_results(video_id, score_tp):
if len(score_tp[0].shape) == 3:
rel_prop = np.squeeze(score_tp[0], 0)
else:
rel_prop = score_tp[0]
# standardize regression scores
reg_scores = score_tp[3]
if reg_scores is None:
reg_scores = np.zeros((len(rel_prop), num_class, 2), dtype=np.float32)
reg_scores = reg_scores.reshape((-1, num_class, 2))
if cls_score_dict is None:
combined_scores = softmax(score_tp[1][:, :])
combined_scores = combined_scores[:,1:]
else:
combined_scores = softmax(cls_score_dict[video_id][1])[:, 1:]
if combined_scores.shape[1] < score_tp[2].shape[1]:
combined_scores = np.concatenate(
(combined_scores, np.zeros([len(combined_scores), score_tp[2].shape[1]-combined_scores.shape[1]])), axis=1)
elif combined_scores.shape[1] > score_tp[2].shape[1]:
combined_scores = combined_scores[:, :score_tp[2].shape[1]]
if not args.no_comp:
combined_scores = combined_scores * np.exp(score_tp[2])
keep_idx = np.argsort(combined_scores.ravel())[-top_k:]
# pdb.set_trace()
delete_short = args.min_length is not None
if delete_short:
print('delete short proposals')
duration = dataset.video_dict[video_id].num_frames / 6
prop_duration = duration * (rel_prop[:,1] - rel_prop[:, 0])
        non_short_prop_idx = np.where(prop_duration >= args.min_length)[0]
keep_idx = [x for x in keep_idx if x // num_class in non_short_prop_idx]
# keep_prop_num = len({x//num_class for x in keep_idx})
for k in keep_idx:
cls = k % num_class
prop_idx = k // num_class
if video_id not in dataset_detections[cls]:
dataset_detections[cls][video_id] = np.array([
[rel_prop[prop_idx, 0], rel_prop[prop_idx, 1], combined_scores[prop_idx, cls],
reg_scores[prop_idx, cls, 0], reg_scores[prop_idx, cls, 1]]
])
else:
dataset_detections[cls][video_id] = np.vstack(
[dataset_detections[cls][video_id],
[rel_prop[prop_idx, 0], rel_prop[prop_idx, 1], combined_scores[prop_idx, cls],
reg_scores[prop_idx, cls, 0], reg_scores[prop_idx, cls, 1]]])
return len(keep_idx)
print("Preprocessing detections...")
orig_num_list = []
keep_num_list = []
def mean(x):
return sum(x)/len(x)
for k, v in detection_scores.items():
orig_num = len(v[0])
keep_num = gen_detection_results(k, v)
orig_num_list.append(orig_num)
keep_num_list.append(keep_num)
print('Done. {} videos, avg prop num {:.0f} => {:.0f}'.format(len(detection_scores), mean(orig_num_list), mean(keep_num_list)))
# perform NMS
print("Performing nms with thr {} ...".format(nms_threshold))
for cls in range(num_class):
dataset_detections[cls] = {
k: temporal_nms(v, nms_threshold) for k,v in dataset_detections[cls].items()
}
print("NMS Done.")
def perform_regression(detections):
t0 = detections[:, 0]
t1 = detections[:, 1]
center = (t0 + t1) / 2
duration = (t1 - t0)
new_center = center + duration * detections[:, 3]
new_duration = duration * np.exp(detections[:, 4])
new_detections = np.concatenate((
np.clip(new_center - new_duration / 2, 0, 1)[:, None], np.clip(new_center + new_duration / 2, 0, 1)[:, None], detections[:, 2:]
), axis=1)
return new_detections
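# Worked example (illustrative numbers): a proposal spanning [0.2, 0.4] (center 0.3,
# duration 0.2) with a center offset of 0.1 and a log-duration offset of ln(1.5) moves to
# center 0.3 + 0.2 * 0.1 = 0.32 and duration 0.2 * 1.5 = 0.3, i.e. [0.17, 0.47],
# before the final clip to the [0, 1] range.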
# perform regression
if not args.no_regression:
print("Performing location regression...")
for cls in range(num_class):
dataset_detections[cls] = {
k: perform_regression(v) for k, v in dataset_detections[cls].items()
}
print("Regression Done.")
else:
print("Skip regresssion as requested by --no_regression")
# ravel test detections
def ravel_detections(detection_db, cls):
detection_list = []
for vid, dets in detection_db[cls].items():
detection_list.extend([[vid, cls] + x[:3] for x in dets.tolist()])
    df = pd.DataFrame(detection_list, columns=["video-id", "cls", "t-start", "t-end", "score"])
"""
Tests for scalar Timedelta arithmetic ops
"""
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Timedelta, Timestamp, offsets
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
# datetime + Timedelta does _not_ call Timedelta.__radd__,
# so we get a datetime back instead of a Timestamp
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError):
td + other
with pytest.raises(TypeError):
other + td
with pytest.raises(TypeError):
td - other
with pytest.raises(TypeError):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
        exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
        res = arr - Timedelta("1D")
        tm.assert_numpy_array_equal(res, exp)
# -*- coding: utf-8 -*-
import glob
import json
import argparse
import re
import pandas as pd
from logic_util import parse_lambda
from syntactic_tree_parser import bpe_mask, un_bpe, un_bpe_mask
# Recovering BPE words
def recover_bpe_words(row_info):
if row_info["template"] == "error-parse-tree" or not isinstance(row_info["sentence"], str):
row_info["bpe_template"] = 'error-parse-tree'
row_info["status"] = "warning"
else:
natural_w = row_info["sentence"].split()
bpe_w = row_info["bpe_sent"].split()
bpe_template = row_info["template"]
row_info["status"] = "normal"
for i, w in enumerate(bpe_w):
if "@@_" in w:
if " {} ".format(natural_w[i]) not in bpe_template:
row_info["status"] = "warning"
if w.replace("@@_", "") != natural_w[i]:
bpe_template = "err-pare-tree"
break
if re.match(r'[^\w]', w[-1]):
bpe_template = bpe_template.replace(
" {} ".format(natural_w[i][:-1]), " {} ".format(w[:-1]), 1)
else:
bpe_template = bpe_template.replace(
" {} ".format(natural_w[i]), " {} ".format(w), 1)
row_info["bpe_template"] = bpe_template
return row_info
def generate_template_mix(row_info, tag_freq=None):
if row_info["bpe_template"] == "error-parse-tree":
row_info["syntactic_template"] = row_info["bpe_sent"].replace("@@_", "@@ ") if isinstance(row_info["bpe_sent"], str) else ""
else:
bpe_template = parse_lambda(row_info["bpe_template"])
top_frequent_pos = list(tag_freq.items())
top_frequent_pos.sort(key=lambda x: x[1], reverse=True)
top_frequent_pos = [x[0] for x in top_frequent_pos[:5]]
bpe_template.flag_frequent_postag(top_frequent_pos)
mix_template_nodes = bpe_template.scan_frequent_tree()
row_info["syntactic_template"] = " ".join( [e.value.replace("@@_", "@@ ") for e in mix_template_nodes])
return row_info
def generate_template(row_info, tag_freq=None):
if row_info["bpe_template"] == "error-parse-tree":
row_info["syntactic_template"] = row_info["bpe_sent"]
else:
if tag_freq is None:
bpe_template = parse_lambda(row_info["bpe_template"])
sent_len = len(row_info["bpe_sent"].split(" "))
depth = min(max(sent_len*0.15, bpe_template.get_min_depth()),
bpe_template.get_max_depth())
# max_depth = bpe_template.get_max_depth()
# depth = min(max(max_depth - 2, bpe_template.get_min_depth()), max_depth)
nodes_pruned = bpe_template.get_leaf_nodes_with_depth(depth)
row_info["syntactic_template"] = " ".join(
[e.value.replace("@@_", "@@ ") for e in nodes_pruned])
else:
bpe_template = parse_lambda(row_info["bpe_template"])
min_depth = bpe_template.get_min_depth()
max_depth = bpe_template.get_max_depth()
if min_depth > max_depth:
print("----")
print(row_info["bpe_sent"])
print(row_info["bpe_template"])
print(min_depth, max_depth)
tmp_val = min(max_depth, min_depth)
max_depth = tmp_val
min_depth = tmp_val
max_prob = -1
max_nodes_pruned = None
if min_depth == max_depth:
max_nodes_pruned = bpe_template.get_leaf_nodes_with_depth(
min_depth)
for d in range(min_depth, max_depth):
nodes_pruned = bpe_template.get_leaf_nodes_with_depth(d)
probs = []
for n in nodes_pruned:
if n.value not in tag_freq:
continue
else:
probs.append(tag_freq[n.value])
cur_prob = sum(probs) / len(probs) if len(probs) > 0 else 0
if cur_prob > max_prob:
max_prob = cur_prob
max_nodes_pruned = nodes_pruned
row_info["syntactic_template"] = " ".join(
[e.value.replace("@@_", "@@ ") for e in max_nodes_pruned])
return row_info
def generate_template_replace_np(row_info, tag_freq=None):
if row_info["bpe_template"] == "error-parse-tree":
row_info["syntactic_template"] = row_info["bpe_sent"]
else:
bpe_template = parse_lambda(row_info["bpe_template"])
sent_len = len(row_info["bpe_sent"].split(" "))
bpe_template.prune_tag(["NP"])
nodes_pruned = bpe_template.get_leaf_nodes_template()
row_info["syntactic_template"] = " ".join( [e.value.replace("@@_", "@@ ") for e in nodes_pruned])
return row_info
def generate_template_replace_np_novp(row_info, tag_freq=None):
if row_info["bpe_template"] == "error-parse-tree":
row_info["syntactic_template"] = row_info["bpe_sent"]
else:
bpe_template = parse_lambda(row_info["bpe_template"])
bpe_template.flag_vp_in_subtree()
bpe_template.prune_tag_novp_in_subtree(["NP"])
nodes_pruned = bpe_template.get_leaf_nodes_template()
row_info["syntactic_template"] = " ".join( [e.value.replace("@@_", "@@ ") for e in nodes_pruned])
return row_info
def generate_template_replace_npvp_nov(row_info, tag_freq=None):
if row_info["bpe_template"] == "error-parse-tree":
row_info["syntactic_template"] = row_info["bpe_sent"]
else:
bpe_template = parse_lambda(row_info["bpe_template"])
bpe_template.flag_vp_in_subtree()
bpe_template.prune_tag_novp_in_subtree(["NP", "VP"])
nodes_pruned = bpe_template.get_leaf_nodes_template()
row_info["syntactic_template"] = " ".join( [e.value.replace("@@_", "@@ ") for e in nodes_pruned])
return row_info
def generate_template_replace_toptags_nov(row_info, tag_freq):
if row_info["bpe_template"] == "error-parse-tree":
row_info["syntactic_template"] = row_info["bpe_sent"]
else:
tag_top_freq = []
tag_freq = list(tag_freq.items())
tag_freq.sort(key=lambda x: x[1], reverse=True)
tag_top_freq = [x[0] for x in tag_freq[:10]]
bpe_template = parse_lambda(row_info["bpe_template"])
bpe_template.flag_vp_in_subtree()
bpe_template.prune_tag_novp_in_subtree(tag_top_freq)
nodes_pruned = bpe_template.get_leaf_nodes_template()
row_info["syntactic_template"] = " ".join( [e.value.replace("@@_", "@@ ") for e in nodes_pruned])
return row_info
def generate_template_depth3(row_info, tag_freq, depth_level=2):
if row_info["bpe_template"] == "error-parse-tree":
row_info["syntactic_template"] = "S"
else:
bpe_template = parse_lambda(row_info["bpe_template"])
max_nodes_pruned = bpe_template.get_leaf_nodes_with_depth(depth_level)
row_info["syntactic_template"] = " ".join(
[e.value.replace("@@_", "@@ ") for e in max_nodes_pruned])
return row_info
def bpe_template_aggregate(base_path="./data/iwslt14.tokenized.de-en/"):
for file_name in glob.glob(base_path + "*.template"):
print(file_name)
file_name_org = file_name.replace(".template", ".bpe")
with open(file_name_org, "rt", encoding="utf8") as f:
lines = [_l.strip().replace("@@ ", "@@_") for _l in f.readlines()]
        df_org = pd.DataFrame({'bpe_sent': lines})
# Import
import datetime
import json
import random
import time
import urllib
from datetime import timedelta
from typing import Optional
import geopy
import geopy.distance
import pandas as pd
from pyroutelib3 import Router
from shapely.geometry import Polygon, Point
from termcolor import colored
from tqdm import tqdm
from config import *
global ROUTER, GEOLOCATOR, MANUFACTURER_ID
# -------------------------- Init functions
def init() -> None:
"""
Init router, geolocator, and manufacturers list
:return:
"""
def init_manufacturers() -> None:
"""
Init List with IDs of manufacturers
:return:
"""
global MANUFACTURER_ID
logging.debug("Exec init_manufacturers()")
entries = dict()
for i in range(N_MANUFACTURERS):
manufacturer = "manufacturer_{i}".format(i=str(i))
entries[manufacturer] = get_random_id(False)
MANUFACTURER_ID = entries
logging.info(colored("Successfully initiated MANUFACTURER_ID dict", "green"))
logging.debug("MANUFACTURER_ID dict: {d}".format(d=MANUFACTURER_ID))
global ROUTER, GEOLOCATOR
ROUTER = Router("car")
GEOLOCATOR = geopy.Nominatim(user_agent=OSM_AGENT)
init_manufacturers()
# ---
def get_distance(coord_a: tuple, coord_b: tuple) -> float:
"""
Compute distance in km between start_coord and end_coord
:param coord_a: (lat, lon)
:param coord_b: (lat, lon)
:return: float (km, rounded to 2 decimal places)
"""
logging.debug("Exec get_distance()")
dist = round(geopy.distance.distance(coord_a, coord_b).km, 2)
logging.debug("Distance between points: {d}km".format(d=dist))
return dist
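# Usage sketch (illustrative coordinates, Berlin -> Munich); geopy's geodesic distance
# should come out at roughly 500 km:
#   get_distance((52.5200, 13.4050), (48.1372, 11.5755))   # ~504 km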
def get_manufacturerid() -> str:
"""
Get manufacturer id from MANUFACTURER_ID list
:return: id
"""
return MANUFACTURER_ID["manufacturer_" + str(random.randint(0, N_MANUFACTURERS - 1))]
def get_nodes(coord_a: tuple, coord_b: tuple) -> tuple:
"""
Get start and end nodes from route
:param coord_a: (lat, lon)
:param coord_b: (lat, lon)
:return: node_a ID (from OSM), node_b ID (from OSM)
"""
logging.debug("Exec get_nodes()")
try:
node_a = ROUTER.findNode(coord_a[0], coord_a[1])
node_b = ROUTER.findNode(coord_b[0], coord_b[1])
logging.info(colored("Successfully computed start and end node", "green"))
return node_a, node_b
except:
logging.warning(colored("Failed to compute start and end node", "red"))
def get_route(start_node: str, end_node: str) -> Optional[list]:
"""
Finds route between start_node and end_node and converts them to latlon coordinates
:param start_node: OSM node id
:param end_node: OSM node id
:return: list of lat,lon coordinates
"""
# Get route
logging.debug("Exec get_route()")
try:
status, route = ROUTER.doRoute(start_node, end_node)
# Get route coordinates
coordinates = list(map(ROUTER.nodeLatLon, route))
logging.info(colored("Successfully computed route", "green"))
logging.debug("Route: {r}".format(r=coordinates))
return coordinates
except:
logging.warning(colored("Failed to get a route", "red"))
return None
def get_zipcode(lat: float, lon: float) -> str:
"""
Get ZIP code from lat and lon
:param lat: latitude
:param lon: longitude
:return: zip code
"""
logging.debug("Exec get_zipcode()")
location = GEOLOCATOR.reverse((lat, lon))
zip_code = location.raw['address']['postcode']
return zip_code
def get_random_id(car: bool) -> str:
"""
Generate random ID for vehicle or manufacturer
:param car: (bool) if True: get V-number, else get M-number
:return: random_id (str)
"""
logging.debug("Exec get_random_id()")
if car is True:
random_id = 'V{0:0{x}d}'.format(random.randint(0, 10 ** 8), x=8)
else:
        random_id = 'M{0:0{x}d}'.format(random.randint(0, 10 ** 5), x=5)
logging.info("Random id: {}".format(random_id))
return random_id
def get_random_time() -> datetime.datetime:
"""
This function will return a random datetime between two datetime
objects.
:return: datetime
"""
logging.debug("Exec get_random_time()")
start = TIME_RANGE[0]
end = TIME_RANGE[1]
delta = end - start
int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
random_second = random.randrange(int_delta)
random_time = start + timedelta(seconds=random_second)
logging.info(colored("Successfully computed random time: {t}".format(t=random_time), "green"))
return random_time
# ------------------------------------------ Functions -------------------------------------------- #
def get_city_boundaries() -> Polygon:
"""
    Get the boundaries of the city specified in config
    Returns: polygon with city boundaries
"""
def get_boundary_lonlat(city: str, country: str) -> Optional[list]:
"""
Get the city boundaries of city in country to use as guidelines which lonlat are valid or not.
:param city: string with city name
:param country: string with country name
:return: list with lonlat or None
"""
logging.debug("Exec nested function get_boundary_lonlat()")
url = "https://nominatim.openstreetmap.org/search.php?q=" + city + "+" + country + "&polygon_geojson=1&format=json"
page = urllib.request.urlopen(url).read()
osm_data = json.loads(page)
lonlat = None
for i in osm_data:
if (i['osm_type'] == 'relation') & (i['class'] == 'boundary'):
lonlat = i['geojson']['coordinates']
while len(lonlat) < 10:
lonlat = lonlat[0]
logging.debug(
colored("Found boundary coordinates for {city}, {country}:".format(city=city, country=country),
"green"))
logging.debug(i)
break
if lonlat == None:
logging.error(colored(
"Could not find boundary coordinates for {city}, {country}.".format(city=city, country=country), "red"))
return lonlat
logging.debug("Exec get_city_boundaries()")
    # Extract coordinates, apply buffer and convert to Polygon for the city
city = CITY.replace('ä', 'a').replace('ö', 'o').replace('ü', 'u')
lonlat = get_boundary_lonlat(city, COUNTRY)
poly = Polygon(lonlat).buffer(0.005)
logging.info(colored("Successfully got polygon for {city}, {country}".format(city=CITY, country=COUNTRY), "green"))
return poly
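# Usage sketch (assumes CITY and COUNTRY are set in config, e.g. "Berlin" / "Germany"):
#   poly = get_city_boundaries()
#   start_latlon, end_latlon = get_random_coords(poly)
#   trip_df = compute_trip(start_latlon, end_latlon)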
def get_valid_coord(poly: Polygon) -> tuple:
"""
Get random coordinate within the boundaries of a city
:param poly: polygon with city boundaries
:return: (lat, lon)
"""
logging.debug("Exec get_valid_coord()")
lon, lat = poly.exterior.xy
point_validity = False
while point_validity is False:
random_lon = round(random.uniform(min(lon), max(lon)), 14)
random_lat = round(random.uniform(min(lat), max(lat)), 14)
point_validity = poly.contains(Point(random_lon, random_lat))
logging.debug("Point validity: {v}".format(v=point_validity))
latlon = (random_lat, random_lon)
return latlon
def get_random_coords(poly: Polygon) -> tuple:
"""
Get random coordinates that are not too far away apart from one another
:param poly: defined boundaries in which random coordinates are generated
:return: tuple with start and end latlon
"""
logging.debug("Exec get_random_coords()")
dist = 0
while (dist < MIN_DIST_KM) or (dist > MAX_DIST_KM):
start_latlon = get_valid_coord(poly)
end_latlon = get_valid_coord(poly)
dist = get_distance(start_latlon, end_latlon)
logging.debug("Distance: {d}".format(d=dist))
logging.info(colored("Found two random coordinates with distance {d}km".format(d=dist), "green"))
return start_latlon, end_latlon
def compute_trip(start_coord: tuple, end_coord: tuple) -> Optional[pd.DataFrame]:
"""
Computes a (realistic) route between start and end coordinate, and enriches the dataset with manufacturer and
vehicle IDs, speed, co2 value, distance, time.
Args:
start_coord: latlon coordinate
end_coord: latlon coordinate
Returns: pandas Dataframe with information of a trip between start_coord and end_coord.
"""
logging.debug("Exec compute_trip()")
try:
# Find Nodes
start, end = get_nodes(start_coord, end_coord)
route_coords = get_route(start, end)
if route_coords is None:
logging.ERROR("Route_coords is None")
return None
vehicle_id = get_random_id(True)
manufacturer_id = get_manufacturerid()
timestamp_start = get_random_time()
timestamp = timestamp_start
total_dist = 0
total_seconds = 0
total_co2 = 0
trip = []
for i in tqdm(range(len(route_coords))):
try:
zipcode = get_zipcode(route_coords[i][0], route_coords[i][1])
except Exception as e:
if i > 0:
logging.info(colored("Could not get ZIP code. Using i-1 ZIP code.", "yellow"))
zipcode = trip[i - 1]["zipcode"]
else:
                    logging.error(colored("Could not get ZIP code! {e}".format(e=e), "red"))
break
if i == 0:
# Init variables
seconds = 0
dist = 0
km_per_hour = 0
co2_per_km = 0
co2_relative = 0
else:
# Else, compute route and random speed, distance, co2 grams, and seconds
a = route_coords[i - 1]
b = route_coords[i]
# Compute random speed that is between 60% and 115% of the previously recorded speed
dist = get_distance(a, b)
if dist == 0:
km_per_hour = 0
co2_per_km = 0
co2_relative = 0
seconds = random.randint(0, 10)
else:
km_per_hour = round(speed_old * random.randint(60, 115) / 100, 0)
co2_per_km = round(co2_per_km_old * random.randint(70, 120) / 100, 0)
co2_relative = round(co2_per_km * dist, 2)
seconds = round((dist / km_per_hour) * 60 * 60, 0)
# Store old variables for next round (to make values somewhat cohesive)
speed_old = km_per_hour if km_per_hour > 0 else KM_PER_HOUR
co2_per_km_old = co2_per_km if co2_per_km > 0 else CO2_PER_KM
# Add to totals
total_dist = round(total_dist + dist, 2)
total_seconds += seconds
timestamp += timedelta(seconds=seconds)
total_co2 = round(total_co2 + co2_relative, 0)
logging.debug(
"Speed: {s}km/h, Distance: {d}km, Time: {t}s, CO2: {co}g | Total time: {tt}s, Total dist: {td}km, Total CO2: {tco}g".format(
s=km_per_hour,
d=dist,
co=co2_relative,
t=seconds,
tt=total_seconds,
td=round(
total_dist,
2),
tco=total_co2))
point = {
"vehicle_id": vehicle_id,
"manufacturer_id": manufacturer_id,
"zipcode": zipcode,
"timestamp": timestamp,
"latlon": route_coords[i],
"dist": dist,
"seconds": seconds,
"co2_grams": co2_relative, # CO2 grams / km relative to travelled distance
"total_dist": total_dist,
"total_seconds": total_seconds,
"total_co2_grams": total_co2,
"timestamp_tripstart": timestamp_start,
"avg_kmperhour": total_dist / ((total_seconds / 60) / 60) if total_seconds > 0 else 0,
"avg_co2perkm": total_co2 / total_dist if total_dist > 0 else 0
}
trip.append(point)
if DEBUG is False:
time.sleep(1)
logging.info(colored(trip[-1], "blue"))
        df = pd.DataFrame(trip)
import itertools
import re
import os
import time
import copy
import json
import Amplo
import joblib
import shutil
import warnings
import numpy as np
import pandas as pd
from tqdm import tqdm
from typing import Union
from pathlib import Path
from datetime import datetime
from shap import TreeExplainer
from shap import KernelExplainer
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from Amplo import Utils
from Amplo.AutoML.Sequencer import Sequencer
from Amplo.AutoML.Modeller import Modeller
from Amplo.AutoML.DataSampler import DataSampler
from Amplo.AutoML.DataExplorer import DataExplorer
from Amplo.AutoML.DataProcessor import DataProcessor
from Amplo.AutoML.DriftDetector import DriftDetector
from Amplo.AutoML.FeatureProcessor import FeatureProcessor
from Amplo.AutoML.IntervalAnalyser import IntervalAnalyser
from Amplo.Classifiers.StackingClassifier import StackingClassifier
from Amplo.Documenting import BinaryDocumenting
from Amplo.Documenting import MultiDocumenting
from Amplo.Documenting import RegressionDocumenting
from Amplo.GridSearch import BaseGridSearch
from Amplo.GridSearch import HalvingGridSearch
from Amplo.GridSearch import OptunaGridSearch
from Amplo.Observation import DataObserver
from Amplo.Observation import ProductionObserver
from Amplo.Regressors.StackingRegressor import StackingRegressor
class Pipeline:
def __init__(self, **kwargs):
"""
Automated Machine Learning Pipeline for tabular data.
Designed for predictive maintenance applications, failure identification, failure prediction, condition
monitoring, etc.
Parameters
----------
Main Parameters:
main_dir [str]: Main directory of Pipeline (for documentation)
target [str]: Column name of the output/dependent/regressand variable.
name [str]: Name of the project (for documentation)
version [int]: Pipeline version (set automatically)
mode [str]: 'classification' or 'regression'
objective [str]: from sklearn metrics and scoring
Data Processor:
int_cols [list[str]]: Column names of integer columns
float_cols [list[str]]: Column names of float columns
date_cols [list[str]]: Column names of datetime columns
cat_cols [list[str]]: Column names of categorical columns
missing_values [str]: [DataProcessing] - 'remove', 'interpolate', 'mean' or 'zero'
outlier_removal [str]: [DataProcessing] - 'clip', 'boxplot', 'z-score' or 'none'
z_score_threshold [int]: [DataProcessing] If outlier_removal = 'z-score', the threshold is adaptable
include_output [bool]: Whether to include output in the training data (sensible only with sequencing)
Feature Processor:
extract_features [bool]: Whether to use FeatureProcessing module
information_threshold : [FeatureProcessing] Threshold for removing co-linear features
feature_timeout [int]: [FeatureProcessing] Time budget for feature processing
max_lags [int]: [FeatureProcessing] Maximum lags for lagged features to analyse
max_diff [int]: [FeatureProcessing] Maximum differencing order for differencing features
Interval Analyser:
interval_analyse [bool]: Whether to use IntervalAnalyser module
Note that this has no effect when data from ``self._read_data`` is not multi-indexed
Sequencing:
sequence [bool]: [Sequencing] Whether to use Sequence module
seq_back [int or list[int]]: Input time indices
If list -> includes all integers within the list
If int -> includes that many samples back
seq_forward [int or list[int]: Output time indices
If list -> includes all integers within the list.
If int -> includes that many samples forward.
seq_shift [int]: Shift input / output samples in time
seq_diff [int]: Difference the input & output, 'none', 'diff' or 'log_diff'
seq_flat [bool]: Whether to return a matrix (True) or Tensor (Flat)
Modelling:
standardize [bool]: Whether to standardize input/output data
shuffle [bool]: Whether to shuffle the samples during cross-validation
cv_splits [int]: How many cross-validation splits to make
store_models [bool]: Whether to store all trained model files
Grid Search:
grid_search_type [Optional[str]]: Which method to use 'optuna', 'halving', 'base' or None
grid_search_time_budget : Time budget for grid search
grid_search_candidates : Parameter evaluation budget for grid search
grid_search_iterations : Model evaluation budget for grid search
Stacking:
stacking [bool]: Whether to create a stacking model at the end
Production:
preprocess_function [str]: Add custom code for the prediction function, useful for production. Will be executed
with exec, can be multiline. Uses data as input.
Flags:
logging_level [Optional[Union[int, str]]]: Logging level for warnings, info, etc.
plot_eda [bool]: Whether to run Exploratory Data Analysis
process_data [bool]: Whether to force data processing
document_results [bool]: Whether to force documenting
no_dirs [bool]: Whether to create files or not
verbose [int]: Level of verbosity
"""
# Copy arguments
##################
# Main Settings
self.mainDir = kwargs.get('main_dir', 'AutoML/')
self.target = re.sub('[^a-z0-9]', '_', kwargs.get('target', '').lower())
self.name = kwargs.get('name', 'AutoML')
self.version = kwargs.get('version', None)
self.mode = kwargs.get('mode', None)
self.objective = kwargs.get('objective', None)
# Data Processor
self.intCols = kwargs.get('int_cols', None)
self.floatCols = kwargs.get('float_cols', None)
self.dateCols = kwargs.get('date_cols', None)
self.catCols = kwargs.get('cat_cols', None)
self.missingValues = kwargs.get('missing_values', 'zero')
self.outlierRemoval = kwargs.get('outlier_removal', 'clip')
self.zScoreThreshold = kwargs.get('z_score_threshold', 4)
self.includeOutput = kwargs.get('include_output', False)
# Balancer
self.balance = kwargs.get('balance', True)
# Feature Processor
self.extractFeatures = kwargs.get('extract_features', True)
self.informationThreshold = kwargs.get('information_threshold', 0.999)
self.featureTimeout = kwargs.get('feature_timeout', 3600)
self.maxLags = kwargs.get('max_lags', 0)
self.maxDiff = kwargs.get('max_diff', 0)
# Interval Analyser
self.useIntervalAnalyser = kwargs.get('interval_analyse', True)
# Sequencer
self.sequence = kwargs.get('sequence', False)
self.sequenceBack = kwargs.get('seq_back', 1)
self.sequenceForward = kwargs.get('seq_forward', 1)
self.sequenceShift = kwargs.get('seq_shift', 0)
self.sequenceDiff = kwargs.get('seq_diff', 'none')
self.sequenceFlat = kwargs.get('seq_flat', True)
# Modelling
self.standardize = kwargs.get('standardize', False)
self.shuffle = kwargs.get('shuffle', True)
        self.cvSplits = kwargs.get('cv_splits', 10)
self.storeModels = kwargs.get('store_models', False)
# Grid Search Parameters
self.gridSearchType = kwargs.get('grid_search_type', 'optuna')
self.gridSearchTimeout = kwargs.get('grid_search_time_budget', 3600)
self.gridSearchCandidates = kwargs.get('grid_search_candidates', 250)
self.gridSearchIterations = kwargs.get('grid_search_iterations', 3)
# Stacking
self.stacking = kwargs.get('stacking', False)
# Production
self.preprocessFunction = kwargs.get('preprocess_function', None)
# Flags
self.plotEDA = kwargs.get('plot_eda', False)
self.processData = kwargs.get('process_data', True)
self.documentResults = kwargs.get('document_results', True)
self.verbose = kwargs.get('verbose', 0)
self.noDirs = kwargs.get('no_dirs', False)
# Checks
assert self.mode in [None, 'regression', 'classification'], 'Supported modes: regression, classification.'
assert 0 < self.informationThreshold < 1, 'Information threshold needs to be within [0, 1]'
assert self.maxLags < 50, 'Max_lags too big. Max 50.'
assert self.maxDiff < 5, 'Max diff too big. Max 5.'
assert self.gridSearchType is None \
or self.gridSearchType.lower() in ['base', 'halving', 'optuna'], \
'Grid Search Type must be Base, Halving, Optuna or None'
# Advices
if self.includeOutput and not self.sequence:
            warnings.warn('[AutoML] IMPORTANT: it is strongly advised not to include the output without sequencing.')
# Create dirs
if not self.noDirs:
self._create_dirs()
self._load_version()
# Store Pipeline Settings
self.settings = {'pipeline': kwargs, 'validation': {}, 'feature_set': ''}
# Objective & Scorer
self.scorer = None
if self.objective is not None:
assert isinstance(self.objective, str), 'Objective needs to be a string'
assert self.objective in metrics.SCORERS.keys(), 'Metric not supported, look at sklearn.metrics'
# Required sub-classes
self.dataSampler = DataSampler()
self.dataProcessor = DataProcessor()
self.dataSequencer = Sequencer()
self.featureProcessor = FeatureProcessor()
self.intervalAnalyser = IntervalAnalyser()
self.driftDetector = DriftDetector()
# Instance initiating
self.bestModel = None
self._data = None
self.featureSets = None
self.results = None
self.n_classes = None
self.is_fitted = False
# Monitoring
logging_level = kwargs.get('logging_level', 'INFO')
logging_dir = Path(self.mainDir) / 'app_logs.log' if not self.noDirs else None
self.logger = Utils.logging.get_logger('AutoML', logging_dir, logging_level, capture_warnings=True)
self._prediction_time = None
self._main_predictors = None
# User Pointing Functions
def get_settings(self, version: int = None) -> dict:
"""
Get settings to recreate fitted object.
Parameters
----------
version : int, optional
Production version, defaults to current version
"""
if version is None or version == self.version:
assert self.is_fitted, "Pipeline not yet fitted."
return self.settings
else:
settings_path = self.mainDir + f'Production/v{self.version}/Settings.json'
assert Path(settings_path).exists(), 'Cannot load settings from nonexistent version'
return json.load(open(settings_path, 'r'))
def load_settings(self, settings: dict):
"""
Restores a pipeline from settings.
Parameters
----------
settings [dict]: Pipeline settings
"""
# Set parameters
settings['pipeline']['no_dirs'] = True
self.__init__(**settings['pipeline'])
self.settings = settings
self.dataProcessor.load_settings(settings['data_processing'])
self.featureProcessor.load_settings(settings['feature_processing'])
# TODO: load_settings for IntervalAnalyser (not yet implemented)
if 'drift_detector' in settings:
self.driftDetector = DriftDetector(
num_cols=self.dataProcessor.float_cols + self.dataProcessor.int_cols,
cat_cols=self.dataProcessor.cat_cols,
date_cols=self.dataProcessor.date_cols
).load_weights(settings['drift_detector'])
def load_model(self, model: object):
"""
Restores a trained model
"""
assert type(model).__name__ == self.settings['model']
self.bestModel = model
self.is_fitted = True
def fit(self, *args, **kwargs):
"""
Fit the full AutoML pipeline.
1. Prepare data for training
2. Train / optimize models
3. Prepare Production Files
Nicely organises all required scripts / files to make a prediction
Parameters
----------
args
For data reading - Propagated to `self.data_preparation`
kwargs
For data reading (propagated to `self.data_preparation`) AND
for production filing (propagated to `self.conclude_fitting`)
"""
# Starting
print('\n\n*** Starting Amplo AutoML - {} ***\n\n'.format(self.name))
# Prepare data for training
self.data_preparation(*args, **kwargs)
# Train / optimize models
self.model_training(**kwargs)
# Conclude fitting
self.conclude_fitting(**kwargs)
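    # Usage sketch (hypothetical data and column names, for illustration only):
    #   pipe = Pipeline(target='label', mode='classification')
    #   pipe.fit(training_df)                 # data preparation, model training & production filing
    #   predictions = pipe.predict(new_df)
    #   probabilities = pipe.predict_proba(new_df)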
def data_preparation(self, *args, **kwargs):
"""
Prepare data for modelling
1. Data Processing
Cleans all the data. See @DataProcessing
2. (optional) Exploratory Data Analysis
Creates a ton of plots which are helpful to improve predictions manually
3. Feature Processing
Extracts & Selects. See @FeatureProcessing
Parameters
----------
args
For data reading - Propagated to `self._read_data`
kwargs
For data reading - Propagated to `self._read_data`
"""
# Reading data
self._read_data(*args, **kwargs)
# Check data
obs = DataObserver(pipeline=self)
obs.observe()
# Detect mode (classification / regression)
self._mode_detector()
# Preprocess Data
self._data_processing()
# Run Exploratory Data Analysis
self._eda()
# Balance data
self._data_sampling()
# Sequence
self._sequencing()
# Extract and select features
self._feature_processing()
# Interval-analyze data
self._interval_analysis()
# Standardize
# Standardizing assures equal scales, equal gradients and no clipping.
# Therefore, it needs to be after sequencing & feature processing, as this alters scales
self._standardizing()
def model_training(self, **kwargs):
"""Train models
1. Initial Modelling
Runs various off the shelf models with default parameters for all feature sets
If Sequencing is enabled, this is where it happens, as here, the feature set is generated.
2. Grid Search
Optimizes the hyperparameters of the best performing models
3. (optional) Create Stacking model
4. (optional) Create documentation
Parameters
----------
kwargs : optional
Keyword arguments that will be passed to `self.grid_search`.
"""
# Run initial models
self._initial_modelling()
# Optimize Hyper parameters
self.grid_search(**kwargs)
# Create stacking model
self._create_stacking()
def conclude_fitting(self, *, model=None, feature_set=None, params=None, **kwargs):
"""
Prepare production files that are necessary to deploy a specific
model / feature set combination
Creates or modifies the following files
- ``Model.joblib`` (production model)
- ``Settings.json`` (model settings)
- ``Report.pdf`` (training report)
Parameters
----------
model : str or list of str, optional
Model file for which to prepare production files. If multiple, selects the best.
feature_set : str or list of str, optional
Feature set for which to prepare production files. If multiple, selects the best.
params : dict, optional
Model parameters for which to prepare production files.
Default: takes the best parameters
kwargs
Collecting container for keyword arguments that are passed through `self.fit()`.
"""
# Set up production path
prod_dir = self.mainDir + f'Production/v{self.version}/'
Path(prod_dir).mkdir(exist_ok=True)
# Parse arguments
model, feature_set, params = self._parse_production_args(model, feature_set, params)
# Verbose printing
if self.verbose > 0:
print(f'[AutoML] Preparing Production files for {model}, {feature_set}, {params}')
# Set best model (`self.bestModel`)
self._prepare_production_model(prod_dir + 'Model.joblib', model, feature_set, params)
# Set and store production settings
self._prepare_production_settings(prod_dir + 'Settings.json', model, feature_set, params)
# Observe production
# TODO[TS, 25.05.2022]: Currently, we are observing the data also here.
# However, in a future version we probably will only observe the data
# directly after :func:`_read_data()`. For now we wait...
obs = ProductionObserver(pipeline=self)
obs.observe()
self.settings['production_observation'] = obs.observations
# Report
report_path = self.mainDir + f'Documentation/v{self.version}/{model}_{feature_set}.pdf'
if not Path(report_path).exists():
self.document(self.bestModel, feature_set)
shutil.copy(report_path, prod_dir + 'Report.pdf')
# Finish
self.is_fitted = True
print('[AutoML] All done :)')
def convert_data(self, x: pd.DataFrame, preprocess: bool = True) -> [pd.DataFrame, pd.Series]:
"""
Function that uses the same process as the pipeline to clean data.
Useful if pipeline is pickled for production
Parameters
----------
data [pd.DataFrame]: Input features
"""
# Convert to Pandas
if isinstance(x, np.ndarray):
x = pd.DataFrame(x, columns=[f"Feature_{i}" for i in range(x.shape[1])])
# Custom code
if self.preprocessFunction is not None and preprocess:
ex_globals = {'data': x}
exec(self.preprocessFunction, ex_globals)
x = ex_globals['data']
# Process data
x = self.dataProcessor.transform(x)
# Drift Check
self.driftDetector.check(x)
# Split output
y = None
if self.target in x.keys():
y = x[self.target]
if not self.includeOutput:
x = x.drop(self.target, axis=1)
# Sequence
if self.sequence:
x, y = self.dataSequencer.convert(x, y)
# Convert Features
x = self.featureProcessor.transform(x, self.settings['feature_set'])
# Standardize
if self.standardize:
x, y = self._transform_standardize(x, y)
# NaN test -- datetime should be taken care of by now
if x.astype('float32').replace([np.inf, -np.inf], np.nan).isna().sum().sum() != 0:
raise ValueError(f"Column(s) with NaN: {list(x.keys()[x.isna().sum() > 0])}")
# Return
return x, y
def predict(self, data: pd.DataFrame) -> np.ndarray:
"""
Full script to make predictions. Uses 'Production' folder with defined or latest version.
Parameters
----------
data [pd.DataFrame]: data to do prediction on
"""
start_time = time.time()
assert self.is_fitted, "Pipeline not yet fitted."
# Print
if self.verbose > 0:
print('[AutoML] Predicting with {}, v{}'.format(type(self.bestModel).__name__, self.version))
# Convert
x, y = self.convert_data(data)
# Predict
if self.mode == 'regression' and self.standardize:
predictions = self._inverse_standardize(self.bestModel.predict(x))
else:
predictions = self.bestModel.predict(x)
# Stop timer
self._prediction_time = (time.time() - start_time) / len(x) * 1000
# Calculate main predictors
self._get_main_predictors(x)
return predictions
def predict_proba(self, data: pd.DataFrame) -> np.ndarray:
"""
Returns probabilistic prediction, only for classification.
Parameters
----------
data [pd.DataFrame]: data to do prediction on
"""
start_time = time.time()
assert self.is_fitted, "Pipeline not yet fitted."
assert self.mode == 'classification', 'Predict_proba only available for classification'
assert hasattr(self.bestModel, 'predict_proba'), '{} has no attribute predict_proba'.format(
type(self.bestModel).__name__)
# Print
if self.verbose > 0:
print('[AutoML] Predicting with {}, v{}'.format(type(self.bestModel).__name__, self.version))
# Convert data
x, y = self.convert_data(data)
# Predict
prediction = self.bestModel.predict_proba(x)
# Stop timer
self._prediction_time = (time.time() - start_time) / len(x) * 1000
# Calculate main predictors
self._get_main_predictors(x)
return prediction
# Fit functions
def _read_data(self, x=None, y=None, *, data=None, **kwargs):
"""
Reads and loads data into desired format.
Expects to receive:
1. Both, ``x`` and ``y`` (-> features and target), or
2. Either ``x`` or ``data`` (-> dataframe or path to folder)
Parameters
----------
x : np.ndarray or pd.Series or pd.DataFrame or str or Path, optional
x-data (input) OR acts as ``data`` parameter when param ``y`` is empty
y : np.ndarray or pd.Series, optional
y-data (target)
data : pd.DataFrame or str or Path, optional
Contains both, x and y, OR provides a path to folder structure
kwargs
Collecting container for keyword arguments that are passed through `self.fit()`.
Returns
-------
Pipeline
"""
assert x is not None or data is not None, 'No data provided'
assert (x is not None) ^ (data is not None), 'Setting both, `x` and `data`, is ambiguous'
# Labels are provided separately
if y is not None:
# Check data
x = x if x is not None else data
assert x is not None, 'Parameter ``x`` is not set'
assert isinstance(x, (np.ndarray, pd.Series, pd.DataFrame)), 'Unsupported data type for parameter ``x``'
assert isinstance(y, (np.ndarray, pd.Series)), 'Unsupported data type for parameter ``y``'
# Set target manually if not defined
if self.target == '':
self.target = 'target'
# Parse x-data
if isinstance(x, np.ndarray):
x = pd.DataFrame(x)
elif isinstance(x, pd.Series):
x = pd.DataFrame(x)
# Parse y-data
if isinstance(y, np.ndarray):
y = pd.Series(y, index=x.index)
y.name = self.target
# Check data
assert all(x.index == y.index), '``x`` and ``y`` indices do not match'
if self.target in x.columns:
assert all(x[self.target] == y), 'Target column co-exists in both, ``x`` and ``y`` data, ' \
f'but has not equal content. Rename the column ``{self.target}`` ' \
'in ``x`` or set a (different) target in initialization.'
# Concatenate x and y
data = pd.concat([x, y], axis=1)
# Set data parameter in case it is provided through parameter ``x``
data = data if data is not None else x
metadata = None
# A path was provided to read out (multi-indexed) data
if isinstance(data, (str, Path)):
# Set target manually if not defined
if self.target == '':
self.target = 'target'
# Parse data
data, metadata = Utils.io.merge_logs(data, self.target)
# Test data
assert self.target != '', 'No target string provided'
assert self.target in data.columns, 'Target column missing'
assert len(data.columns) == data.columns.nunique(), 'Column names are not unique'
# Parse data
y = data[self.target]
x = data.drop(self.target, axis=1)
if isinstance(x.columns, pd.RangeIndex):
x.columns = [f'Feature_{i}' for i in range(x.shape[1])]
# Concatenate x and y
data = pd.concat([x, y], axis=1)
# Save data
self.set_data(data)
# Store metadata in settings
self.settings['file_metadata'] = metadata or dict()
return self
def has_new_training_data(self):
# Return True if no previous version exists
if self.version == 1:
return True
# Get previous and current file metadata
curr_metadata = self.settings['file_metadata']
last_metadata = self.get_settings(self.version - 1)['file_metadata']
# Check each settings file
for file_id in curr_metadata:
# Get file specific metadata
curr = curr_metadata[file_id]
last = last_metadata.get(file_id, dict())
# Compare metadata
same_folder = curr['folder'] == last.get('folder')
same_file = curr['file'] == last.get('file')
same_mtime = curr['last_modified'] == last.get('last_modified')
if not all([same_folder, same_file, same_mtime]):
return False
return True
def _mode_detector(self):
"""
Detects the mode (Regression / Classification)
"""
# Only run if mode is not provided
if self.mode is None:
# Classification if string
if self.y.dtype == str or self.y.nunique() < 0.1 * len(self.data):
self.mode = 'classification'
self.objective = self.objective or 'neg_log_loss'
# Else regression
else:
self.mode = 'regression'
self.objective = self.objective or 'neg_mean_absolute_error'
# Set scorer
self.scorer = metrics.SCORERS[self.objective]
# Copy to settings
self.settings['pipeline']['mode'] = self.mode
self.settings['pipeline']['objective'] = self.objective
# Print
if self.verbose > 0:
print(f"[AutoML] Setting mode to {self.mode} & objective to {self.objective}.")
return
def _data_processing(self):
"""
Organises the data cleaning. Heavy lifting is done in self.dataProcessor, but settings etc. need
to be organised here.
"""
self.dataProcessor = DataProcessor(target=self.target, int_cols=self.intCols, float_cols=self.floatCols,
date_cols=self.dateCols, cat_cols=self.catCols,
missing_values=self.missingValues,
outlier_removal=self.outlierRemoval, z_score_threshold=self.zScoreThreshold)
# Set paths
data_path = self.mainDir + f'Data/Cleaned_v{self.version}.csv'
settings_path = self.mainDir + f'Settings/Cleaning_v{self.version}.json'
if Path(data_path).exists() and Path(settings_path).exists():
# Load data
data = self._read_csv(data_path)
self.set_data(data)
# Load settings
self.settings['data_processing'] = json.load(open(settings_path, 'r'))
self.dataProcessor.load_settings(self.settings['data_processing'])
if self.verbose > 0:
print('[AutoML] Loaded Cleaned Data')
else:
# Cleaning
data = self.dataProcessor.fit_transform(self.data)
self.set_data(data)
# Store data
self._write_csv(self.data, data_path)
# Save settings
self.settings['data_processing'] = self.dataProcessor.get_settings()
json.dump(self.settings['data_processing'], open(settings_path, 'w'))
# If no columns were provided, load them from data processor
if self.dateCols is None:
self.dateCols = self.settings['data_processing']['date_cols']
if self.intCols is None:
self.intCols = self.settings['data_processing']['int_cols']
if self.floatCols is None:
self.floatCols = self.settings['data_processing']['float_cols']
if self.catCols is None:
self.catCols = self.settings['data_processing']['cat_cols']
# Assert classes in case of classification
self.n_classes = self.y.nunique()
if self.mode == 'classification':
if self.n_classes >= 50:
warnings.warn('More than 50 classes, you may want to reconsider classification mode')
if set(self.y) != set([i for i in range(len(set(self.y)))]):
raise ValueError('Classes should be [0, 1, ...]')
def _eda(self):
if self.plotEDA:
print('[AutoML] Starting Exploratory Data Analysis')
eda = DataExplorer(self.x, y=self.y,
mode=self.mode,
folder=self.mainDir,
version=self.version)
eda.run()
def _data_sampling(self):
"""
Only run for classification problems. Balances the data using imblearn.
Does not guarantee to return balanced classes. (Methods are data dependent)
"""
self.dataSampler = DataSampler(method='both', margin=0.1, cv_splits=self.cvSplits, shuffle=self.shuffle,
fast_run=False, objective=self.objective)
# Set paths
data_path = self.mainDir + f'Data/Balanced_v{self.version}.csv'
# Only necessary for classification
if self.mode == 'classification' and self.balance:
if Path(data_path).exists():
# Load data
data = self._read_csv(data_path)
self.set_data(data)
if self.verbose > 0:
print('[AutoML] Loaded Balanced data')
else:
# Fit and resample
print('[AutoML] Resampling data')
x, y = self.dataSampler.fit_resample(self.x, self.y)
# Store
self._set_xy(x, y)
self._write_csv(self.data, data_path)
def _sequencing(self):
"""
Sequences the data. Useful mostly for problems where older samples play a role in future values.
The settings of this module are NOT AUTOMATIC
"""
self.dataSequencer = Sequencer(back=self.sequenceBack, forward=self.sequenceForward,
shift=self.sequenceShift, diff=self.sequenceDiff)
# Set paths
data_path = self.mainDir + f'Data/Sequence_v{self.version}.csv'
if self.sequence:
if Path(data_path).exists():
# Load data
data = self._read_csv(data_path)
self.set_data(data)
if self.verbose > 0:
print('[AutoML] Loaded Extracted Features')
else:
# Sequencing
print('[AutoML] Sequencing data')
x, y = self.dataSequencer.convert(self.x, self.y)
# Store
self._set_xy(x, y)
self._write_csv(self.data, data_path)
def _feature_processing(self):
"""
Organises feature processing. Heavy lifting is done in self.featureProcessor, but settings, etc.
need to be organised here.
"""
self.featureProcessor = FeatureProcessor(mode=self.mode, max_lags=self.maxLags, max_diff=self.maxDiff,
extract_features=self.extractFeatures, timeout=self.featureTimeout,
information_threshold=self.informationThreshold)
# Set paths
data_path = self.mainDir + f'Data/Extracted_v{self.version}.csv'
settings_path = self.mainDir + f'Settings/Extracting_v{self.version}.json'
if Path(data_path).exists() and Path(settings_path).exists():
# Loading data
x = self._read_csv(data_path)
self._set_x(x)
# Loading settings
self.settings['feature_processing'] = json.load(open(settings_path, 'r'))
self.featureProcessor.load_settings(self.settings['feature_processing'])
self.featureSets = self.settings['feature_processing']['featureSets']
if self.verbose > 0:
print('[AutoML] Loaded Extracted Features')
else:
print('[AutoML] Starting Feature Processor')
# Transform data
x, self.featureSets = self.featureProcessor.fit_transform(self.x, self.y)
# Store data
self._set_x(x)
self._write_csv(self.x, data_path)
# Save settings
self.settings['feature_processing'] = self.featureProcessor.get_settings()
json.dump(self.settings['feature_processing'], open(settings_path, 'w'))
def _interval_analysis(self):
"""
Interval-analyzes the data using ``Amplo.AutoML.IntervalAnalyser``
or resorts to pre-computed data, if present.
"""
# Skip analysis when analysis is not possible and/or not desired
is_interval_analyzable = len(self.x.index.names) == 2
if not (self.useIntervalAnalyser and is_interval_analyzable):
return
self.intervalAnalyser = IntervalAnalyser(target=self.target)
# Set paths
data_path = self.mainDir + f'Data/Interval_Analyzed_v{self.version}.csv'
settings_path = self.mainDir + f'Settings/Interval_Analysis_v{self.version}.json'
if Path(data_path).exists(): # TODO: and Path(settings_path).exists():
# Load data
data = self._read_csv(data_path)
self.set_data(data)
# TODO implement `IntervalAnalyser.load_settings` and add to `self.load_settings`
# # Load settings
# self.settings['interval_analysis'] = json.load(open(settings_path, 'r'))
# self.intervalAnalyser.load_settings(self.settings['interval_analysis'])
if self.verbose > 0:
print('[AutoML] Loaded interval-analyzed data')
else:
print(f'[AutoML] Interval-analyzing data')
# Transform data
data = self.intervalAnalyser.fit_transform(self.x, self.y)
# Store data
self.set_data(data)
self._write_csv(self.data, data_path)
# TODO implement `IntervalAnalyser.get_settings` and add to `self.get_settings`
# # Save settings
# self.settings['interval_analysis'] = self.intervalAnalyser.get_settings()
# json.dump(self.settings['interval_analysis'], open(settings_path, 'w'))
def _standardizing(self):
"""
Wrapper function to determine whether to fit or load
"""
# Return if standardize is off
if not self.standardize:
return
# Set paths
settings_path = self.mainDir + f'Settings/Standardize_v{self.version}.json'
if Path(settings_path).exists():
# Load data
self.settings['standardize'] = json.load(open(settings_path, 'r'))
else:
# Fit data
self._fit_standardize(self.x, self.y)
# Store Settings
json.dump(self.settings['standardize'], open(settings_path, 'w'))
# Transform data
x, y = self._transform_standardize(self.x, self.y)
self._set_xy(x, y)
def _initial_modelling(self):
"""
Runs various models to see which work well.
"""
# Set paths
results_path = Path(self.mainDir) / 'Results.csv'
# Load existing results
if results_path.exists():
# Load results
self.results = pd.read_csv(results_path)
# Printing here as we load it
results = self.results[np.logical_and(
self.results['version'] == self.version,
self.results['type'] == 'Initial modelling'
)]
for fs in set(results['dataset']):
print(f'[AutoML] Initial Modelling for {fs} ({len(self.featureSets[fs])})')
fsr = results[results['dataset'] == fs]
for i in range(len(fsr)):
row = fsr.iloc[i]
print(f'[AutoML] {row["model"].ljust(40)} {self.objective}: '
f'{row["mean_objective"]:.4f} \u00B1 {row["std_objective"]:.4f}')
# Check if this version has been modelled
if self.results is None or self.version not in self.results['version'].values:
# Iterate through feature sets
for feature_set, cols in self.featureSets.items():
# Skip empty sets
if len(cols) == 0:
print(f'[AutoML] Skipping {feature_set} features, empty set')
continue
print(f'[AutoML] Initial Modelling for {feature_set} features ({len(cols)})')
# Do the modelling
modeller = Modeller(mode=self.mode, shuffle=self.shuffle, store_models=self.storeModels,
objective=self.objective, dataset=feature_set,
store_results=False, folder=self.mainDir + 'Models/')
results = modeller.fit(self.x[cols], self.y)
# Add results to memory
results['type'] = 'Initial modelling'
results['version'] = self.version
if self.results is None:
self.results = results
else:
self.results = pd.concat([self.results, results])
# Save results
self.results.to_csv(results_path, index=False)
def grid_search(self, model=None, feature_set=None, parameter_set=None, **kwargs):
"""Runs a grid search.
By default, takes ``self.results`` and runs for the top ``n=self.gridSearchIterations`` optimizations.
There is the option to provide ``model`` and ``feature_set``, but **both** have to be provided. In this
case, the model and dataset combination will be optimized.
Implemented grid search types: Base, Halving, Optuna.
Parameters
----------
model : list of (str or object) or object or str, optional
Which model to run grid search for.
feature_set : list of str or str, optional
Which feature set to run grid search for. Must be provided when `model` is not None.
Options: ``RFT``, ``RFI``, ``ShapThreshold`` or ``ShapIncrement``
parameter_set : dict, optional
Parameter grid to optimize over.
Notes
-----
When both parameters, ``model`` and ``feature_set``, are provided, the grid search behaves as follows:
- When both parameters are either of dtype ``str`` or have the same length, then grid search will
treat them as pairs.
- When one parameter is an iterable and the other parameter is either a string or an iterable
of different length, then grid search will happen for each unique combination of these parameters.
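Examples
--------
A hedged sketch; the model name below is illustrative only, while ``RFT`` is one
of the feature-set options listed above.
>>> pipe.grid_search()                                      # optimise the top initial models
>>> pipe.grid_search(model='LightGBM', feature_set='RFT')   # optimise a single model/feature-set pair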
"""
# Skip grid search and set best initial model as best grid search parameters
if self.gridSearchType is None or self.gridSearchIterations == 0:
best_initial_model = self._sort_results(self.results[self.results['version'] == self.version]).iloc[:1]
best_initial_model['type'] = 'Hyper Parameter'
self.results = | pd.concat([self.results, best_initial_model], ignore_index=True) | pandas.concat |
import os
import numpy as np
import pandas as pd
import pickle
import glob
import shutil
import logging
import re, sys, joblib, bz2
import multiprocessing as mp
import tensorflow as tf
from joblib import Parallel, delayed
from Fuzzy_clustering.ver_tf2.CNN_tf_core_3d import CNN_3d
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# from scipy.interpolate import interp2d
# from util_database import write_database
# from Fuzzy_clustering.ver_tf2.Forecast_model import forecast_model
from Fuzzy_clustering.ver_tf2.utils_for_forecast import split_continuous
from Fuzzy_clustering.ver_tf2.CNN_predict_3d import CNN_3d_predict
def optimize_cnn(cnn, kernels, hsize, cnn_max_iterations, cnn_learning_rate, gpu, filters):
flag = False
for _ in range(3):
try:
acc_old_cnn, scale_cnn, model_cnn = cnn.train_cnn(max_iterations=cnn_max_iterations,
learning_rate=cnn_learning_rate, kernels=kernels, h_size=hsize, gpu_id=gpu,filters=filters)
flag=True
except:
filters = int(filters/2)
pass
if not flag:
acc_old_cnn=np.inf
scale_cnn=None
model_cnn=None
return acc_old_cnn, kernels, hsize, scale_cnn, model_cnn, cnn.pool_size, cnn.trial, cnn_learning_rate
def predict(q, H, model):
tf.config.set_soft_device_placement(True)
pred = model.predict(H)
q.put((pred[0]))
class cnn_3d_model():
def __init__(self, static_data, rated, cluster_dir):
self.static_data_all = static_data
self.static_data = static_data['CNN']
self.rated = rated
self.cluster = os.path.basename(cluster_dir)
self.cluster_cnn_dir = os.path.join(cluster_dir, 'CNN_3d')
self.model_dir = os.path.join(self.cluster_cnn_dir, 'model')
self.cluster_dir = cluster_dir
self.istrained = False
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
try:
self.load(self.model_dir)
except:
pass
def train_cnn(self, X, y):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(self.model_dir, 'log_train_' + self.cluster + '.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
print('CNN training...begin for %s' % self.cluster)
logger.info('CNN training...begin for %s ', self.cluster)
if len(y.shape)==1:
y = y.reshape(-1, 1)
X_train, X_test, y_train, y_test = split_continuous(X, y, test_size=0.15, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.15, random_state=42)
results =[]
for trial in [0, 3]:
if trial != 0:
pool_size = [1, 2, 2]
else:
pool_size = [2, 1]
cnn = CNN_3d(self.static_data, self.rated, X_train, y_train, X_val, y_val, X_test, y_test, pool_size, trial=trial)
self.acc_cnn = np.inf
gpus = np.tile(self.static_data['gpus'], 4)
if trial==0:
kernels=[
# [2, 2],
[2, 4],
[4, 2],
# [4, 4]
]
else:
kernels = [
[2, 4, 4],
# [2, 2, 2],
[3, 2, 2],
# [3, 4, 4]
]
# res = optimize_cnn(cnn, kernels[0], self.static_data['h_size'],
# self.static_data['max_iterations'],
# self.static_data['learning_rate'],
# gpus[0],int(self.static_data['filters']))
res = Parallel(n_jobs=len(self.static_data['gpus']))(
delayed(optimize_cnn)(cnn, kernels[k], self.static_data['h_size'],
self.static_data['max_iterations'],
self.static_data['learning_rate'],
gpus[int(k)], int(self.static_data['filters'])) for k in range(2))
results += res
for r in results:
logger.info("kernel: %s accuracy cnn: %s", r[1], r[0])
acc_cnn = np.array([r[0] for r in results])
self.acc_cnn, self.best_kernel, hsize, self.scale_cnn, model_cnn, self.pool_size, self.trial, lr= results[acc_cnn.argmin()]
self.model = model_cnn
train_res = | pd.DataFrame.from_dict(model_cnn['error_func'], orient='index') | pandas.DataFrame.from_dict |
from flask import Flask, redirect,url_for, render_template, request
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import datetime
import pickle
from pandas import to_datetime
app = Flask(__name__)
@app.route("/home")
@app.route("/")
def home():
return render_template("home.html")
@app.route("/kerala" ,methods=["POST", "GET"])
def kerala():
if request.method == "POST":
date = list()
dat=request.form["nm"]
date.append([dat])
date = pd.DataFrame(date)
date.columns = ['ds']
date['ds'] = to_datetime(date['ds'])
with open('models/Kerala.pkl', 'rb') as f:
model = pickle.load(f)
forecast = model.predict(date)
x=forecast.iloc[ : ,3].values
return redirect(url_for("predict",pred=x))
else:
return render_template("state.html")
@app.route("/tamilnadu" ,methods=["POST", "GET"])
def tn():
if request.method == "POST":
date = list()
dat=request.form["nm"]
date.append([dat])
date = pd.DataFrame(date)
date.columns = ['ds']
date['ds'] = to_datetime(date['ds'])
with open('models/TamilNadu.pkl', 'rb') as f:
model = pickle.load(f)
forecast = model.predict(date)
x=forecast.iloc[ : ,3].values
return redirect(url_for("predict",pred=x))
else:
return render_template("state.html")
@app.route("/karnataka" ,methods=["POST", "GET"])
def kn():
if request.method == "POST":
date = list()
dat=request.form["nm"]
date.append([dat])
date = pd.DataFrame(date)
date.columns = ['ds']
date['ds'] = to_datetime(date['ds'])
with open('models/Karnataka.pkl', 'rb') as f:
model = pickle.load(f)
forecast = model.predict(date)
x=forecast.iloc[ : ,3].values
return redirect(url_for("predict",pred=x))
else:
return render_template("state.html")
@app.route("/westbengal" ,methods=["POST", "GET"])
def wb():
if request.method == "POST":
date = list()
dat=request.form["nm"]
date.append([dat])
date = pd.DataFrame(date)
date.columns = ['ds']
date['ds'] = to_datetime(date['ds'])
with open('models/WestBengal.pkl', 'rb') as f:
model = pickle.load(f)
forecast = model.predict(date)
x=forecast.iloc[ : ,3].values
return redirect(url_for("predict",pred=x))
else:
return render_template("state.html")
@app.route("/telangana" ,methods=["POST", "GET"])
def telangana():
if request.method == "POST":
date = list()
dat=request.form["nm"]
date.append([dat])
date = pd.DataFrame(date)
date.columns = ['ds']
date['ds'] = to_datetime(date['ds'])
with open('models/Telangana.pkl', 'rb') as f:
model = pickle.load(f)
forecast = model.predict(date)
x=forecast.iloc[ : ,3].values
return redirect(url_for("predict",pred=x))
else:
return render_template("state.html")
@app.route("/rajasthan" ,methods=["POST", "GET"])
def rajasth():
if request.method == "POST":
date = list()
dat=request.form["nm"]
date.append([dat])
date = pd.DataFrame(date)
date.columns = ['ds']
date['ds'] = to_datetime(date['ds'])
with open('models/Rajasthan.pkl', 'rb') as f:
model = pickle.load(f)
forecast = model.predict(date)
x=forecast.iloc[ : ,3].values
return redirect(url_for("predict",pred=x))
else:
return render_template("state.html")
@app.route("/maharashtra" ,methods=["POST", "GET"])
def mh():
if request.method == "POST":
date = list()
dat=request.form["nm"]
date.append([dat])
date = pd.DataFrame(date)
date.columns = ['ds']
date['ds'] = to_datetime(date['ds'])
with open('models/maharashtra.pkl', 'rb') as f:
model = pickle.load(f)
forecast = model.predict(date)
x=forecast.iloc[ : ,3].values
return redirect(url_for("predict",pred=x))
else:
return render_template("state.html")
@app.route("/gujarat" ,methods=["POST", "GET"])
def gujarat():
if request.method == "POST":
date = list()
dat=request.form["nm"]
date.append([dat])
date = pd.DataFrame(date)
date.columns = ['ds']
date['ds'] = to_datetime(date['ds'])
with open('models/Gujarat.pkl', 'rb') as f:
model = pickle.load(f)
forecast = model.predict(date)
x=forecast.iloc[ : ,3].values
return redirect(url_for("predict",pred=x))
else:
return render_template("state.html")
@app.route("/delhi" ,methods=["POST", "GET"])
def delhi():
if request.method == "POST":
date = list()
dat=request.form["nm"]
date.append([dat])
date = pd.DataFrame(date)
date.columns = ['ds']
date['ds'] = to_datetime(date['ds'])
with open('models/Delhi.pkl', 'rb') as f:
model = pickle.load(f)
forecast = model.predict(date)
x=forecast.iloc[ : ,3].values
return redirect(url_for("predict",pred=x))
else:
return render_template("state.html")
@app.route("/bihar" ,methods=["POST", "GET"])
def bihar():
if request.method == "POST":
date = list()
dat=request.form["nm"]
date.append([dat])
date = pd.DataFrame(date)
date.columns = ['ds']
date['ds'] = to_datetime(date['ds'])
with open('models/Bihar.pkl', 'rb') as f:
model = pickle.load(f)
forecast = model.predict(date)
x=forecast.iloc[ : ,3].values
return redirect(url_for("predict",pred=x))
else:
return render_template("state.html")
@app.route("/andhrapradesh" ,methods=["POST", "GET"])
def ap():
if request.method == "POST":
date = list()
dat=request.form["nm"]
date.append([dat])
date = pd.DataFrame(date)
date.columns = ['ds']
date['ds'] = to_datetime(date['ds'])
with open('models/AndhraPradesh.pkl', 'rb') as f:
model = pickle.load(f)
forecast = model.predict(date)
x=forecast.iloc[ : ,3].values
return redirect(url_for("predict",pred=x))
else:
return render_template("state.html")
@app.route("/goa" ,methods=["POST", "GET"])
def goa():
if request.method == "POST":
date = list()
dat=request.form["nm"]
date.append([dat])
date = pd.DataFrame(date)
date.columns = ['ds']
date['ds'] = | to_datetime(date['ds']) | pandas.to_datetime |
#%% [markdown]
# # Author : <NAME>
# ***
# ## Capstone Project for Qualifying IBM Data Science Professional Certification
# ***
#%% [markdown]
#
# # Import Packages
#
#%%
import numpy as np # library to handle data in a vectorized manner
import pandas as pd # library for data analsysis
| pd.set_option('display.max_columns', None) | pandas.set_option |
# Tests aimed at pandas.core.indexers
import numpy as np
import pytest
from pandas.core.indexers import is_scalar_indexer, length_of_indexer, validate_indices
def test_length_of_indexer():
arr = np.zeros(4, dtype=bool)
arr[0] = 1
result = length_of_indexer(arr)
assert result == 1
def test_is_scalar_indexer():
indexer = (0, 1)
assert is_scalar_indexer(indexer, 2)
assert not is_scalar_indexer(indexer[0], 2)
indexer = (np.array([2]), 1)
assert is_scalar_indexer(indexer, 2)
indexer = (np.array([2]), np.array([3]))
assert is_scalar_indexer(indexer, 2)
indexer = (np.array([2]), np.array([3, 4]))
assert not is_scalar_indexer(indexer, 2)
assert not is_scalar_indexer(slice(None), 1)
indexer = 0
assert is_scalar_indexer(indexer, 1)
indexer = (0,)
assert is_scalar_indexer(indexer, 1)
class TestValidateIndices:
def test_validate_indices_ok(self):
indices = np.asarray([0, 1])
validate_indices(indices, 2)
validate_indices(indices[:0], 0)
validate_indices(np.array([-1, -1]), 0)
def test_validate_indices_low(self):
indices = np.asarray([0, -2])
with pytest.raises(ValueError, match="'indices' contains"):
validate_indices(indices, 2)
def test_validate_indices_high(self):
indices = np.asarray([0, 1, 2])
with pytest.raises(IndexError, match="indices are out"):
| validate_indices(indices, 2) | pandas.core.indexers.validate_indices |
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
from typing import List
import numpy as np
import pandas as pd
import pytest
from psbutils.arrayshapes import CHAIN_LENGTH_KEY, Bindings, ShapeInferrer, Shapes
A2 = np.zeros((2,), dtype=np.int32)
A24 = np.zeros((2, 4), dtype=np.float64)
A2468 = np.zeros((2, 4, 6, 8))
A146 = np.zeros((1, 4, 6))
def test_shapes_basic():
Shapes(A2, "2") # OK
Shapes()(A2, "2") # initialize empty and add __call__
Shapes(["foo", "bar"], "2") # OK; list can be coerced to array
Shapes([["foo", "bar"]], "1, 2") # OK; list can be coerced to array, spaces ignored
with pytest.raises(ValueError):
Shapes(A2, "3,") # 2 != 3; final comma ignored
def test_shapes2():
Shapes(A2, "X") # trivially OK: X is bound to 2
assert Shapes(A24, "X,Y").bindings == {"X": 2, "Y": 4} # trivially OK: X is bound to 2, Y is bound to 4
with pytest.raises(ValueError):
Shapes(A24, "X,X") # X is bound to 2, so cannot be re-bound to 4
Shapes(A24, ("X, 2*X")) # OK: X is bound to 2, 2*X evaluates to 4
Shapes(A24, "X, (X+6)/2") # OK: X is bound to 2, (2+6)/2 = 4.
Shapes(A24, "X, (X+6)/2") # OK: X is bound to 2, (2+6)/2 = 4.
def test_shapes_bad_syntax():
with pytest.raises(ValueError):
Shapes(A24, "X, 2*Y") # Y is part of an expression but has not been bound yet
with pytest.raises(ValueError):
Shapes(A24, "X,Foo*2") # variables in expressions have to be single characters, not splice variables
with pytest.raises(ValueError):
Shapes(A2, "X|Y") # "|" is not a valid operator
with pytest.raises(ValueError):
Shapes(A24, "X, (X+6/2") # missing closing parenthesis
with pytest.raises(ValueError):
Shapes(A24, "X,X/0") # division by zero not allowed
def test_shapes4():
with pytest.raises(ValueError):
Shapes(A2, "X")(A24, "Y,X") # bindings apply across different arrays
Shapes(A2, "X")(A24, "Y,X*2") # OK; X=2, so X*2=4 is OK in second array
with pytest.raises(ValueError):
Shapes(None, "X") # cannot have shape without array
with pytest.raises(ValueError):
Shapes(A2) # cannot have array without shape
def test_shapes5():
Shapes(A24, "X, 2*X", [np.int32, np.float64])
with pytest.raises(TypeError):
Shapes(A24, "X, 2*X", np.int32)
def test_shapes_splice_variables():
assert Shapes(A2468, "XX,Y,Z").bindings == {"XX": (2, 4), "Y": 6, "Z": 8}
assert Shapes(A2468, "X,YY,Z").bindings == {"X": 2, "YY": (4, 6), "Z": 8}
assert Shapes(A2468, "X,Y,ZZ").bindings == {"X": 2, "Y": 4, "ZZ": (6, 8)}
assert Shapes(A24, "X,YY,Z").bindings == {"X": 2, "YY": (), "Z": 4}
with pytest.raises(ValueError):
Shapes(A2468, "XX, YY, Z") # multiple splice variables
with pytest.raises(ValueError):
Shapes(A2, "X, YY, Z") # too many variables even if YY is empty
Shapes(A2468, "X,YY,Z")(A146, "1,YY") # YY expands to 4,6 in both expressions
def test_shapes_where():
Shapes(A2, "X")(A24, "X,Y").where(lambda bdgs: bdgs["Y"] == 2 * bdgs["X"])
with pytest.raises(ValueError):
Shapes(A2, "X")(A24, "X,Y").where(lambda bdgs: bdgs["Y"] == 3 * bdgs["X"])
def test_shapes_infer():
Shapes.clear()
for n in range(1, 5):
Shapes(np.concatenate([A2] * n), "X")(np.concatenate([A24] * n), "Y,Z")
lines = Shapes.infer()
assert len(lines) == 3 # lines[0] is header line
assert lines[1].endswith(": Y=X")
assert lines[2].endswith(": Z=4")
def infer_for_bindings(blist: List[Bindings]) -> List[str]:
return ShapeInferrer(blist).constraints()
def test_shapes_infer_for_bindings():
blist: List[Bindings] = []
assert infer_for_bindings(blist) == []
blist = [{"X": 1, "Y": 1}, {"X": 2, "Y": 2}, {"X": 5, "Y": 5}]
assert infer_for_bindings(blist) == ["Y=X"]
blist = [{"X": 1, "Y": 1}, {"X": 2, "Y": 1}]
assert infer_for_bindings(blist) == ["Y=1"]
blist = [{"X": 1, "Y": 1}]
assert infer_for_bindings(blist) == [] # don't infer anything from a single instance
blist = [{"X": 1, "Y": 1}, {"X": 2, "Y": 3}]
assert infer_for_bindings(blist) == ["Y=2*X-1"]
blist = [
{"X": 1, "Y": 2},
{"X": 2, "Y": 4},
{"X": 3, "Y": 6},
]
assert infer_for_bindings(blist) == ["Y=2*X"]
blist = [
{"X": 1, "Y": 2},
{"X": 2, "Y": 5},
{"X": 3, "Y": 6},
]
assert infer_for_bindings(blist) == [] # because not linear
blist = [
{"X": 1, "Y": 2, "Z": 2},
{"X": 2, "Y": 4, "Z": 3},
{"X": 3, "Y": 6, "Z": 4},
]
assert infer_for_bindings(blist) == ["Y=2*X", "Z=X+1"]
blist = [
{"X": 2, "Y": 3, "Z": 6},
{"X": 2, "Y": 4, "Z": 8},
{"X": 3, "Y": 5, "Z": 15},
{"X": 5, "Y": 7, "Z": 35},
]
assert infer_for_bindings(blist) == ["X*Y=Z"]
def test_shapes_infer_for_bindings_splice_variables():
blist: List[Bindings] = [{"X": 2, "YY": (3, 4)}, {"X": 5, "YY": (6, 4)}]
assert infer_for_bindings(blist) == ["dims(YY)=2", "first(YY)=X+1", "last(YY)=4", "prod(YY)=4*X+4"]
blist = [{"YY": (3, 4), "ZZ": (3, 4)}, {"YY": (6, 4), "ZZ": (6, 4)}]
assert infer_for_bindings(blist) == [
"ZZ=YY",
"dims(YY)=2",
"last(YY)=4",
]
def test_shape_inferrer_bindings():
blist: List[Bindings] = [{"X": 2, "YY": (3, 4)}, {"X": 5, "YY": (6, 4)}]
# Projections of the splice variable YY:
assert ShapeInferrer(blist).bindings == [
{"X": 2, "YY": (3, 4), "dims(YY)": 2, "first(YY)": 3, "last(YY)": 4, "prod(YY)": 12},
{"X": 5, "YY": (6, 4), "dims(YY)": 2, "first(YY)": 6, "last(YY)": 4, "prod(YY)": 24},
]
blist = [{"X": 2, "Y": 5}]
# Product of the ordinary variables X and Y:
assert ShapeInferrer(blist).bindings == [{"X": 2, "Y": 5, "X*Y": 10}]
# Bindings with non-maximal chain length omitted, and chain length item discarded:
blist = [{"X": 2, "Y": 5, CHAIN_LENGTH_KEY: 2}, {"X": 3, CHAIN_LENGTH_KEY: 1}]
assert ShapeInferrer(blist).bindings == [{"X": 2, "Y": 5, "X*Y": 10}]
def alternator(arr):
"""
Returns an object consisting of the even-numbered members of arr, whatever its type and dimensionality.
Raises an exception if there is an odd number of members.
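For example, ``alternator([1, 2, 3, 4])`` returns ``[1, 3]``.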
"""
assert len(arr) % 2 == 0
if isinstance(arr, list):
return [arr[i] for i in range(0, len(arr), 2)]
return arr[range(0, len(arr), 2)]
def test_shape_inference_process():
arrays = [
np.zeros((4)),
[0] * 6, # verify Shapes works with lists as well as numpy arrays
| pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) | pandas.DataFrame |
import numpy as np
from pandas import DataFrame, Series
import pandas as pd
from utilities import (LICENSE_KEY, generate_token, master_player_lookup,
YAHOO_FILE, YAHOO_KEY, YAHOO_SECRET)
import json
from yahoo_oauth import OAuth2
from pathlib import Path
# store credentials if don't already exist
if not Path(YAHOO_FILE).exists():
yahoo_credentials_dict = {
'consumer_key': YAHOO_KEY,
'consumer_secret': YAHOO_SECRET,
}
with open(YAHOO_FILE, 'w') as f:
json.dump(yahoo_credentials_dict, f)
OAUTH = OAuth2(None, None, from_file=YAHOO_FILE)
game_url = 'https://fantasysports.yahooapis.com/fantasy/v2/game/nfl'
OAUTH.session.get(game_url, params={'format': 'json'}).json()
###############################################################################
# roster data
###############################################################################
LEAGUE_ID = 43886
TEAM_ID = 11
WEEK = 1
roster_url = ('https://fantasysports.yahooapis.com/fantasy/v2' +
f'/team/406.l.{LEAGUE_ID}.t.{TEAM_ID}/roster;week={WEEK}')
# gets current data
# should run/look at, but we're overwriting with saved data next line
roster_json = OAUTH.session.get(roster_url, params={'format': 'json'}).json()
with open('./projects/integration/raw/yahoo/roster.json') as f:
roster_json = json.load(f)
# open up in browser and look at it
players_dict = (
roster_json['fantasy_content']['team'][1]['roster']['0']['players'])
players_dict.keys()
player0 = players_dict['0'] # <NAME>
def player_list_to_dict(player):
player_info = player['player'][0]
player_info_dict = {}
for x in player_info:
if (type(x) is dict) and (len(x.keys()) == 1):
for key in x.keys(): # tricky way to get access to key
player_info_dict[key] = x[key]
return player_info_dict
player_list_to_dict(player0)
def process_player(player):
player_info = player_list_to_dict(player)
pos_info = player['player'][1]['selected_position'][1]
dict_to_return = {}
dict_to_return['yahoo_id'] = int(player_info['player_id'])
dict_to_return['name'] = player_info['name']['full']
dict_to_return['player_position'] = player_info['primary_position']
dict_to_return['team_position'] = pos_info['position']
return dict_to_return
process_player(player0)
[process_player(player) for key, player
in players_dict.items() if key != 'count']
players_df = DataFrame(
[process_player(player) for key, player in players_dict.items() if key !=
'count' ])
players_df
wrs = players_df.query("team_position == 'WR'")
wrs
suffix = Series(range(1, len(wrs) + 1), index=wrs.index)
suffix
wrs['team_position'] + suffix.astype(str)
def add_pos_suffix(df_subset):
if len(df_subset) > 1:
suffix = Series(range(1, len(df_subset) + 1), index=df_subset.index)
df_subset['team_position'] = (
df_subset['team_position'] + suffix.astype(str))
return df_subset
players_df2 = pd.concat([
add_pos_suffix(players_df.query(f"team_position == '{x}'"))
for x in players_df['team_position'].unique()])
players_df2
players_df2['start'] = ~(players_df2['team_position'].str.startswith('BN') |
players_df2['team_position'].str.startswith('IR'))
players_df2
def process_players(players):
players_raw = DataFrame(
[process_player(player) for key, player in players.items() if key !=
'count' ])
players_df = pd.concat([
add_pos_suffix(players_raw.query(f"team_position == '{x}'"))
for x in players_raw['team_position'].unique()])
players_df['start'] = ~(players_df['team_position'].str.startswith('BN') |
players_df['team_position'].str.startswith('IR'))
return players_df
process_players(players_dict)
# players_dict = roster_json['fantasy_content']['team'][1]['roster']['0']['players']
team_id = roster_json['fantasy_content']['team'][0][1]['team_id']
players_df2['team_id'] = team_id
def process_roster(team):
players_df = process_players(team[1]['roster']['0']['players'])
team_id = team[0][1]['team_id']
players_df['team_id'] = team_id
return players_df
roster_df = process_roster(roster_json['fantasy_content']['team'])
points_url = ('https://fantasysports.yahooapis.com/fantasy/v2/' +
f'team/406.l.{LEAGUE_ID}.t.{TEAM_ID}' +
"/players;out=metadata,stats,ownership,percent_owned,draft_analysis")
# gets current data
# should run/look at, but we're overwriting with saved data next line
points_json = OAUTH.session.get(points_url, params={'format': 'json'}).json()
# saved data
with open('./projects/integration/raw/yahoo/points.json') as f:
points_json = json.load(f)
player_dict = points_json['fantasy_content']['team'][1]['players']
gronk = player_dict['1']
def process_player_stats(player):
dict_to_return = {}
dict_to_return['yahoo_id'] = int(player['player'][0][1]['player_id'])
dict_to_return['actual'] = float(
player['player'][1]['player_points']['total'])
return dict_to_return
process_player_stats(gronk)
def process_team_stats(team):
stats = DataFrame([process_player_stats(player) for key, player in
team.items() if key != 'count'])
stats.loc[stats['actual'] == 0, 'actual'] = np.nan
return stats
stats = process_team_stats(player_dict)
roster_df_w_stats = | pd.merge(roster_df, stats) | pandas.merge |
import requests
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
import pandas as pd
def query(addr):
r = requests.post(
'http://citizenatlas.dc.gov/newwebservices/locationverifier.asmx/findLocation2',
data={'f': 'json', 'str': addr})
r.raise_for_status()
return r.json()
def query_schools(x,y):
max_attempts = 10
attempts = 0
s = requests.Session()
url = 'http://geospatial.dcgis.dc.gov/SchoolsWebService/sy19-20/getSchools.asmx/findSchoolsNew?x={}&y={}&f=json'.format(x,y)
retries = Retry(total=5, backoff_factor = 1, status_forcelist=[500, 502, 503, 504])
s.mount('http://', HTTPAdapter(max_retries=retries))
r = s.get(url)
return r.json()
def add_schools(row):
schools = query_schools(row['XCOORD'],row['YCOORD'])
return pd.Series([schools['newBoundarySchools']['inBoundaryElementarySchool'],
schools['newBoundarySchools']['inBoundaryMiddleSchool'],
schools['newBoundarySchools']['inBoundaryHighSchool']])
if __name__ == "__main__":
df = | pd.read_csv('Address_Points.csv') | pandas.read_csv |
"""
The wntr.metrics.misc module contains metrics that do not fall into the
topographic, hydraulic, water quality, water security, or economic categories.
"""
from wntr.network import Junction
import pandas as pd
import numpy as np
import sys
import logging
if sys.version_info >= (3,0):
from functools import reduce
logger = logging.getLogger(__name__)
def query(arg1, operation, arg2):
"""
Return a boolean mask using comparison operators, i.e. "arg1 operation arg2".
For example, find the node-time pairs when demand < 90% expected demand.
Parameters
-----------
arg1 : pd.Panel, pd.DataFrame, pd.Series, np.array, list, scalar
Argument 1
operation : numpy.ufunc
Numpy universal comparison function, options = np.greater,
np.greater_equal, np.less, np.less_equal, np.equal, np.not_equal
arg2 : same size and type as arg1, or a scalar
Argument 2
Returns
-------
mask : same size and type as arg1
contains bool
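Examples
--------
A small self-contained sketch using a NumPy comparison ufunc:
>>> import numpy as np
>>> import pandas as pd
>>> demand = pd.Series([1.0, 0.8, 1.2])
>>> expected = pd.Series([1.0, 1.0, 1.0])
>>> query(demand, np.less, 0.9*expected)
0    False
1     True
2    False
dtype: bool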
"""
try:
mask = operation(arg1, arg2)
except AttributeError:
logger.error('operation(arg1, arg2) failed')
return mask
def average_water_consumed(wn):
"""
Compute average water consumed at each node, qbar, computed as follows:
.. math:: qbar=\dfrac{\sum_{k=1}^{K}\sum_{t=1}^{lcm_n}qbase_n m_n(k,t \bmod L(k))}{lcm_n}
where
:math:`K` is the number of demand patterns at node :math:`n`,
:math:`L(k)` is the number of time steps in pattern :math:`k`,
:math:`lcm_n` is the least common multiple of the demand patterns time steps for node :math:`n`,
:math:`qbase_n` is the base demand at node :math:`n` and
:math:`m_n(k,t mod L(k))` is the demand multiplier specified in pattern :math:`k` for node :math:`n` at time :math:`t mod L(k)`.
For example, if a node has two demand patterns specified in the EPANET input (INP) file, and
one pattern repeats every 6 hours and the other repeats every 12 hours, the first
pattern will be repeated once, making its total duration effectively 12 hours.
If any :math:`m_n(k,t mod L(k))` value is less than 0, then that node's population is 0.
Parameters
-----------
wn : WaterNetworkModel
Returns
-------
qbar : pd.Series
A pandas Series that contains average water consumed per node, in m3/s
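Examples
--------
A hedged sketch; it assumes an EPANET INP file (here named ``Net3.inp``) is
available on disk.
>>> import wntr                                          # doctest: +SKIP
>>> wn = wntr.network.WaterNetworkModel('Net3.inp')      # doctest: +SKIP
>>> qbar = average_water_consumed(wn)                    # doctest: +SKIP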
"""
qbar = | pd.Series() | pandas.Series |
import datetime
import json
import numpy as np
import requests
import pandas as pd
import streamlit as st
from copy import deepcopy
from fake_useragent import UserAgent
import webbrowser
from footer_utils import image, link, layout, footer
service_input = st.selectbox('Select Service',["","CoWin Vaccine Slot","Oxygen","Beds","Ambulance","Medicines","Miscellaneous","Important Links"])
if service_input =="CoWin Vaccine Slot":
temp_user_agent = UserAgent()
browser_header = {'User-Agent': temp_user_agent.random}
st.title("Vacciation Slot Availability")
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def import_dataset():
df = pd.read_csv("Combined_List.csv")
return df
def district_mapping(state_inp,df):
return list(df[df['State_Name']==state_inp]['District_Name'])
def column_mapping(df,col,value):
df_temp = deepcopy(df.loc[df[col] == value, :])
return df_temp
def availability_check(df,col,value):
df_temp2 = deepcopy(df.loc[df[col]>value, :])
return df_temp2
@st.cache(allow_output_mutation=True)
def Pageviews():
return []
mapping_df= import_dataset()
state_name = list((mapping_df['State_Name'].sort_values().unique()))
district_name = list((mapping_df['District_Name'].sort_values().unique()))
age = [18,45]
date_input = st.sidebar.slider('Select Date Range', min_value=0, max_value=50)
state_input = st.sidebar.selectbox('Select State',state_name)
district_input = st.sidebar.selectbox('Select District',district_mapping(state_input,mapping_df))
age_input = st.sidebar.selectbox('Select Minimum Age',[""]+age)
fee_input = st.sidebar.selectbox('Select Free or Paid',[""]+['Free','Paid'])
vaccine_input = st.sidebar.selectbox("Select Vaccine",[""]+['COVISHIELD','COVAXIN'])
available_input = st.sidebar.selectbox("Select Availability",[""]+['Available'])
col_rename = {
'date': 'Date',
'min_age_limit': 'Minimum Age Limit',
'available_capacity': 'Available Capacity',
'vaccine': 'Vaccine',
'pincode': 'Pincode',
'name': 'Hospital Name',
'state_name' : 'State',
'district_name' : 'District',
'block_name': 'Block Name',
'fee_type' : 'Fees'
}
DIST_ID = mapping_df[mapping_df['District_Name']==district_input]['District_ID'].values[0]
base_date = datetime.datetime.today()
date_list = [base_date+ datetime.timedelta(days = x) for x in range(date_input+1)]
date_string = [i.strftime('%d-%m-%y') for i in date_list]
final_df =None
for INP_DATE in date_string:
URL = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id={}&date={}".format(DIST_ID, INP_DATE)
data = requests.get(URL,headers = browser_header)
if (data.ok) and ('centers' in data.json()):
data_json = data.json()['centers']
if data_json is not None:
data_df = | pd.DataFrame(data_json) | pandas.DataFrame |
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import mul
from logbook import Logger
import numpy as np
from numpy import float64, int64, nan
import pandas as pd
from pandas import isnull
from pandas.core.tools.datetimes import normalize_date
from six import iteritems
from six.moves import reduce
from zipline.assets import (
Asset,
AssetConvertible,
Equity,
Future,
PricingDataAssociable,
)
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.data.continuous_future_reader import (
ContinuousFutureSessionBarReader,
ContinuousFutureMinuteBarReader
)
from zipline.assets.roll_finder import (
CalendarRollFinder,
VolumeRollFinder
)
from zipline.data.dispatch_bar_reader import (
AssetDispatchMinuteBarReader,
AssetDispatchSessionBarReader
)
from zipline.data.resample import (
DailyHistoryAggregator,
ReindexMinuteBarReader,
ReindexSessionBarReader,
)
from zipline.data.history_loader import (
DailyHistoryLoader,
MinuteHistoryLoader,
)
from zipline.data.us_equity_pricing import NoDataOnDate
from zipline.utils.math_utils import (
nansum,
nanmean,
nanstd
)
from zipline.utils.memoize import remember_last, weak_lru_cache
from zipline.utils.pandas_utils import timedelta_to_integral_minutes
from zipline.errors import (
NoTradeDataAvailableTooEarly,
NoTradeDataAvailableTooLate,
HistoryWindowStartsBeforeData,
)
log = Logger('DataPortal')
# Fields whose values can be queried for a given (asset, time) combination
BASE_FIELDS = frozenset([
"open",
"high",
"low",
"close",
"volume",
"price",
"contract",
"sid",
"last_traded",
"amount",
"turnover",
"cmv",
"tmv",
"prev_close",
"change_pct",
])
# # Raw daily equity data has already been preprocessed, so no ffill is needed
OHLCV_FIELDS = frozenset([
"open", "high", "low", "close", "volume",
"amount","turnover","cmv","tmv", "prev_close","change_pct",
])
# # Whether each column takes part in adjustments was handled earlier, so no extra control is needed
OHLCVP_FIELDS = frozenset([
"open", "high", "low", "close", "volume", "price",
"amount","turnover","cmv","tmv", "prev_close","change_pct",
])
HISTORY_FREQUENCIES = set(["1m", "1d"])
DEFAULT_MINUTE_HISTORY_PREFETCH = 1560
DEFAULT_DAILY_HISTORY_PREFETCH = 40
_DEF_M_HIST_PREFETCH = DEFAULT_MINUTE_HISTORY_PREFETCH
_DEF_D_HIST_PREFETCH = DEFAULT_DAILY_HISTORY_PREFETCH
class DataPortal(object):
"""Interface to all of the data that a zipline simulation needs.
This is used by the simulation runner to answer questions about the data,
like getting the prices of assets on a given day or to service history
calls.
Parameters
----------
asset_finder : zipline.assets.assets.AssetFinder
The AssetFinder instance used to resolve assets.
trading_calendar: zipline.utils.calendar.exchange_calendar.TradingCalendar
The calendar instance used to provide minute->session information.
first_trading_day : pd.Timestamp
The first trading day for the simulation.
equity_daily_reader : BcolzDailyBarReader, optional
The daily bar reader for equities. This will be used to service
daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
equity_minute_reader : BcolzMinuteBarReader, optional
The minute bar reader for equities. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
future_daily_reader : BcolzDailyBarReader, optional
The daily bar reader for futures. This will be used to service
daily data backtests or daily history calls in a minute backetest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
future_minute_reader : BcolzFutureMinuteBarReader, optional
The minute bar reader for futures. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
adjustment_reader : SQLiteAdjustmentWriter, optional
The adjustment reader. This is used to apply splits, dividends, and
other adjustment data to the raw data from the readers.
last_available_session : pd.Timestamp, optional
The last session to make available in session-level data.
last_available_minute : pd.Timestamp, optional
The last minute to make available in minute-level data.
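Examples
--------
A hedged construction sketch; ``finder``, ``calendar`` and ``daily_reader`` are
assumed to have been built elsewhere, and only a daily equity reader is wired in.
>>> portal = DataPortal(
...     asset_finder=finder,
...     trading_calendar=calendar,
...     first_trading_day=calendar.first_session,
...     equity_daily_reader=daily_reader,
... )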
"""
def __init__(self,
asset_finder,
trading_calendar,
first_trading_day,
equity_daily_reader=None,
equity_minute_reader=None,
future_daily_reader=None,
future_minute_reader=None,
adjustment_reader=None,
last_available_session=None,
last_available_minute=None,
minute_history_prefetch_length=_DEF_M_HIST_PREFETCH,
daily_history_prefetch_length=_DEF_D_HIST_PREFETCH):
self.trading_calendar = trading_calendar
self.asset_finder = asset_finder
self._adjustment_reader = adjustment_reader
# caches of sid -> adjustment list
self._splits_dict = {}
self._mergers_dict = {}
self._dividends_dict = {}
# Cache of sid -> the first trading day of an asset.
self._asset_start_dates = {}
self._asset_end_dates = {}
# Handle extra sources, like Fetcher.
self._augmented_sources_map = {}
self._extra_source_df = None
self._first_available_session = first_trading_day
if last_available_session:
self._last_available_session = last_available_session
else:
# Infer the last session from the provided readers.
last_sessions = [
reader.last_available_dt
for reader in [equity_daily_reader, future_daily_reader]
if reader is not None
]
if last_sessions:
self._last_available_session = min(last_sessions)
else:
self._last_available_session = None
if last_available_minute:
self._last_available_minute = last_available_minute
else:
# Infer the last minute from the provided readers.
last_minutes = [
reader.last_available_dt
for reader in [equity_minute_reader, future_minute_reader]
if reader is not None
]
if last_minutes:
self._last_available_minute = min(last_minutes)
else:
self._last_available_minute = None
aligned_equity_minute_reader = self._ensure_reader_aligned(
equity_minute_reader)
aligned_equity_session_reader = self._ensure_reader_aligned(
equity_daily_reader)
aligned_future_minute_reader = self._ensure_reader_aligned(
future_minute_reader)
aligned_future_session_reader = self._ensure_reader_aligned(
future_daily_reader)
self._roll_finders = {
'calendar': CalendarRollFinder(self.trading_calendar,
self.asset_finder),
}
aligned_minute_readers = {}
aligned_session_readers = {}
if aligned_equity_minute_reader is not None:
aligned_minute_readers[Equity] = aligned_equity_minute_reader
if aligned_equity_session_reader is not None:
aligned_session_readers[Equity] = aligned_equity_session_reader
if aligned_future_minute_reader is not None:
aligned_minute_readers[Future] = aligned_future_minute_reader
aligned_minute_readers[ContinuousFuture] = \
ContinuousFutureMinuteBarReader(
aligned_future_minute_reader,
self._roll_finders,
)
if aligned_future_session_reader is not None:
aligned_session_readers[Future] = aligned_future_session_reader
self._roll_finders['volume'] = VolumeRollFinder(
self.trading_calendar,
self.asset_finder,
aligned_future_session_reader,
)
aligned_session_readers[ContinuousFuture] = \
ContinuousFutureSessionBarReader(
aligned_future_session_reader,
self._roll_finders,
)
_dispatch_minute_reader = AssetDispatchMinuteBarReader(
self.trading_calendar,
self.asset_finder,
aligned_minute_readers,
self._last_available_minute,
)
_dispatch_session_reader = AssetDispatchSessionBarReader(
self.trading_calendar,
self.asset_finder,
aligned_session_readers,
self._last_available_session,
)
self._pricing_readers = {
'minute': _dispatch_minute_reader,
'daily': _dispatch_session_reader,
}
self._daily_aggregator = DailyHistoryAggregator(
self.trading_calendar.schedule.market_open,
_dispatch_minute_reader,
self.trading_calendar
)
self._history_loader = DailyHistoryLoader(
self.trading_calendar,
_dispatch_session_reader,
self._adjustment_reader,
self.asset_finder,
self._roll_finders,
prefetch_length=daily_history_prefetch_length,
)
self._minute_history_loader = MinuteHistoryLoader(
self.trading_calendar,
_dispatch_minute_reader,
self._adjustment_reader,
self.asset_finder,
self._roll_finders,
prefetch_length=minute_history_prefetch_length,
)
self._first_trading_day = first_trading_day
# Get the first trading minute
self._first_trading_minute, _ = (
self.trading_calendar.open_and_close_for_session(
self._first_trading_day
)
if self._first_trading_day is not None else (None, None)
)
# Store the locs of the first day and first minute
self._first_trading_day_loc = (
self.trading_calendar.all_sessions.get_loc(self._first_trading_day)
if self._first_trading_day is not None else None
)
def _ensure_reader_aligned(self, reader):
if reader is None:
return
if reader.trading_calendar.name == self.trading_calendar.name:
return reader
elif reader.data_frequency == 'minute':
return ReindexMinuteBarReader(
self.trading_calendar,
reader,
self._first_available_session,
self._last_available_session
)
elif reader.data_frequency == 'session':
return ReindexSessionBarReader(
self.trading_calendar,
reader,
self._first_available_session,
self._last_available_session
)
def _reindex_extra_source(self, df, source_date_index):
return df.reindex(index=source_date_index, method='ffill')
def handle_extra_source(self, source_df, sim_params):
"""
Extra sources always have a sid column.
We expand the given data (by forward filling) to the full range of
the simulation dates, so that lookup is fast during simulation.
"""
if source_df is None:
return
# Normalize all the dates in the df
source_df.index = source_df.index.normalize()
# source_df's sid column can either consist of assets we know about
# (such as sid(24)) or of assets we don't know about (such as
# palladium).
#
# In both cases, we break up the dataframe into individual dfs
# that only contain a single asset's information. ie, if source_df
# has data for PALLADIUM and GOLD, we split source_df into two
# dataframes, one for each. (same applies if source_df has data for
# AAPL and IBM).
#
# We then take each child df and reindex it to the simulation's date
# range by forward-filling missing values. this makes reads simpler.
#
# Finally, we store the data. For each column, we store a mapping in
# self.augmented_sources_map from the column to a dictionary of
# asset -> df. In other words,
# self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df
# holding that data.
source_date_index = self.trading_calendar.sessions_in_range(
sim_params.start_session,
sim_params.end_session
)
# Break the source_df up into one dataframe per sid. This lets
# us (more easily) calculate accurate start/end dates for each sid,
# de-dup data, and expand the data to fit the backtest start/end date.
grouped_by_sid = source_df.groupby(["sid"])
group_names = grouped_by_sid.groups.keys()
group_dict = {}
for group_name in group_names:
group_dict[group_name] = grouped_by_sid.get_group(group_name)
# This will be the dataframe which we query to get fetcher assets at
# any given time. Get's overwritten every time there's a new fetcher
# call
extra_source_df = pd.DataFrame()
for identifier, df in iteritems(group_dict):
# Before reindexing, save the earliest and latest dates
earliest_date = df.index[0]
latest_date = df.index[-1]
# Since we know this df only contains a single sid, we can safely
# de-dupe by the index (dt). If minute granularity, will take the
# last data point on any given day
df = df.groupby(level=0).last()
# Reindex the dataframe based on the backtest start/end date.
# This makes reads easier during the backtest.
df = self._reindex_extra_source(df, source_date_index)
if not isinstance(identifier, Asset):
# for fake assets we need to store a start/end date
self._asset_start_dates[identifier] = earliest_date
self._asset_end_dates[identifier] = latest_date
for col_name in df.columns.difference(['sid']):
if col_name not in self._augmented_sources_map:
self._augmented_sources_map[col_name] = {}
self._augmented_sources_map[col_name][identifier] = df
# Append to extra_source_df the reindexed dataframe for the single
# sid
extra_source_df = extra_source_df.append(df)
self._extra_source_df = extra_source_df
def _get_pricing_reader(self, data_frequency):
return self._pricing_readers[data_frequency]
def get_last_traded_dt(self, asset, dt, data_frequency):
"""
Given an asset and dt, returns the last traded dt from the viewpoint
of the given dt.
If there is a trade on the dt, the answer is dt provided.
"""
return self._get_pricing_reader(data_frequency).get_last_traded_dt(
asset, dt)
@staticmethod
def _is_extra_source(asset, field, map):
"""
Internal method that determines if this asset/field combination
represents a fetcher value or a regular OHLCVP lookup.
"""
# If we have an extra source with a column called "price", only look
# at it if it's on something like palladium and not AAPL (since our
# own price data always wins when dealing with assets).
return not (field in BASE_FIELDS and
(isinstance(asset, (Asset, ContinuousFuture))))
def _get_fetcher_value(self, asset, field, dt):
day = normalize_date(dt)
try:
return \
self._augmented_sources_map[field][asset].loc[day, field]
except KeyError:
return np.NaN
def get_spot_value(self, assets, field, dt, data_frequency):
"""
Public API method that returns a scalar value representing the value
of the desired asset's field at either the given dt.
Parameters
----------
assets : Asset, ContinuousFuture, or iterable of same.
The asset or assets whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume',
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The spot value of ``field`` for ``asset`` The return type is based
on the ``field`` requested. If the field is one of 'open', 'high',
'low', 'close', or 'price', the value will be a float. If the
``field`` is 'volume' the value will be an int. If the ``field`` is
'last_traded' the value will be a Timestamp.
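Examples
--------
A hedged sketch; ``portal`` and ``asset`` are assumed to exist already.
>>> portal.get_spot_value(
...     asset, 'price', pd.Timestamp('2016-01-04', tz='UTC'), 'daily')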
"""
assets_is_scalar = False
if isinstance(assets, (AssetConvertible, PricingDataAssociable)):
assets_is_scalar = True
else:
# If 'assets' was not one of the expected types then it should be
# an iterable.
try:
iter(assets)
except TypeError:
raise TypeError(
"Unexpected 'assets' value of type {}."
.format(type(assets))
)
session_label = self.trading_calendar.minute_to_session_label(dt)
def get_single_asset_value(asset):
if self._is_extra_source(
asset, field, self._augmented_sources_map):
return self._get_fetcher_value(asset, field, dt)
if field not in BASE_FIELDS:
raise KeyError("Invalid column: " + str(field))
if dt < asset.start_date or \
(data_frequency == "daily" and
session_label > asset.end_date) or \
(data_frequency == "minute" and
session_label > asset.end_date):
if field == "volume":
return 0
elif field == "contract":
return None
elif field != "last_traded":
return np.NaN
if data_frequency == "daily":
if field == "contract":
return self._get_current_contract(asset, session_label)
else:
return self._get_daily_spot_value(
asset, field, session_label,
)
else:
if field == "last_traded":
return self.get_last_traded_dt(asset, dt, 'minute')
elif field == "price":
return self._get_minute_spot_value(
asset, "close", dt, ffill=True,
)
elif field == "contract":
return self._get_current_contract(asset, dt)
else:
return self._get_minute_spot_value(asset, field, dt)
if assets_is_scalar:
return get_single_asset_value(assets)
else:
return list(map(get_single_asset_value, assets))
def get_adjustments(self, assets, field, dt, perspective_dt):
"""
Returns a list of adjustments between the dt and perspective_dt for the
given field and list of assets
Parameters
----------
assets : list of type Asset, or Asset
The asset, or assets whose adjustments are desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
Returns
-------
adjustments : list[Adjustment]
The adjustments to that field.
"""
if isinstance(assets, Asset):
assets = [assets]
adjustment_ratios_per_asset = []
def split_adj_factor(x):
return x if field != 'volume' else 1.0 / x
for asset in assets:
adjustments_for_asset = []
split_adjustments = self._get_adjustment_list(
asset, self._splits_dict, "SPLITS"
)
for adj_dt, adj in split_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(split_adj_factor(adj))
elif adj_dt > perspective_dt:
break
if field != 'volume':
merger_adjustments = self._get_adjustment_list(
asset, self._mergers_dict, "MERGERS"
)
for adj_dt, adj in merger_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
dividend_adjustments = self._get_adjustment_list(
asset, self._dividends_dict, "DIVIDENDS",
)
for adj_dt, adj in dividend_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
ratio = reduce(mul, adjustments_for_asset, 1.0)
adjustment_ratios_per_asset.append(ratio)
return adjustment_ratios_per_asset
def get_adjusted_value(self, asset, field, dt,
perspective_dt,
data_frequency,
spot_value=None):
"""
Returns a scalar value representing the value
of the desired asset's field at the given dt with adjustments applied.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The value of the given ``field`` for ``asset`` at ``dt`` with any
adjustments known by ``perspective_dt`` applied. The return type is
based on the ``field`` requested. If the field is one of 'open',
'high', 'low', 'close', or 'price', the value will be a float. If
the ``field`` is 'volume' the value will be an int. If the ``field``
is 'last_traded' the value will be a Timestamp.
"""
if spot_value is None:
# if this is a fetcher field, we want to use perspective_dt (not dt)
# because we want the new value as of midnight (fetcher only works
# on a daily basis, all timestamps are on midnight)
if self._is_extra_source(asset, field,
self._augmented_sources_map):
spot_value = self.get_spot_value(asset, field, perspective_dt,
data_frequency)
else:
spot_value = self.get_spot_value(asset, field, dt,
data_frequency)
if isinstance(asset, Equity):
ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
spot_value *= ratio
return spot_value
def _get_minute_spot_value(self, asset, column, dt, ffill=False):
reader = self._get_pricing_reader('minute')
if ffill:
# If forward filling, we want the last minute with values (up to
# and including dt).
query_dt = reader.get_last_traded_dt(asset, dt)
if pd.isnull(query_dt):
# no last traded dt, bail
if column == 'volume':
return 0
else:
return np.nan
else:
# If not forward filling, we just want dt.
query_dt = dt
try:
result = reader.get_value(asset.sid, query_dt, column)
except NoDataOnDate:
if column == 'volume':
return 0
else:
return np.nan
if not ffill or (dt == query_dt) or (dt.date() == query_dt.date()):
return result
# the value we found came from a different day, so we have to adjust
# the data if there are any adjustments on that day barrier
return self.get_adjusted_value(
asset, column, query_dt,
dt, "minute", spot_value=result
)
def _get_daily_spot_value(self, asset, column, dt):
reader = self._get_pricing_reader('daily')
if column == "last_traded":
last_traded_dt = reader.get_last_traded_dt(asset, dt)
if | isnull(last_traded_dt) | pandas.isnull |
"""
Code to gather time series related to LiveOcean forcing.
"""
import os
import sys
pth = os.path.abspath('../alpha')
if pth not in sys.path:
sys.path.append(pth)
import Lfun
Ldir = Lfun.Lstart()
import zrfun
import zfun
import numpy as np
import pandas as pd
import pickle
import netCDF4 as nc
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
# define where to put the output
outdir = Ldir['LOo'] + 'superplot/'
Lfun.make_dir(outdir)
gtag = 'cas6_v3'
gtagex = gtag + '_lo8b'
year = '2019'
# River forcing
# created by x_river/extract_rivers.py
fnr = gtag + '_' + year + '.01.01_' + year + '.12.31.p'
fn = Ldir['LOo'] + 'river/' + fnr
riv_df0 = pd.read_pickle(fn)
# keep only a subset of the rivers
riv_df = riv_df0.loc[:, ['columbia', 'fraser', 'skagit']]
# # Tide
# pth = os.path.abspath(Ldir['parent'] + 'ptools/tide_obs_mod/')
# if pth not in sys.path:
# sys.path.append(pth)
# import obsfun
# noaa_sn_dict, dfo_sn_dict, sn_dict = obsfun.get_sn_dicts()
# t_indir = Ldir['parent'] + 'ptools_output/tide/mod_data/' + gtagex + '/'
# t_fn = t_indir + 'tide_' + str(sn_dict['Seattle']) + '_' + year + '.p'
# tide_df = pickle.load(open(t_fn, 'rb'))
# # remove the timezone
# tide_df = tide_df.tz_localize(None)
# # make a dataframe of spring-neap conditions
# eta = tide_df['eta'].values
# eta_rms = np.sqrt(zfun.filt_godin(eta**2))
# tide_df['eta_rms'] = eta_rms
# # subsample to daily
# tide_daily_df = tide_df.loc[::24, 'eta_rms']
# Wind
fnw = Ldir['LOo'] + 'moor/custom_' + gtagex + '_'+year+'.01.01_'+year+'.12.31/shelf_hourly.nc'
moor_ds = nc.Dataset(fnw)
ot = moor_ds['ocean_time'][:]
svstr = moor_ds['svstr'][:]
svstr_lp = zfun.filt_AB8d(svstr)
wind_time = []
for tt in ot:
wind_time.append(Lfun.modtime_to_datetime(tt))
wind_df = pd.DataFrame(index=wind_time, columns=['svstr', 'svstr_lp'])
wind_df['svstr'] = svstr
wind_df['svstr_lp'] = svstr_lp
wind_df = wind_df.resample('D').mean()
# Tide
# make a dataframe of spring-neap conditions
eta = zfun.fillit(moor_ds['zeta'][:])
eta_rms = np.sqrt(zfun.filt_godin(eta**2))
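# eta_rms is the low-passed RMS of sea surface height, used here as a proxy
# for the spring-neap cycle (larger eta_rms roughly corresponds to spring
# tides); filt_godin is assumed to be a Godin-style tidal low-pass filter.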
# subsample to daily
tide_daily_df = | pd.DataFrame(index=wind_df.index, columns=['eta_rms']) | pandas.DataFrame |
""" Research results class """
import os
from collections import OrderedDict
import glob
import json
import dill
import pandas as pd
class Results:
""" Class for dealing with results of research
Parameters
----------
path : str
path to root folder of research
names : str, list or None
names of units (pipleines and functions) to load
variables : str, list or None
names of variables to load
iterations : int, list or None
iterations to load
repetition : int
index of repetition to load
configs, aliases : dict, Config, Option, Domain or None
configs to load
use_alias : bool
if True, use alias for model name, else use its full name.
Defaults to True
concat_config : bool
if True, concatenate all config options into one string and store
it in 'config' column, else use separate column for each option.
Defaults to False
drop_columns : bool
used only if `concat_config=True`. Drop or not columns with options and
leave only concatenated config.
kwargs : dict
kwargs will be interpreted as config parameters
Returns
-------
pandas.DataFrame or dict
will have columns: iteration, name (of pipeline/function)
and a column for the config. It will also have a column for each variable of the pipeline
and for each output of the functions that were saved as a result of the research.
**How to perform slicing**
Method `load` with default parameters will create pandas.DataFrame with all dumped
parameters. To specify a subset of results one can define names of pipelines/functions,
their produced variables/outputs, iterations and configs. For example,
we have the following research:
```
domain = Option('layout', ['cna', 'can', 'acn']) * Option('model', [VGG7, VGG16])
research = (Research()
.add_pipeline(train_ppl, variables='loss', name='train')
.add_pipeline(test_ppl, name='test', execute=100, run=True, import_from='train')
.add_callable(accuracy, returns='accuracy', name='test_accuracy',
execute=100, pipeline='test')
.add_domain(domain))
research.run(n_iters=10000)
```
The code
```
Results(research=research).load(iterations=np.arange(5000, 10000),
variables='accuracy', names='test_accuracy',
configs=Option('layout', ['cna', 'can']))
```
will load output of ``accuracy`` function for configs
that contain layout 'cna' or 'can' for iterations starting with 5000.
The resulting dataframe will have columns 'iteration', 'name',
'accuracy', 'layout', 'model'. One can get the same in the following way:
```
results = Results(research=research).load()
results = results[(results.iterations >= 5000) &
(results.name == 'test_accuracy') & results.layout.isin(['cna', 'can'])]
```
"""
def __init__(self, path, *args, **kwargs):
self.path = path
self.description = self._get_description()
self.configs = None
self.df = self._load(*args, **kwargs)
def _get_list(self, value):
if not isinstance(value, list):
value = [value]
return value
def _sort_files(self, files, iterations):
files = {file: int(file.split('_')[-1]) for file in files}
files = OrderedDict(sorted(files.items(), key=lambda x: x[1]))
result = []
start = 0
iterations = [item for item in iterations if item is not None]
for name, end in files.items():
if len(iterations) == 0:
intersection = pd.np.arange(start, end)
else:
intersection = pd.np.intersect1d(iterations, pd.np.arange(start, end))
if len(intersection) > 0:
result.append((name, intersection))
start = end
return OrderedDict(result)
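# Illustrative example (made-up file names): files ['res_3', 'res_6'] with
# iterations=[1, 4] returns OrderedDict([('res_3', array([1])),
# ('res_6', array([4]))]), because 'res_3' covers iterations 0-2 and
# 'res_6' covers iterations 3-5.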
def _slice_file(self, dumped_file, iterations_to_load, variables):
iterations = dumped_file['iteration']
if len(iterations) > 0:
elements_to_load = pd.np.array([pd.np.isin(it, iterations_to_load) for it in iterations])
res = OrderedDict()
for variable in ['iteration', 'sample_index', *variables]:
if variable in dumped_file:
res[variable] = | pd.np.array(dumped_file[variable]) | pandas.np.array |
#-*- coding:utf-8 -*-
import pandas as pd
import pdb
import sys,os
import random
import yaml
from collections import defaultdict
class GenerateData():
def __init__(self, conf):
self.conf = conf
def process(self, train_rate = 0.9):
ori_file = self.conf['ori_path']
#csv = pd.read_csv(ori_file, header = 0, error_bad_lines=False)
csv = pd.read_csv(ori_file, header = 0, sep=",", error_bad_lines=False)
self.text = csv['text']
self.label = csv['target']
self.data = defaultdict(list)
"""train:test = 8:2"""
#train_path = 'data/classify_train.csv'
#test_path = 'data/classify_test.csv'
train_path = self.conf['train_path']
test_path = self.conf['test_path']
for idx in range(len(self.text)):
self.data[self.label[idx]].append(self.text[idx])
train_x = []
train_y = []
test_x = []
test_y = []
for key in self.data:
all_len = len(self.data[key])
train_len = int(all_len*train_rate)
test_len = all_len - train_len
for idx,item in enumerate(self.data[key]):
if idx<train_len:
#if idx<all_len-2:
train_x.append(self.data[key][idx])
train_y.append(key)
else:
test_x.append(self.data[key][idx])
test_y.append(key)
dt_train = pd.DataFrame({'text':train_x,'intent':train_y})
dt_test = | pd.DataFrame({'text':test_x,'intent':test_y}) | pandas.DataFrame |
## Real Estate price predictor
import pandas as pd
import numpy as np
housing = pd.read_csv("data.csv")
housing.head()
# Imputing missing values
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy="median")
imputer.fit(housing)
X = imputer.transform(housing)
housing_tr = | pd.DataFrame(X, columns=housing.columns) | pandas.DataFrame |
from sklearn.datasets import load_wine
import pandas as pd
wine = load_wine()
columns_names = wine.feature_names
y = wine.target
X = wine.data
# Splitting features and target datasets into: train and test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.35)
# Training a Linear Regression model with fit()
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=4000)
lr.fit(X_train, y_train)
# Output of the training is a model: a + b*X0 + c*X1 + d*X2 ...
print(f"Intercept per class: {lr.intercept_}\n")
print(f"Coeficients per class: {lr.coef_}\n")
print(f"Available classes: {lr.classes_}\n")
print(f"Named Coeficients for class 1: {pd.DataFrame(lr.coef_[0], columns_names)}\n")
print(f"Named Coeficients for class 2: {pd.DataFrame(lr.coef_[1], columns_names)}\n")
print(f"Named Coeficients for class 3: { | pd.DataFrame(lr.coef_[2], columns_names) | pandas.DataFrame |
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.neighbors import NeighborhoodComponentsAnalysis as NCA
from sklearn.pipeline import Pipeline
import pandas as pd
from sklearn.metrics import matthews_corrcoef, confusion_matrix
from sklearn.metrics import classification_report as class_re
from sklearn.model_selection import GridSearchCV
from os import path
from collections import namedtuple
from sklearn.preprocessing import MinMaxScaler
from openpyxl import load_workbook
from openpyxl import Workbook
"""classification functions"""
def knn_classification_nca(X_train, Y_train, X_test, state=20):
"""A function that applies grid and random search to tune model and also gives a prediction, it also uses the
NCA transformation of the data which seems to improve performance"""
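# Hypothetical usage: with DataFrames X_train/X_test and labels Y_train,
# knn_classification_nca(X_train, Y_train, X_test) returns a namedtuple with
# the fitted GridSearchCV object, the test-set predictions and the
# training-set predictions.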
# Creating a score and parameters to search from
scoring = {"f1": "f1_weighted"}
grid_param2_nca = {"knn__n_neighbors": range(1, 11), "knn__p": range(1, 6),
"knn__metric": ["minkowski", "canberra", "hamming"]}
with_nca = namedtuple("with_nca", ["fitted_grid", "y_grid", "grid_train_Y"])
# Model setting
nca = NCA(random_state=state)
pipe = Pipeline(steps=[("nca", nca), ("knn", KNN(n_jobs=-1))])
knn_grid = GridSearchCV(pipe, grid_param2_nca, scoring=scoring, refit="f1", cv=5)
# Model training with nca
fitted_grid = knn_grid.fit(X_train, Y_train)
# Model predictions with nca
y_grid = fitted_grid.best_estimator_.predict(X_test)
# training data prediction with nca
grid_train_Y = fitted_grid.best_estimator_.predict(X_train)
nca_model_list = with_nca(*[fitted_grid, y_grid, grid_train_Y])
return nca_model_list
def knn_classification(X_train, Y_train, X_test):
"""A function that applies grid and random search to tune model and also gives a prediction, it also uses the
NCA transformation of the data which seems to improve performance"""
# Creating a score and parameters to search from
scoring = {"f1": "f1_weighted"}
grid_param2 = {"n_neighbors": range(1, 11), "p": range(1, 6), "metric": ["minkowski", "canberra", "hamming"]}
no_nca = namedtuple("no_nca", ["fitted_grid", "y_grid", "grid_train_Y"])
# Model setting
knn_grid = GridSearchCV(KNN(n_jobs=-1), grid_param2, scoring=scoring, refit="f1", cv=5)
# Model training
fitted_grid = knn_grid.fit(X_train, Y_train)
# Model predictions
y_grid = fitted_grid.best_estimator_.predict(X_test)
# training data prediction
grid_train_Y = fitted_grid.best_estimator_.predict(X_train)
no_nca_model_list = no_nca(*[fitted_grid, y_grid, grid_train_Y])
return no_nca_model_list
def print_score(no_nca_list, Y_train, Y_test):
""" The function prints the scores of the models and the prediction performance """
target_names = ["class 0", "class 1"]
no_nca = namedtuple("no_nca", ["grid_score", "grid_params", "grid_confusion", "tr_report", "te_report",
"train_mat", "grid_matthews", "grid_train_confusion"])
# Model comparison
grid_score = no_nca_list.fitted_grid.best_score_
grid_params = no_nca_list.fitted_grid.best_params_
# Training scores
grid_train_confusion = confusion_matrix(Y_train, no_nca_list.grid_train_Y)
grid_tr_report = class_re(Y_train, no_nca_list.grid_train_Y, target_names=target_names, output_dict=True)
train_mat = matthews_corrcoef(Y_train, no_nca_list.grid_train_Y)
# Test metrics grid
grid_confusion = confusion_matrix(Y_test, no_nca_list.y_grid)
grid_matthews = matthews_corrcoef(Y_test, no_nca_list.y_grid)
grid_te_report = class_re(Y_test, no_nca_list.y_grid, target_names=target_names, output_dict=True)
everything = no_nca(*[grid_score, grid_params, grid_confusion, grid_tr_report, grid_te_report, train_mat,
grid_matthews, grid_train_confusion])
return everything
def nested_cv(X, Y):
"""Performs something similar to a nested cross-validation"""
metric_list_nca = []
metric_list_no_nca = []
model_list = []
parameter_list_nca = []
parameter_list_no_nca = []
random_state = [20, 40, 70, 80, 90]
scaling = MinMaxScaler()
esterase = ['EH51(22)', 'EH75(16)', 'EH46(23)', 'EH98(11)', 'EH49(23)']
for states in random_state:
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.20, random_state=states, stratify=Y)
X_train = X_train.loc[[x for x in X_train.index if x not in esterase]]
X_test = X_test.loc[[x for x in X_test.index if x not in esterase]]
Y_train = Y_train.loc[[x for x in Y_train.index if x not in esterase]]
Y_test = Y_test.loc[[x for x in Y_test.index if x not in esterase]]
transformed_x = scaling.fit_transform(X_train)
transformed_x = pd.DataFrame(transformed_x)
transformed_x.index = X_train.index
transformed_x.columns = X_train.columns
test_x = scaling.transform(X_test)
test_x = pd.DataFrame(test_x)
test_x.index = X_test.index
test_x.columns = X_test.columns
# nca classification
nca_model_list = knn_classification_nca(transformed_x, Y_train, test_x)
nca_score = print_score(nca_model_list, Y_train, Y_test)
metric_list_nca.append([nca_score.grid_score, nca_score.train_mat, nca_score.grid_matthews])
parameter_list_nca.append(
[nca_score.grid_params, nca_score.grid_confusion, nca_score.tr_report, nca_score.te_report,
nca_score.grid_train_confusion])
# no nca classification
no_nca_model_list = knn_classification(transformed_x, Y_train, test_x)
no_nca_score = print_score(no_nca_model_list, Y_train, Y_test)
metric_list_no_nca.append([no_nca_score.grid_score, no_nca_score.train_mat, no_nca_score.grid_matthews])
parameter_list_no_nca.append(
[no_nca_score.grid_params, no_nca_score.grid_confusion, no_nca_score.tr_report, no_nca_score.te_report,
no_nca_score.grid_train_confusion])
# puts the models into a list
model_list.append([nca_model_list.fitted_grid, nca_model_list.y_grid, no_nca_model_list.fitted_grid,
no_nca_model_list.y_grid])
return model_list, metric_list_nca, metric_list_no_nca, parameter_list_nca, parameter_list_no_nca, random_state
def mean_nested(X, Y):
"""From the results of the nested_CV it computes the means of the different performance metrics"""
model_list, metric_list_nca, metric_list_no_nca, parameter_list_nca, parameter_list_no_nca, random_state = nested_cv(
X, Y)
score_record = namedtuple("scores", ["grid_score", "train_mat", "grid_matthews"])
parameter_record = namedtuple("parameters", ["grid_params", "grid_confusion", "tr_report", "te_report",
"grid_train_confusion"])
model_record = namedtuple("models", ["nca_fitted", "nca_y", "no_nca_fitted", "no_nca_y"])
# with nca
parameters_nca = [parameter_record(*z) for z in parameter_list_nca]
records_nca = [score_record(*y) for y in metric_list_nca]
named_models = [model_record(*d) for d in model_list]
# Without nca
parameters_no_nca = [parameter_record(*z) for z in parameter_list_no_nca]
records_no_nca = [score_record(*y) for y in metric_list_no_nca]
return named_models, parameters_nca, records_nca, parameters_no_nca, records_no_nca, random_state
def unlisting(parameters, records, mode=1):
""" A function that separates all the scores in independent lists"""
# Getting all scores grid search
g_mathew = [x.grid_matthews for x in records]
train_mat = [x.train_mat for x in records]
cv_score = [x.grid_score for x in records]
if mode == 1:
g_neighbours = [y.grid_params["n_neighbors"] for y in parameters]
g_p = [y.grid_params["p"] for y in parameters]
g_distance = [y.grid_params["metric"] for y in parameters]
else:
g_neighbours = [y.grid_params["knn__n_neighbors"] for y in parameters]
g_p = [y.grid_params["knn__p"] for y in parameters]
g_distance = [y.grid_params["knn__metric"] for y in parameters]
return g_mathew, train_mat, cv_score, g_neighbours, g_p, g_distance
def to_dataframe(parameters, records, random_state, mode):
matrix = namedtuple("confusion_matrix", ["true_n", "false_p", "false_n", "true_p"])
g_mathew, train_mat, cv_score, g_neighbours, g_p, g_distance = unlisting(parameters, records, mode)
# Taking the confusion matrix
g_test_confusion = [matrix(*x.grid_confusion.ravel()) for x in parameters]
g_training_confusion = [matrix(*x.grid_train_confusion.ravel()) for x in parameters]
g_te_report = [ | pd.DataFrame(x.te_report) | pandas.DataFrame |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.ticker as mticker
import sys
starterpath = sys.argv[1]
fname = starterpath + '/ccbench-all/uartlog'
outputpath = starterpath + '/outputplot.pdf'
f = open(fname, 'r')
q = f.readlines()
f.close()
q = filter(lambda x: x.startswith('App:'), q)
q = map(lambda x: x.strip().split(","), q)
# materialize as a list so it can be filtered several times below
# (Python 3 map/filter objects are single-use iterators)
q = list(map(lambda x: list(map(lambda z: z.split(":"), x)), q))
def arr_to_dict(q):
# to dicts
as_dict = []
for elem in q:
d = dict()
for pair in elem:
d[pair[0]] = pair[1]
as_dict.append(d)
return as_dict
cacheline_stride_bmark = filter(lambda x: ['RunType', '[16]'] in x, q)
unit_stride_bmark = filter(lambda x: ['RunType', '[1]'] in x, q)
random_bmark = filter(lambda x: ['RunType', '[0]'] in x, q)
def data_from_full_dict(array_of_dict):
times = []
sizes = []
for d in array_of_dict:
time = eval(d['Time'])[0]
appsize = eval(d['AppSize'])[0] * 4
times.append(time)
sizes.append(appsize)
return {'size': sizes, 'time': times}
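# Each dict is expected to look like {'Time': '[1.23]', 'AppSize': '[1024]', ...};
# eval() unwraps the single-element list strings, and AppSize is multiplied by
# 4, presumably converting 4-byte words to bytes.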
cacheline_stride_bmark_data = data_from_full_dict(arr_to_dict(cacheline_stride_bmark))
unit_stride_bmark_data = data_from_full_dict(arr_to_dict(unit_stride_bmark))
random_bmark_data = data_from_full_dict(arr_to_dict(random_bmark))
cacheline_ccbench_df = | pd.DataFrame(data=cacheline_stride_bmark_data) | pandas.DataFrame |
from bs4 import BeautifulSoup as bs
import requests
import pandas as pd
import os
import state_code
from state_code import SAMPLE
import collections
import plotly.graph_objects as go
import numpy as np
import plotly.express as px
files = os.listdir('./dataset/')
files.sort()
biggest_con = 0
def set_color_group(item):
#print(lst)
pop = int(item)
# population thresholds: 1000, 5000, 10000, 50000
if pop <= 0:
return 0
elif pop < 1000:
return 1
elif 1000 <= pop < 5000:
return 2
elif 5000 <= pop < 10000:
return 3
elif 10000 <= pop < 50000:
return 4
else:
return 5
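# Example mapping with assumed inputs: set_color_group("0") -> 0,
# set_color_group("500") -> 1, set_color_group("7500") -> 3,
# set_color_group("60000") -> 5.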
entire = pd.DataFrame(columns=['code','state','total_confirmed','color_code', 'date'])
daily_total = []
#output_df = []
for i in files:
#print(i)
daily = 0
total = {}
raw_df = pd.read_csv('./dataset/'+i).fillna(0)
country = [col for col in raw_df.columns if 'Country' in col][0]
state = [col for col in raw_df.columns if 'Province' in col][0]
check = raw_df[country] == 'US'
df = raw_df[check]
for _, row in df.iterrows():
us_state = row[state].split(',')[0]
if us_state not in state_code.CODE:
if len(row[state].split(',')) < 2 or len((row[state].split(',')[1]).split(' ')[1]) < 2:
continue
s_code = (row[state].split(',')[1]).split(' ')[1]
if s_code in state_code.CODE_R:
us_state = state_code.CODE_R[s_code]
else:
print(row[state])
continue
#print(row[state],i)
if us_state in total:
#print(total[temp])
total[us_state][0] += row['Confirmed']
total[us_state][1] += row['Deaths']
total[us_state][2] += row['Recovered']
else:
total[us_state] = [row['Confirmed'], row['Deaths'], row['Recovered']]
#total[row[state]][3] += row['Active']
#print(total)
#usa_df = pd.DataFrame(columns=['code','total_confirmed','total_death','total_recovered','date'])
#usa_df = pd.DataFrame(columns=['code','state','total_confirmed','color_code', 'date'])
# total maps state name -> [number_confirmed, number_deaths, number_recovered]
# append one row per state for this date
for j in total:
item = total[j][0]
biggest_con = max(biggest_con, total[j][0])
daily += int(total[j][0])
a_row = [state_code.CODE[j], j]+[str(int(item))]+[set_color_group(item)]+ [i[:10][:5]]
leng = len(entire)
entire.loc[leng] = a_row
#usa_df.loc[leng] = a_row
#row_df = pd.DataFrame([a_row])
# usa_df = usa_df.append(a_row, ignore_index=True)
daily_total.append(daily)
#print(usa_df)
#output_df.append(usa_df.fillna(0))
#print(entire.dtypes)
entire['total_confirmed'] = pd.to_numeric(entire.total_confirmed, errors='coerce')
entire['color_code'] = | pd.to_numeric(entire.color_code, errors='coerce') | pandas.to_numeric |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list( | range(10) | pandas.compat.range |
"""
Unit test for smart explainer
"""
import unittest
from unittest.mock import patch, Mock
import os
from os import path
from pathlib import Path
import types
import pandas as pd
import numpy as np
import catboost as cb
from sklearn.linear_model import LinearRegression
from shapash.explainer.smart_explainer import SmartExplainer
from shapash.explainer.multi_decorator import MultiDecorator
from shapash.explainer.smart_state import SmartState
import category_encoders as ce
import shap
def init_sme_to_pickle_test():
"""
Init sme to pickle test
TODO: Docstring
Returns
-------
[type]
[description]
"""
current = Path(path.abspath(__file__)).parent.parent.parent
pkl_file = path.join(current, 'data/xpl.pkl')
xpl = SmartExplainer()
contributions = pd.DataFrame([[-0.1, 0.2, -0.3], [0.1, -0.2, 0.3]])
y_pred = pd.DataFrame(data=np.array([1, 2]), columns=['pred'])
dataframe_x = pd.DataFrame([[1, 2, 3], [1, 2, 3]])
xpl.compile(contributions=contributions, x=dataframe_x, y_pred=y_pred, model=LinearRegression())
xpl.filter(max_contrib=2)
return pkl_file, xpl
class TestSmartExplainer(unittest.TestCase):
"""
Unit test smart explainer
TODO: Docstring
"""
def test_init(self):
"""
test init smart explainer
"""
xpl = SmartExplainer()
assert hasattr(xpl, 'plot')
def assertRaisesWithMessage(self, msg, func, *args, **kwargs):
try:
func(*args, **kwargs)
self.assertFail()
except Exception as inst:
self.assertEqual(inst.args[0]['message'], msg)
@patch('shapash.explainer.smart_explainer.SmartState')
def test_choose_state_1(self, mock_smart_state):
"""
Unit test choose state 1
Parameters
----------
mock_smart_state : [type]
[description]
"""
xpl = SmartExplainer()
xpl.choose_state('contributions')
mock_smart_state.assert_called()
@patch('shapash.explainer.smart_explainer.MultiDecorator')
def test_choose_state_2(self, mock_multi_decorator):
"""
Unit test choose state 2
Parameters
----------
mock_multi_decorator : [type]
[description]
"""
xpl = SmartExplainer()
xpl.choose_state([1, 2, 3])
mock_multi_decorator.assert_called()
def test_validate_contributions_1(self):
"""
Unit test validate contributions 1
"""
xpl = SmartExplainer()
contributions = [
np.array([[2, 1], [8, 4]]),
np.array([[5, 5], [0, 0]])
]
model = Mock()
model._classes = np.array([1, 3])
model.predict = types.MethodType(self.predict, model)
model.predict_proba = types.MethodType(self.predict_proba, model)
xpl.model = model
xpl._case = "classification"
xpl._classes = list(model._classes)
xpl.state = xpl.choose_state(contributions)
xpl.x_init = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
expected_output = [
pd.DataFrame(
[[2, 1], [8, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
),
pd.DataFrame(
[[5, 5], [0, 0]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
]
output = xpl.validate_contributions(contributions)
assert len(expected_output) == len(output)
test_list = [pd.testing.assert_frame_equal(e, m) for e, m in zip(expected_output, output)]
assert all(x is None for x in test_list)
def test_apply_preprocessing_1(self):
"""
Unit test apply preprocessing 1
"""
xpl = SmartExplainer()
contributions = [1, 2, 3]
output = xpl.apply_preprocessing(contributions)
expected = contributions
self.assertListEqual(output, expected)
def test_apply_preprocessing_2(self):
"""
Unit test apply preprocessing 2
"""
xpl = SmartExplainer()
xpl.state = Mock()
preprocessing = Mock()
contributions = [1, 2, 3]
xpl.apply_preprocessing(contributions, preprocessing)
xpl.state.inverse_transform_contributions.assert_called()
def test_modify_postprocessing_1(self):
"""
Unit test modify postprocessing 1
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
xpl.columns_dict = {0: 'Col1', 1:'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
postprocessing = {0: {'type' : 'suffix', 'rule':' t'},
'Column2': {'type' : 'prefix', 'rule' : 'test'}}
expected_output = {
'Col1': {'type' : 'suffix', 'rule':' t'},
'Col2': {'type' : 'prefix', 'rule' : 'test'}
}
output = xpl.modify_postprocessing(postprocessing)
assert output == expected_output
def test_modify_postprocessing_2(self):
"""
Unit test modify postprocessing 2
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
xpl.columns_dict = {0: 'Col1', 1: 'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
postprocessing = {'Error': {'type': 'suffix', 'rule': ' t'}}
with self.assertRaises(ValueError):
xpl.modify_postprocessing(postprocessing)
def test_check_postprocessing_1(self):
"""
Unit test check_postprocessing
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
xpl.columns_dict = {0: 'Col1', 1: 'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
postprocessing1 = {0: {'Error': 'suffix', 'rule': ' t'}}
postprocessing2 = {0: {'type': 'Error', 'rule': ' t'}}
postprocessing3 = {0: {'type': 'suffix', 'Error': ' t'}}
postprocessing4 = {0: {'type': 'suffix', 'rule': ' '}}
postprocessing5 = {0: {'type': 'case', 'rule': 'lower'}}
postprocessing6 = {0: {'type': 'case', 'rule': 'Error'}}
with self.assertRaises(ValueError):
xpl.check_postprocessing(postprocessing1)
xpl.check_postprocessing(postprocessing2)
xpl.check_postprocessing(postprocessing3)
xpl.check_postprocessing(postprocessing4)
xpl.check_postprocessing(postprocessing5)
xpl.check_postprocessing(postprocessing6)
def test_apply_postprocessing_1(self):
"""
Unit test apply_postprocessing 1
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
xpl.columns_dict = {0: 'Col1', 1: 'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
assert np.array_equal(xpl.x_pred, xpl.apply_postprocessing())
def test_apply_postprocessing_2(self):
"""
Unit test apply_postprocessing 2
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
xpl.columns_dict = {0: 'Col1', 1: 'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
postprocessing = {'Col1': {'type': 'suffix', 'rule': ' t'},
'Col2': {'type': 'prefix', 'rule': 'test'}}
expected_output = pd.DataFrame(
data=[['1 t', 'test2'],
['3 t', 'test4']],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
output = xpl.apply_postprocessing(postprocessing)
assert np.array_equal(output, expected_output)
def test_check_contributions_1(self):
"""
Unit test check contributions 1
"""
xpl = SmartExplainer()
xpl.contributions, xpl.x_pred = Mock(), Mock()
xpl.state = Mock()
xpl.check_contributions()
xpl.state.check_contributions.assert_called_with(xpl.contributions, xpl.x_pred)
def test_check_contributions_2(self):
"""
Unit test check contributions 2
"""
xpl = SmartExplainer()
xpl.contributions, xpl.x_pred = Mock(), Mock()
mock_state = Mock()
mock_state.check_contributions.return_value = False
xpl.state = mock_state
with self.assertRaises(ValueError):
xpl.check_contributions()
def test_check_label_dict_1(self):
"""
Unit test check label dict 1
"""
xpl = SmartExplainer(label_dict={1: 'Yes', 0: 'No'})
xpl._classes = [0, 1]
xpl._case = 'classification'
xpl.check_label_dict()
def test_check_label_dict_2(self):
"""
Unit test check label dict 2
"""
xpl = SmartExplainer()
xpl._case = 'regression'
xpl.check_label_dict()
def test_check_features_dict_1(self):
"""
Unit test check features dict 1
"""
xpl = SmartExplainer(features_dict={'Age': 'Age (Years Old)'})
xpl.columns_dict = {0: 'Age', 1: 'Education', 2: 'Sex'}
xpl.check_features_dict()
assert xpl.features_dict['Age'] == 'Age (Years Old)'
assert xpl.features_dict['Education'] == 'Education'
@patch('shapash.explainer.smart_explainer.SmartExplainer.choose_state')
@patch('shapash.explainer.smart_explainer.SmartExplainer.apply_preprocessing')
def test_compile_0(self, mock_apply_preprocessing, mock_choose_state):
"""
Unit test compile
Parameters
----------
mock_apply_preprocessing : [type]
[description]
mock_choose_state : [type]
[description]
"""
xpl = SmartExplainer()
mock_state = Mock()
mock_choose_state.return_value = mock_state
model = lambda: None
model.predict = types.MethodType(self.predict, model)
mock_state.rank_contributions.return_value = 1, 2, 3
contributions = pd.DataFrame([[-0.1, 0.2, -0.3], [0.1, -0.2, 0.3]])
mock_state.validate_contributions.return_value = contributions
mock_apply_preprocessing.return_value = contributions
x_pred = pd.DataFrame([[1, 2, 3], [1, 2, 3]])
xpl.compile(x=x_pred, model=model, contributions=contributions)
assert hasattr(xpl, 'state')
assert xpl.state == mock_state
assert hasattr(xpl, 'x_pred')
pd.testing.assert_frame_equal(xpl.x_pred, x_pred)
assert hasattr(xpl, 'contributions')
pd.testing.assert_frame_equal(xpl.contributions, contributions)
mock_choose_state.assert_called()
mock_state.validate_contributions.assert_called()
mock_apply_preprocessing.assert_called()
mock_state.rank_contributions.assert_called()
assert xpl._case == "regression"
def test_compile_1(self):
"""
Unit test compile 1
checking compile method without model
"""
df = pd.DataFrame(range(0, 21), columns=['id'])
df['y'] = df['id'].apply(lambda x: 1 if x < 10 else 0)
df['x1'] = np.random.randint(1, 123, df.shape[0])
df['x2'] = np.random.randint(1, 3, df.shape[0])
df = df.set_index('id')
clf = cb.CatBoostClassifier(n_estimators=1).fit(df[['x1', 'x2']], df['y'])
xpl = SmartExplainer()
xpl.compile(model=clf, x=df[['x1', 'x2']])
assert xpl._case == "classification"
self.assertListEqual(xpl._classes, [0, 1])
def test_compile_2(self):
"""
Unit test compile 2
checking new attributes added to the compile method
"""
df = pd.DataFrame(range(0, 5), columns=['id'])
df['y'] = df['id'].apply(lambda x: 1 if x < 2 else 0)
df['x1'] = np.random.randint(1, 123, df.shape[0])
df['x2'] = ["S", "M", "S", "D", "M"]
df = df.set_index('id')
encoder = ce.OrdinalEncoder(cols=["x2"], handle_unknown="None")
encoder_fitted = encoder.fit(df)
df_encoded = encoder_fitted.transform(df)
output = df[["x1", "x2"]].copy()
output["x2"] = ["single", "married", "single", "divorced", "married"]
clf = cb.CatBoostClassifier(n_estimators=1).fit(df_encoded[['x1', 'x2']], df_encoded['y'])
postprocessing_1 = {"x2": {
"type": "transcoding",
"rule": {"S": "single", "M": "married", "D": "divorced"}}}
postprocessing_2 = {
"family_situation": {
"type": "transcoding",
"rule": {"S": "single", "M": "married", "D": "divorced"}}}
xpl_postprocessing1 = SmartExplainer()
xpl_postprocessing2 = SmartExplainer(features_dict={"x1": "age",
"x2": "family_situation"}
)
xpl_postprocessing3 = SmartExplainer()
xpl_postprocessing1.compile(model=clf,
x=df_encoded[['x1', 'x2']],
preprocessing=encoder_fitted,
postprocessing=postprocessing_1)
xpl_postprocessing2.compile(model=clf,
x=df_encoded[['x1', 'x2']],
preprocessing=encoder_fitted,
postprocessing=postprocessing_2)
xpl_postprocessing3.compile(model=clf,
x=df_encoded[['x1', 'x2']],
preprocessing=None,
postprocessing=None)
assert hasattr(xpl_postprocessing1, "preprocessing")
assert hasattr(xpl_postprocessing1, "postprocessing")
assert hasattr(xpl_postprocessing2, "preprocessing")
assert hasattr(xpl_postprocessing2, "postprocessing")
assert hasattr(xpl_postprocessing3, "preprocessing")
assert hasattr(xpl_postprocessing3, "postprocessing")
pd.testing.assert_frame_equal(xpl_postprocessing1.x_pred, output)
pd.testing.assert_frame_equal(xpl_postprocessing2.x_pred, output)
assert xpl_postprocessing1.preprocessing == encoder_fitted
assert xpl_postprocessing2.preprocessing == encoder_fitted
assert xpl_postprocessing1.postprocessing == postprocessing_1
assert xpl_postprocessing2.postprocessing == postprocessing_1
def test_compile_3(self):
"""
Unit test compile 3
checking compile method without model
"""
df = pd.DataFrame(range(0, 21), columns=['id'])
df['y'] = df['id'].apply(lambda x: 1 if x < 10 else 0)
df['x1'] = np.random.randint(1, 123, df.shape[0])
df['x2'] = np.random.randint(1, 3, df.shape[0])
df = df.set_index('id')
clf = cb.CatBoostClassifier(n_estimators=1).fit(df[['x1', 'x2']], df['y'])
clf_explainer = shap.TreeExplainer(clf)
contrib = pd.DataFrame(
[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
columns=['contribution_0', 'contribution_1', 'contribution_2', 'contribution_3'],
index=[0, 1, 2]
)
xpl = SmartExplainer()
with self.assertRaises(ValueError):
xpl.compile(model=clf, x=df[['x1', 'x2']], explainer=clf_explainer, contributions=contrib)
def test_filter_0(self):
"""
Unit test filter 0
"""
xpl = SmartExplainer()
mock_data = {'var_dict': 1, 'contrib_sorted': 2, 'x_sorted': 3}
xpl.data = mock_data
mock_state = Mock()
xpl.state = mock_state
xpl.filter()
mock_state.init_mask.assert_called()
mock_state.hide_contributions.assert_not_called()
mock_state.cap_contributions.assert_not_called()
mock_state.sign_contributions.assert_not_called()
mock_state.combine_masks.assert_called()
mock_state.cutoff_contributions.assert_not_called()
assert hasattr(xpl, 'mask')
mock_state.compute_masked_contributions.assert_called()
assert hasattr(xpl, 'masked_contributions')
@patch('shapash.explainer.smart_explainer.SmartExplainer.check_features_name')
def test_filter_1(self, mock_check_features_name):
"""
Unit test filter 1
Parameters
----------
mock_check_features_name : [type]
[description]
"""
xpl = SmartExplainer()
mock_check_features_name.return_value = [1, 2]
mock_data = {'var_dict': 1, 'contrib_sorted': 2, 'x_sorted': 3}
xpl.data = mock_data
mock_state = Mock()
xpl.state = mock_state
xpl.filter(features_to_hide=['X1', 'X2'])
mock_state.init_mask.assert_called()
mock_state.hide_contributions.assert_called()
mock_state.cap_contributions.assert_not_called()
mock_state.sign_contributions.assert_not_called()
mock_state.combine_masks.assert_called()
mock_state.cutoff_contributions.assert_not_called()
assert hasattr(xpl, 'mask')
mock_state.compute_masked_contributions.assert_called()
assert hasattr(xpl, 'masked_contributions')
def test_filter_2(self):
"""
Unit test filter 2
"""
xpl = SmartExplainer()
mock_data = {'var_dict': 1, 'contrib_sorted': 2, 'x_sorted': 3}
xpl.data = mock_data
mock_state = Mock()
xpl.state = mock_state
xpl.filter(threshold=0.1)
mock_state.init_mask.assert_called()
mock_state.hide_contributions.assert_not_called()
mock_state.cap_contributions.assert_called()
mock_state.sign_contributions.assert_not_called()
mock_state.combine_masks.assert_called()
mock_state.cutoff_contributions.assert_not_called()
assert hasattr(xpl, 'mask')
mock_state.compute_masked_contributions.assert_called()
assert hasattr(xpl, 'masked_contributions')
def test_filter_3(self):
"""
Unit test filter 3
"""
xpl = SmartExplainer()
mock_data = {'var_dict': 1, 'contrib_sorted': 2, 'x_sorted': 3}
xpl.data = mock_data
mock_state = Mock()
xpl.state = mock_state
xpl.filter(positive=True)
mock_state.init_mask.assert_called()
mock_state.hide_contributions.assert_not_called()
mock_state.cap_contributions.assert_not_called()
mock_state.sign_contributions.assert_called()
mock_state.combine_masks.assert_called()
mock_state.cutoff_contributions.assert_not_called()
assert hasattr(xpl, 'mask')
mock_state.compute_masked_contributions.assert_called()
assert hasattr(xpl, 'masked_contributions')
def test_filter_4(self):
"""
Unit test filter 4
"""
xpl = SmartExplainer()
mock_data = {'var_dict': 1, 'contrib_sorted': 2, 'x_sorted': 3}
xpl.data = mock_data
mock_state = Mock()
xpl.state = mock_state
xpl.filter(max_contrib=10)
mock_state.init_mask.assert_called()
mock_state.hide_contributions.assert_not_called()
mock_state.cap_contributions.assert_not_called()
mock_state.sign_contributions.assert_not_called()
mock_state.combine_masks.assert_called()
mock_state.cutoff_contributions.assert_called()
assert hasattr(xpl, 'mask')
mock_state.compute_masked_contributions.assert_called()
assert hasattr(xpl, 'masked_contributions')
def test_filter_5(self):
"""
Unit test filter 5
"""
xpl = SmartExplainer()
mock_data = {'var_dict': 1, 'contrib_sorted': 2, 'x_sorted': 3}
xpl.data = mock_data
mock_state = Mock()
xpl.state = mock_state
xpl.filter(positive=True, max_contrib=10)
mock_state.init_mask.assert_called()
mock_state.hide_contributions.assert_not_called()
mock_state.cap_contributions.assert_not_called()
mock_state.sign_contributions.assert_called()
mock_state.combine_masks.assert_called()
mock_state.cutoff_contributions.assert_called()
assert hasattr(xpl, 'mask')
mock_state.compute_masked_contributions.assert_called()
assert hasattr(xpl, 'masked_contributions')
def test_filter_6(self):
"""
Unit test filter 6
"""
xpl = SmartExplainer()
mock_data = {'var_dict': 1, 'contrib_sorted': 2, 'x_sorted': 3}
xpl.data = mock_data
mock_state = Mock()
xpl.state = mock_state
xpl.filter()
mock_state.init_mask.assert_called()
mock_state.hide_contributions.assert_not_called()
mock_state.cap_contributions.assert_not_called()
mock_state.sign_contributions.assert_not_called()
mock_state.combine_masks.assert_called()
mock_state.cutoff_contributions.assert_not_called()
assert hasattr(xpl, 'mask')
mock_state.compute_masked_contributions.assert_called()
assert hasattr(xpl, 'masked_contributions')
def test_filter_7(self):
"""
Unit test filter 7
"""
xpl = SmartExplainer()
contributions = [
pd.DataFrame(
data=[[0.5, 0.4, 0.3], [0.9, 0.8, 0.7]],
columns=['Col1', 'Col2', 'Col3']
),
pd.DataFrame(
data=[[0.3, 0.2, 0.1], [0.6, 0.5, 0.4]],
columns=['Col1', 'Col2', 'Col3']
)
]
xpl.data = {'var_dict': 1, 'contrib_sorted': contributions, 'x_sorted': 3}
xpl.state = MultiDecorator(SmartState())
xpl.filter(threshold=0.5, max_contrib=2)
expected_mask = [
pd.DataFrame(
data=[[True, False, False], [True, True, False]],
columns=['contrib_1', 'contrib_2', 'contrib_3']
),
pd.DataFrame(
data=[[False, False, False], [True, True, False]],
columns=['contrib_1', 'contrib_2', 'contrib_3']
)
]
assert len(expected_mask) == len(xpl.mask)
test_list = [pd.testing.assert_frame_equal(e, m) for e, m in zip(expected_mask, xpl.mask)]
assert all(x is None for x in test_list)
expected_masked_contributions = [
pd.DataFrame(
data=[[0.0, 0.7], [0.0, 0.7]],
columns=['masked_neg', 'masked_pos']
),
pd.DataFrame(
data=[[0.0, 0.6], [0.0, 0.4]],
columns=['masked_neg', 'masked_pos']
)
]
assert len(expected_masked_contributions) == len(xpl.masked_contributions)
test_list = [pd.testing.assert_frame_equal(e, m) for e, m in
zip(expected_masked_contributions, xpl.masked_contributions)]
assert all(x is None for x in test_list)
expected_param_dict = {
'features_to_hide': None,
'threshold': 0.5,
'positive': None,
'max_contrib': 2
}
self.assertDictEqual(expected_param_dict, xpl.mask_params)
def test_check_label_name_1(self):
"""
Unit test check label name 1
"""
label_dict = {1: 'Age', 2: 'Education'}
xpl = SmartExplainer(label_dict=label_dict)
xpl.inv_label_dict = {v: k for k, v in xpl.label_dict.items()}
xpl._classes = [1, 2]
entry = 'Age'
expected_num = 0
expected_code = 1
expected_value = 'Age'
label_num, label_code, label_value = xpl.check_label_name(entry, 'value')
assert expected_num == label_num
assert expected_code == label_code
assert expected_value == label_value
def test_check_label_name_2(self):
"""
Unit test check label name 2
"""
xpl = SmartExplainer(label_dict = None)
xpl._classes = [1, 2]
entry = 1
expected_num = 0
expected_code = 1
expected_value = 1
label_num, label_code, label_value = xpl.check_label_name(entry, 'code')
assert expected_num == label_num
assert expected_code == label_code
assert expected_value == label_value
def test_check_label_name_3(self):
"""
Unit test check label name 3
"""
label_dict = {1: 'Age', 2: 'Education'}
xpl = SmartExplainer(label_dict=label_dict)
xpl.inv_label_dict = {v: k for k, v in xpl.label_dict.items()}
xpl._classes = [1, 2]
entry = 0
expected_num = 0
expected_code = 1
expected_value = 'Age'
label_num, label_code, label_value = xpl.check_label_name(entry, 'num')
assert expected_num == label_num
assert expected_code == label_code
assert expected_value == label_value
def test_check_label_name_4(self):
"""
Unit test check label name 4
"""
xpl = SmartExplainer()
label = 0
origin = 'error'
expected_msg = "Origin must be 'num', 'code' or 'value'."
self.assertRaisesWithMessage(expected_msg, xpl.check_label_name, **{'label': label, 'origin': origin})
def test_check_label_name_5(self):
"""
Unit test check label name 5
"""
label_dict = {1: 'Age', 2: 'Education'}
xpl = SmartExplainer(label_dict=label_dict)
xpl.inv_label_dict = {v: k for k, v in xpl.label_dict.items()}
xpl._classes = [1, 2]
label = 'Absent'
expected_msg = f"Label (Absent) not found for origin (value)"
origin = 'value'
self.assertRaisesWithMessage(expected_msg, xpl.check_label_name, **{'label': label, 'origin': origin})
def test_check_features_name_1(self):
"""
Unit test check features name 1
"""
xpl = SmartExplainer()
xpl.features_dict = {'tech_0': 'domain_0', 'tech_1': 'domain_1', 'tech_2': 'domain_2'}
xpl.inv_features_dict = {v: k for k, v in xpl.features_dict.items()}
xpl.columns_dict = {0: 'tech_0', 1: 'tech_1', 2: 'tech_2'}
xpl.inv_columns_dict = {v: k for k, v in xpl.columns_dict.items()}
feature_list_1 = ['domain_0', 'tech_1']
feature_list_2 = ['domain_0', 0]
self.assertRaises(ValueError, xpl.check_features_name, feature_list_1)
self.assertRaises(ValueError, xpl.check_features_name, feature_list_2)
def test_check_features_name_2(self):
"""
Unit test check features name 2
"""
xpl = SmartExplainer()
xpl.features_dict = {'tech_0': 'domain_0', 'tech_1': 'domain_1', 'tech_2': 'domain_2'}
xpl.inv_features_dict = {v: k for k, v in xpl.features_dict.items()}
xpl.columns_dict = {0: 'tech_0', 1: 'tech_1', 2: 'tech_2'}
xpl.inv_columns_dict = {v: k for k, v in xpl.columns_dict.items()}
feature_list = ['domain_0', 'domain_2']
output = xpl.check_features_name(feature_list)
expected_output = [0, 2]
np.testing.assert_array_equal(output, expected_output)
def test_check_features_name_3(self):
"""
Unit test check features name 3
"""
xpl = SmartExplainer()
xpl.columns_dict = {0: 'tech_0', 1: 'tech_1', 2: 'tech_2'}
xpl.inv_columns_dict = {v: k for k, v in xpl.columns_dict.items()}
feature_list = ['tech_2']
output = xpl.check_features_name(feature_list)
expected_output = [2]
np.testing.assert_array_equal(output, expected_output)
def test_check_features_name_4(self):
"""
Unit test check features name 4
"""
xpl = SmartExplainer()
xpl.columns_dict = None
xpl.features_dict = None
feature_list = [1, 2, 4]
output = xpl.check_features_name(feature_list)
expected_output = feature_list
np.testing.assert_array_equal(output, expected_output)
def test_save_1(self):
"""
Unit test save 1
"""
pkl_file, xpl = init_sme_to_pickle_test()
xpl.save(pkl_file)
assert path.exists(pkl_file)
os.remove(pkl_file)
def test_load_1(self):
"""
Unit test load 1
"""
temp, xpl = init_sme_to_pickle_test()
xpl2 = SmartExplainer()
current = Path(path.abspath(__file__)).parent.parent.parent
pkl_file = path.join(current, 'data/xpl_to_load.pkl')
xpl2.load(pkl_file)
attrib_xpl = [element for element in xpl.__dict__.keys()]
attrib_xpl2 = [element for element in xpl2.__dict__.keys()]
assert all(attrib in attrib_xpl2 for attrib in attrib_xpl)
assert all(attrib2 in attrib_xpl for attrib2 in attrib_xpl2)
def test_check_y_pred_1(self):
"""
Unit test check y pred
"""
xpl = SmartExplainer()
xpl.y_pred = None
xpl.x_pred = None
xpl.check_y_pred()
def test_check_y_pred_2(self):
"""
Unit test check y pred 2
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
data=np.array([[1, 2], [3, 4]]),
columns=['Col1', 'Col2']
)
xpl.y_pred = pd.DataFrame(
data=np.array(['1', 0]),
columns=['Y']
)
with self.assertRaises(ValueError):
xpl.check_y_pred(xpl.y_pred)
def test_check_y_pred_3(self):
"""
Unit test check y pred 3
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
data=np.array([[1, 2], [3, 4]]),
columns=['Col1', 'Col2']
)
xpl.y_pred = pd.DataFrame(
data=np.array([0]),
columns=['Y']
)
with self.assertRaises(ValueError):
xpl.check_y_pred(xpl.y_pred)
def test_check_y_pred_4(self):
"""
Unit test check y pred 4
"""
xpl = SmartExplainer()
xpl.y_pred = [0, 1]
self.assertRaises(AttributeError, xpl.check_y_pred)
def test_check_y_pred_5(self):
"""
Unit test check y pred 5
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
data=np.array([[1, 2], [3, 4]]),
columns=['Col1', 'Col2']
)
xpl.y_pred = pd.Series(
data=np.array(['0'])
)
with self.assertRaises(ValueError):
xpl.check_y_pred(xpl.y_pred)
def test_check_model_1(self):
"""
Unit test check model 1
"""
model = lambda: None
model.predict = types.MethodType(self.predict, model)
xpl = SmartExplainer()
xpl.model = model
xpl._case, xpl._classes = xpl.check_model()
assert xpl._case == 'regression'
assert xpl._classes is None
def test_check_model_2(self):
"""
Unit test check model 2
"""
xpl = SmartExplainer()
df1 = pd.DataFrame([1, 2])
df2 = pd.DataFrame([3, 4])
xpl.contributions = [df1, df2]
xpl.state = xpl.choose_state(xpl.contributions)
model = lambda: None
model._classes = np.array([1, 2])
model.predict = types.MethodType(self.predict, model)
model.predict_proba = types.MethodType(self.predict_proba, model)
xpl.model = model
xpl._case, xpl._classes = xpl.check_model()
assert xpl._case == 'classification'
self.assertListEqual(xpl._classes, [1, 2])
def test_check_features_desc_1(self):
"""
Unit test check features desc 1
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[0.12, 0, 13, 1],
[0.13, 1, 14, 1],
[0.14, 1, 15, 1],
[0.15, np.NaN, 13, 1]],
columns=['col1', 'col2', 'col3', 'col4']
)
expected = {
'col1' : 4,
'col2' : 2,
'col3' : 3,
'col4' : 1
}
assert xpl.check_features_desc() == expected
@patch('shapash.explainer.smart_explainer.SmartExplainer.check_y_pred')
def test_add_1(self, mock_check_y_pred):
"""
Unit test add 1
Parameters
----------
mock_check_y_pred : [type]
[description]
"""
xpl = SmartExplainer()
dataframe_yp = pd.DataFrame([1, 3, 1], columns=['pred'], index=[0, 1, 2])
mock_y_pred = Mock(return_value=dataframe_yp)
mock_check_y_pred.return_value = mock_y_pred()
xpl.x_pred = dataframe_yp
xpl.add(y_pred=dataframe_yp)
expected = SmartExplainer()
expected.y_pred = dataframe_yp
assert not | pd.testing.assert_frame_equal(xpl.y_pred, expected.y_pred) | pandas.testing.assert_frame_equal |
from __future__ import print_function
import collections
import os
import sys
import numpy as np
import pandas as pd
try:
from sklearn.impute import SimpleImputer as Imputer
except ImportError:
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path)
import candle
global_cache = {}
SEED = 2017
P1B3_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B3/'
DATA_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/combo/'
def get_file(url):
return candle.fetch_file(url, 'Pilot1')
def impute_and_scale(df, scaling='std', keepcols=None):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
        type of scaling to apply
    keepcols : list of column names or None, optional (default None)
        if given, restrict imputation/scaling to these columns; columns that are
        entirely missing are filled with zeros instead of being dropped
    """
if keepcols is None:
df = df.dropna(axis=1, how='all')
else:
df = df[keepcols].copy()
all_na_cols = df.columns[df.isna().all()]
df[all_na_cols] = 0
imputer = Imputer(strategy='mean')
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return pd.DataFrame(mat, columns=df.columns)
if scaling == 'maxabs':
scaler = MaxAbsScaler()
elif scaling == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
mat = scaler.fit_transform(mat)
df = pd.DataFrame(mat, columns=df.columns)
return df
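# Illustrative sketch (not part of the original module): a minimal, hypothetical
# helper showing how impute_and_scale could be exercised on a toy dataframe.
def _example_impute_and_scale():
    toy = pd.DataFrame({'f1': [1.0, np.nan, 3.0], 'f2': [0.5, 0.7, np.nan]})
    # NaNs are replaced by the column mean, then values are scaled into [0, 1]
    return impute_and_scale(toy, scaling='minmax')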
def load_dose_response(min_logconc=-4., max_logconc=-4., subsample=None, fraction=False):
"""Load cell line response to different drug compounds, sub-select response for a specific
drug log concentration range and return a pandas dataframe.
Parameters
----------
min_logconc : -3, -4, -5, -6, -7, optional (default -4)
min log concentration of drug to return cell line growth
max_logconc : -3, -4, -5, -6, -7, optional (default -4)
max log concentration of drug to return cell line growth
subsample: None, 'naive_balancing' (default None)
subsampling strategy to use to balance the data based on growth
fraction: bool (default False)
divide growth percentage by 100
"""
path = get_file(P1B3_URL + 'NCI60_dose_response_with_missing_z5_avg.csv')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep=',', engine='c',
na_values=['na', '-', ''],
dtype={'NSC': object, 'CELLNAME': str, 'LOG_CONCENTRATION': np.float32, 'GROWTH': np.float32})
global_cache[path] = df
df = df[(df['LOG_CONCENTRATION'] >= min_logconc) & (df['LOG_CONCENTRATION'] <= max_logconc)]
df = df[['NSC', 'CELLNAME', 'GROWTH', 'LOG_CONCENTRATION']]
if subsample and subsample == 'naive_balancing':
df1 = df[df['GROWTH'] <= 0]
df2 = df[(df['GROWTH'] > 0) & (df['GROWTH'] < 50)].sample(frac=0.7, random_state=SEED)
df3 = df[(df['GROWTH'] >= 50) & (df['GROWTH'] <= 100)].sample(frac=0.18, random_state=SEED)
df4 = df[df['GROWTH'] > 100].sample(frac=0.01, random_state=SEED)
df = pd.concat([df1, df2, df3, df4])
if fraction:
df['GROWTH'] /= 100
df = df.set_index(['NSC'])
return df
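# Illustrative sketch (not in the original file): how load_dose_response might be
# called. Running it needs network access to the P1B3 data mirror, so it is wrapped
# in a hypothetical helper rather than executed at import time.
def _example_load_dose_response():
    df = load_dose_response(min_logconc=-5., max_logconc=-4.,
                            subsample='naive_balancing', fraction=True)
    # GROWTH is returned as a fraction and the frame is indexed by drug (NSC)
    return df.head()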
def load_combo_response(response_url=None, fraction=False, use_combo_score=False, use_mean_growth=False,
exclude_cells=[], exclude_drugs=[]):
"""Load cell line response to pairs of drugs, sub-select response for a specific
drug log concentration range and return a pandas dataframe.
Parameters
----------
fraction: bool (default False)
divide growth percentage by 100
use_combo_score: bool (default False)
return combination score in place of percent growth (stored in 'GROWTH' column)
"""
response_url = response_url or (DATA_URL + 'ComboDrugGrowth_Nov2017.csv')
path = get_file(response_url)
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path,
usecols=['CELLNAME', 'NSC1', 'CONC1', 'NSC2', 'CONC2', 'PERCENTGROWTH', 'VALID', 'SCORE', 'SCREENER', 'STUDY'],
na_values=['na', '-', ''],
dtype={'NSC1': object, 'NSC2': object, 'CONC1': object, 'CONC2': object, 'PERCENTGROWTH': str, 'SCORE': str},
engine='c', error_bad_lines=False, warn_bad_lines=True)
global_cache[path] = df
df = df[df['VALID'] == 'Y']
df = df[['CELLNAME', 'NSC1', 'NSC2', 'CONC1', 'CONC2', 'PERCENTGROWTH', 'SCORE']]
exclude_cells = [x.split('.')[-1] for x in exclude_cells]
exclude_drugs = [x.split('.')[-1] for x in exclude_drugs]
df = df[~df['CELLNAME'].isin(exclude_cells) & ~df['NSC1'].isin(exclude_drugs) & ~df['NSC2'].isin(exclude_drugs)]
df['PERCENTGROWTH'] = df['PERCENTGROWTH'].astype(np.float32)
df['SCORE'] = df['SCORE'].astype(np.float32)
df['NSC2'] = df['NSC2'].fillna(df['NSC1'])
df['CONC2'] = df['CONC2'].fillna(df['CONC1'])
df['SCORE'] = df['SCORE'].fillna(0)
cellmap_path = get_file(DATA_URL + 'NCI60_CELLNAME_to_Combo.txt')
df_cellmap = pd.read_csv(cellmap_path, sep='\t')
df_cellmap.set_index('Name', inplace=True)
cellmap = df_cellmap[['CELLNAME']].to_dict()['CELLNAME']
df['CELLNAME'] = df['CELLNAME'].map(lambda x: cellmap[x])
df_mean_min = df.groupby(['CELLNAME', 'NSC1', 'NSC2', 'CONC1', 'CONC2']).mean()
df_mean_min = df_mean_min.groupby(['CELLNAME', 'NSC1', 'NSC2']).min()
df_mean_min = df_mean_min.add_suffix('_MIN').reset_index() # add PERCENTGROWTH_MIN by flattening the hierarchical index
df_min = df_mean_min
# df_min = df.groupby(['CELLNAME', 'NSC1', 'NSC2']).min()
# df_min = df_min.add_suffix('_MIN').reset_index() # add PERCENTGROWTH_MIN by flattening the hierarchical index
df = df.drop(['CONC1', 'CONC2'], axis=1)
df_max = df.groupby(['CELLNAME', 'NSC1', 'NSC2']).max()
df_max = df_max.add_suffix('_MAX').reset_index() # add SCORE_MAX by flattening the hierarchical index
df_avg = df.copy()
df_avg['PERCENTGROWTH'] = df_avg['PERCENTGROWTH'].apply(lambda x: 100 if x > 100 else 50 + x / 2 if x < 0 else 50 + x / 2)
df_avg = df.groupby(['CELLNAME', 'NSC1', 'NSC2']).mean()
df_avg = df_avg.add_suffix('_AVG').reset_index()
if use_combo_score:
df = df_max.rename(columns={'SCORE_MAX': 'GROWTH'}).drop('PERCENTGROWTH_MAX', axis=1)
elif use_mean_growth:
df = df_avg.rename(columns={'PERCENTGROWTH_AVG': 'GROWTH'}).drop('SCORE_AVG', axis=1)
else:
df = df_min.rename(columns={'PERCENTGROWTH_MIN': 'GROWTH'}).drop('SCORE_MIN', axis=1)
if fraction:
df['GROWTH'] /= 100
return df
def load_combo_dose_response(response_url=None, fraction=False, use_combo_score=False, exclude_cells=[], exclude_drugs=[]):
"""Load cell line response to pairs of drugs, sub-select response for a specific
drug log concentration range and return a pandas dataframe.
Parameters
----------
fraction: bool (default False)
divide growth percentage by 100
use_combo_score: bool (default False)
return combination score in place of percent growth (stored in 'GROWTH' column)
"""
response_url = response_url or (DATA_URL + 'ComboDrugGrowth_Nov2017.csv')
path = get_file(response_url)
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path,
usecols=['CELLNAME', 'NSC1', 'CONC1', 'NSC2', 'CONC2', 'PERCENTGROWTH', 'VALID', 'SCORE', 'SCREENER', 'STUDY'],
na_values=['na', '-', ''],
dtype={'NSC1': object, 'NSC2': object, 'CONC1': object, 'CONC2': object, 'PERCENTGROWTH': str, 'SCORE': str},
engine='c', error_bad_lines=False, warn_bad_lines=True,
)
global_cache[path] = df
df = df[df['VALID'] == 'Y']
df = df[['CELLNAME', 'NSC1', 'NSC2', 'CONC1', 'CONC2', 'PERCENTGROWTH', 'SCORE']]
exclude_cells = [x.split('.')[-1] for x in exclude_cells]
exclude_drugs = [x.split('.')[-1] for x in exclude_drugs]
df = df[~df['CELLNAME'].isin(exclude_cells) & ~df['NSC1'].isin(exclude_drugs) & ~df['NSC2'].isin(exclude_drugs)]
df['PERCENTGROWTH'] = df['PERCENTGROWTH'].astype(np.float32)
df['SCORE'] = df['SCORE'].astype(np.float32)
df['NSC2'] = df['NSC2'].fillna(df['NSC1'])
df['CONC2'] = df['CONC2'].fillna(df['CONC1'])
df['SCORE'] = df['SCORE'].fillna(0)
cellmap_path = get_file(DATA_URL + 'NCI60_CELLNAME_to_Combo.txt')
df_cellmap = pd.read_csv(cellmap_path, sep='\t')
df_cellmap.set_index('Name', inplace=True)
cellmap = df_cellmap[['CELLNAME']].to_dict()['CELLNAME']
df['CELLNAME'] = df['CELLNAME'].map(lambda x: cellmap[x])
df_mean = df.groupby(['CELLNAME', 'NSC1', 'NSC2', 'CONC1', 'CONC2']).mean()
df_mean = df_mean.reset_index()
df_mean['CONC1'] = -np.log10(df_mean['CONC1'].astype(np.float32))
df_mean['CONC2'] = -np.log10(df_mean['CONC2'].astype(np.float32))
df = df_mean.rename(columns={'PERCENTGROWTH': 'GROWTH', 'CONC1': 'pCONC1', 'CONC2': 'pCONC2'})
# df_mean_min = df.groupby(['CELLNAME', 'NSC1', 'NSC2', 'CONC1', 'CONC2']).mean()
# df_mean_min = df_mean_min.groupby(['CELLNAME', 'NSC1', 'NSC2']).min()
# df_mean_min = df_mean_min.add_suffix('_MIN').reset_index() # add PERCENTGROWTH_MIN by flattening the hierarchical index
# df_min = df_mean_min
# df_min = df.groupby(['CELLNAME', 'NSC1', 'NSC2']).min()
# df_min = df_min.add_suffix('_MIN').reset_index() # add PERCENTGROWTH_MIN by flattening the hierarchical index
# df = df.drop(['CONC1', 'CONC2'], axis=1)
# df_max = df.groupby(['CELLNAME', 'NSC1', 'NSC2']).max()
# df_max = df_max.add_suffix('_MAX').reset_index() # add SCORE_MAX by flattening the hierarchical index
# if use_combo_score:
# df = df_max.rename(columns={'SCORE_MAX': 'GROWTH'}).drop('PERCENTGROWTH_MAX', axis=1)
# else:
# df = df_min.rename(columns={'PERCENTGROWTH_MIN': 'GROWTH'}).drop('SCORE_MIN', axis=1)
if fraction:
df['GROWTH'] /= 100
return df
def load_drug_set_descriptors(drug_set='ALMANAC', ncols=None, scaling='std', add_prefix=True):
if drug_set == 'ALMANAC':
path = get_file(DATA_URL + 'ALMANAC_drug_descriptors_dragon7.txt')
elif drug_set == 'GDSC':
path = get_file(DATA_URL + 'GDSC_PubChemCID_drug_descriptors_dragon7')
elif drug_set == 'NCI_IOA_AOA':
path = get_file(DATA_URL + 'NCI_IOA_AOA_drug_descriptors_dragon7')
elif drug_set == 'RTS':
path = get_file(DATA_URL + 'RTS_drug_descriptors_dragon7')
elif drug_set == 'pan':
path = get_file(DATA_URL + 'pan_drugs_dragon7_descriptors.tsv')
else:
raise Exception('Drug set {} not supported!'.format(drug_set))
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na', '-', ''],
)
global_cache[path] = df
# df1 = pd.DataFrame(df.loc[:, 'NAME'].astype(int).astype(str))
df1 = pd.DataFrame(df.loc[:, 'NAME'])
# df1['NAME'] = df1['NAME'].map(lambda x: x[4:])
df1.rename(columns={'NAME': 'Drug'}, inplace=True)
    df2 = df.drop('NAME', axis=1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
keepcols = None
else:
train_ref = load_drug_descriptors(add_prefix=add_prefix)
keepcols = train_ref.columns[1:]
df2 = impute_and_scale(df2, scaling, keepcols=keepcols)
df2 = df2.astype(np.float32)
df_dg = pd.concat([df1, df2], axis=1)
return df_dg
def load_drug_descriptors_new(ncols=None, scaling='std', add_prefix=True):
"""Load drug descriptor data, sub-select columns of drugs descriptors
randomly if specificed, impute and scale the selected data, and return a
pandas dataframe.
Parameters
----------
ncols : int or None
number of columns (drugs descriptors) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path = get_file(DATA_URL + 'ALMANAC_drug_descriptors_dragon7.txt')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na', '-', ''],
)
global_cache[path] = df
# df1 = pd.DataFrame(df.loc[:, 'NAME'].astype(int).astype(str))
df1 = pd.DataFrame(df.loc[:, 'NAME'])
# df1['NAME'] = df1['NAME'].map(lambda x: x[4:])
df1.rename(columns={'NAME': 'Drug'}, inplace=True)
    df2 = df.drop('NAME', axis=1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df_dg = | pd.concat([df1, df2], axis=1) | pandas.concat |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 09:26:17 2020
@author: jone
"""
import pandas as pd
import numpy as np
import dipole
import matplotlib.pyplot as plt
import datetime as dt
#investigate the relationship between equatorward boundary in NOAA data and the
#occurrence rate of substorms from sophie list
#Load omni data
omnifile = '/home/jone/Documents/Dropbox/science/superdarn/lobe_circulation/omni_1min_1999-2017.hdf'
omni = pd.read_hdf(omnifile)
#Load Sophie list
sophie = pd.read_hdf('./jone_data/sophie75.h5')
use = (sophie.index>=omni.index[0]) & (sophie.index<=omni.index[-1])
#use = (sophie.index>=dt.datetime(2003,1,1,0,0)) & (sophie.index<dt.datetime(2004,1,1,0,0))
sophie = sophie[use].copy()
exp = sophie.ssphase == 2 #expansion phase list
sophiexp = sophie[exp].copy()
#Process omni data
use = (omni.index >= sophiexp.index[0]) & (omni.index <= sophiexp.index[-1])
omni = omni[use].copy()
omni = omni.interpolate(limit=5, limit_direction='both')
bypos = (omni.BY_GSM>0)# & (np.abs(omni.BZ_GSM)<np.abs(omni.BY_GSM))
omni.loc[:,'bypos'] = omni.BY_GSM[bypos]
byneg = (omni.BY_GSM<0)# & (np.abs(omni.BZ_GSM)<np.abs(omni.BY_GSM))
omni.loc[:,'byneg'] = omni.BY_GSM[byneg]
omni.loc[:,'milan'] = 3.3e5 * (omni['flow_speed']*1000.)**(4./3) * (np.sqrt(omni.BY_GSM**2 + omni.BZ_GSM**2)) * 1e-9 * \
np.sin(np.abs(np.arctan2(omni['BY_GSM'],omni['BZ_GSM']))/2.)**(4.5) * 0.001
milanpos = 3.3e5 * (omni['flow_speed']*1000.)**(4./3) * (np.sqrt(omni.bypos**2 + omni.BZ_GSM**2)) * 1e-9 * \
np.sin(np.abs(np.arctan2(omni['bypos'],omni['BZ_GSM']))/2.)**(4.5) * 0.001
milanneg = 3.3e5 * (omni['flow_speed']*1000.)**(4./3) * (np.sqrt(omni.byneg**2 + omni.BZ_GSM**2)) * 1e-9 * \
np.sin(np.abs(np.arctan2(omni['byneg'],omni['BZ_GSM']))/2.)**(4.5) * 0.001
window = 60 #minutes
nobsinwindow = omni.milan.rolling(window).count()
cumsumneg = milanneg.rolling(window,min_periods=1).sum()
cumsumpos = milanpos.rolling(window,min_periods=1).sum()
omni.loc[:,'bxlong'] = omni.BX_GSE.rolling(window,min_periods=1).mean()
omni.loc[:,'bzlong'] = omni.BZ_GSM.rolling(window,min_periods=1).mean()
omni.loc[:,'bylong'] = omni.BY_GSM.rolling(window,min_periods=1).mean()
bxlim = 200
usepos = ((cumsumpos>2.*cumsumneg) & (nobsinwindow==window) & (np.abs(omni.bxlong)<bxlim)) | ((cumsumneg.isnull()) & (np.invert(cumsumpos.isnull())) & (nobsinwindow==window) & (np.abs(omni.bxlong)<bxlim))
useneg = ((cumsumneg>2.*cumsumpos) & (nobsinwindow==window) & (np.abs(omni.bxlong)<bxlim)) | ((cumsumpos.isnull()) & (np.invert(cumsumneg.isnull())) & (nobsinwindow==window) & (np.abs(omni.bxlong)<bxlim))
omni.loc[:,'usepos'] = usepos
omni.loc[:,'useneg'] = useneg
omni.loc[:,'milanlong'] = omni.milan.rolling(window, min_periods=window, center=False).mean() #average IMF data
#omni = omni.drop(['bxlong','bzlong','byneg','bypos','PC_N_INDEX','Beta','E','Mach_num','Mgs_mach_num','y'],axis=1) #need to drop PC index as it contain a lot of nans. Also other fields will exclude data when we later use dropna()
#Combine omni and sophie list
sophiexp.loc[:,'tilt'] = dipole.dipole_tilt(sophiexp.index)
omni.loc[:,'tilt'] = dipole.dipole_tilt(omni.index)
omni2 = omni.reindex(index=sophiexp.index, method='nearest', tolerance='30sec')
sophiexp.loc[:,'bylong'] = omni2.bylong
sophiexp.loc[:,'milanlong'] = omni2.milanlong
sophiexp.loc[:,'substorm'] = sophiexp.ssphase==2
bybins = np.append(np.append([-50],np.linspace(-9,9,10)),[50])
bybincenter = np.linspace(-10,10,11)
sgroup = sophiexp.groupby([pd.cut(sophiexp.tilt, bins=np.array([-35,-10,10,35])), \
| pd.cut(sophiexp.bylong, bins=bybins) | pandas.cut |
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
def plot_severity(data):
# absolute
plt.figure()
a = data.plot.barh(figsize=(7.7, 2.4),
width=.95,
color=("#BABDB6", "#8AE234", "#FCE94F", "#F57900", "#EF2929"))
a.set_xlabel("Absolute Häufigkeit")
a.set_ylabel("Website")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., title="Schweregrad")
plt.tight_layout()
plt.show()
# rel
plt.figure()
r = data.div(data.sum(axis=1), axis=0).plot.barh(stacked=True,
figsize=(7.7, 1.9),
width=.4,
color=("#BABDB6", "#8AE234", "#FCE94F", "#F57900", "#EF2929"))
plt.legend(bbox_to_anchor=(1.05, 1), loc=None, borderaxespad=0., title="Schweregrad")
r.set_xlabel("Relative Häufigkeit")
r.set_ylabel("Website")
plt.tight_layout()
plt.show()
def plot_problems(data):
ax = data.plot.box(vert=False,
figsize=(6, 2.5),
widths=[.45, .45],
color={"whiskers": "black", "boxes": "black", 'medians': '#D62728'},
medianprops={'linewidth': 2.8})
ax.set_xlabel("Absolute Häufigkeit")
ax.set_ylabel("Website")
plt.tight_layout()
plt.show()
def plot_concordance(data):
data["Sum"] = data.sum(axis=1)
data = data.sort_values("Sum")
data = pd.DataFrame([data["Irrelevant"],
data["Irrelevant_S"],
data["Kosmetisch"],
data["Kosmetisch_S"],
data["Gering"],
data["Gering_S"],
data["Bedeutend"],
data["Bedeutend_S"],
data["Katastrophe"],
data["Katastrophe_S"]]).T
color = ("#3465A4","#3465A4",
"#BABDB6","#8AE234",
"#888A85","#FCE94F",
"#4E4E4E","#F57900",
"#000000", "#EF2929")
# absolute
a = data.plot.barh(stacked=True, color=color, figsize=(7.7, 3.5))
plt.legend(bbox_to_anchor=(1.05, 1),
loc=None,
borderaxespad=0.,
title="Sym. Differenz:\nSchweregrad")
a.set_xlabel("Absolute Häufigkeit")
plt.tight_layout()
plt.show()
# relative
r = data.div(data.sum(axis=1), axis=0).plot.barh(stacked=True, color=color, figsize=(7.7, 3.5))
plt.legend(bbox_to_anchor=(1.05, 1),
loc=None,
borderaxespad=0.,
title="Schweregrad")
r.set_xlabel("Relative Häufigkeit")
plt.tight_layout()
plt.show()
def plot_experience(sample):
ax = sample.pre["vorkenntnisse"].replace("n. a.", "Keine Angabe").replace("spreadshirt.de", "Spreadshirt").replace("nein", "Keine").value_counts().sort_values().plot.barh(color="#555753", figsize=(6, 2.3))
ax.set_xlabel("Absolute Häufigkeit")
ax.set_ylabel("Vorkenntnisse")
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.tight_layout()
plt.show()
def plot_gender(sample):
ax = sample.pre["geschlecht"].apply(lambda x: x.title()).value_counts().sort_values().plot.barh(color="#555753", figsize=(6, 2))
ax.set_xlabel("Absolute Häufigkeit")
ax.set_ylabel("Geschlecht")
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.tight_layout()
plt.show()
def plot_education(sample):
ax = sample.pre["bildungsabschluss"].value_counts().sort_values().plot.barh(color="#555753", figsize=(7.7, 2.3))
ax.set_xlabel("Absolute Häufigkeit")
ax.set_ylabel("Höchster\nBildungsabschluss")
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.tight_layout()
plt.show()
def plot_occupation(sample):
occupation = sample.pre["beschäftigung"].replace("MCS", "Student (Mensch-Computer-Systeme)")
occupation = occupation.replace("Mensch-Computer-Systeme", "Student (Mensch-Computer-Systeme)")
occupation = occupation.replace("Mensch-Computer-Systeme (Student)", "Student (Mensch-Computer-Systeme)")
occupation = occupation.replace("Chemie Bachelor", "Student (Chemie)")
occupation = occupation.replace("digital humanities", "Student (Digital Humanities)")
occupation = occupation.replace("Physik", "Student (Physik)")
occupation = occupation.replace("digital humanities", "Student (Digital Humanities)")
occupation = occupation.replace("Mensch-Computer-Systeme Student", "Student (Mensch-Computer-Systeme)")
occupation = occupation.replace("digital humanities".title(), "Student (Digital Humanities)")
occupation = occupation.replace("Student MCS", "Student (Mensch-Computer-Systeme)")
ax = occupation.value_counts().sort_values().plot.barh(color="#555753", figsize=(7.7, 3.5))
ax.set_xlabel("Absolute Häufigkeit")
ax.set_ylabel("Beschäftigung")
plt.tight_layout()
plt.show()
def plot_age(sample):
age = sample.pre["alter"].apply(lambda x: int(x))
age.name = "Alter"
ax = age.plot.box(vert=False,
figsize=(6, 2),
widths=.45,
color={"whiskers": "black", "boxes": "black", 'medians': '#D62728'},
medianprops={'linewidth': 2.8})
ax.set_xlabel("Alter, in Jahren")
ax.set_yticklabels("")
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.tight_layout()
plt.show()
def plot_nasa_tlx(sample):
df = pd.DataFrame(columns=["Shirtinator", "Spreadshirt"])
df.loc[:, "Spreadshirt"] = sample.nasa[sample.nasa["website"] == "spreadshirt"]["score"].values
df.loc[:, "Shirtinator"] = sample.nasa[sample.nasa["website"] == "shirtinator"]["score"].values
ax = df.plot.box(vert=False,
figsize=(6, 2.5),
widths=[.45, .45],
color={"whiskers": "black", "boxes": "black", 'medians': '#D62728'},
medianprops={'linewidth': 2.8})
ax.set_xlabel("NASA-TLX Score")
plt.tight_layout()
plt.show()
def plot_quesi(sample):
df = pd.DataFrame(columns=["Shirtinator", "Spreadshirt"])
df.loc[:, "Spreadshirt"] = sample.quesi[sample.quesi["website"] == "spreadshirt"]["score"].values
df.loc[:, "Shirtinator"] = sample.quesi[sample.quesi["website"] == "shirtinator"]["score"].values
ax = df.plot.box(vert=False,
figsize=(6, 2.5),
widths=[.45, .45],
color={"whiskers": "black", "boxes": "black", 'medians': '#D62728'},
medianprops={'linewidth': 2.8})
ax.set_xlabel("QUESI Score")
plt.tight_layout()
plt.show()
def plot_feedback(sample):
df = pd.DataFrame(columns=["Shirtinator", "Spreadshirt"])
df.loc[:, "Spreadshirt"] = sample.quesi[sample.quesi["website"] == "spreadshirt"]["score"].values
df.loc[:, "Shirtinator"] = sample.quesi[sample.quesi["website"] == "shirtinator"]["score"].values
ax = df.plot.box(vert=False,
figsize=(6, 2.5),
widths=[.45, .45],
color={"whiskers": "black", "boxes": "black", 'medians': '#D62728'},
medianprops={'linewidth': 2.8})
ax.set_xlabel("positiv : negativ")
plt.tight_layout()
plt.show()
def plot_clicks(sample):
df = pd.DataFrame(columns=["Shirtinator", "Spreadshirt"])
df.loc[:, "Spreadshirt"] = sample.mouse[sample.mouse["website"] == "spreadshirt"]["clicks"].values
df.loc[:, "Shirtinator"] = sample.mouse[sample.mouse["website"] == "shirtinator"]["clicks"].values
ax = df.plot.box(vert=False,
figsize=(6, 2.5),
widths=[.45, .45],
color={"whiskers": "black", "boxes": "black", 'medians': '#D62728'},
medianprops={'linewidth': 2.8})
ax.set_xlabel("Absolute Häufigkiet")
plt.tight_layout()
plt.show()
def plot_choice(sample):
choice = sample.post.besser.value_counts()
choice.index = ["Shirtinator besser", "Spreadshirt besser", "Beide gleich gut"]
ax = choice.sort_values().plot.barh(color="#555753", figsize=(7.7, 2.3))
ax.set_xlabel("Absolute Häufigkeit")
ax.set_ylabel("Bewertung")
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.tight_layout()
plt.show()
def plot_assistance(sample):
df = pd.DataFrame(columns=["Shirtinator", "Spreadshirt"])
df.loc[:, "Spreadshirt"] = sample.assistance[sample.assistance["website"] == "spreadshirt"]["n"].values
df.loc[:, "Shirtinator"] = sample.assistance[sample.assistance["website"] == "shirtinator"]["n"].values
assistance = pd.DataFrame({"Shirtinator": df["Shirtinator"].value_counts(),
"Spreadshirt": df["Spreadshirt"].value_counts()})
ax = assistance.plot.barh(figsize=(7.7, 2.4), color=("#D3D7CF", "grey"))
ax.set_xlabel("Absolute Häufigkeit")
ax.set_ylabel("Anzahl Hilfestellungen")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., title="Website")
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.tight_layout()
plt.show()
def plot_mistakes(sample):
df = | pd.DataFrame(columns=["Shirtinator", "Spreadshirt"]) | pandas.DataFrame |
"""
.. module:: repeats
:synopsis: Repeats (transposon) related stuffs
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import csv
import subprocess
import os
import gzip
import glob
import logging
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
import uuid
import pandas as PD
import numpy as N
import matplotlib.pylab as P
from jgem import utils as UT
from jgem import fasta as FA
from jgem import filenames as FN
from jgem import bedtools as BT
from jgem import gtfgffbed as GGB
from jgem import assembler2 as A2
from jgem import assembler3 as A3
RMSKPARAMS = dict(
np = 4,
th_uexon=4,
th_bp_ovl=50,
th_ex_ovl=50,
datacode='',
gname='gname',
)
def filter_paths(mdstpre, rdstpre):
    """Keep only paths whose component exon names survive in the repeat-filtered exon table."""
    ex = UT.read_pandas(rdstpre+'.ex.txt.gz')
def select_chromwise(paths, ex):
npchrs = []
for chrom in paths['chr'].unique():
pchr = paths[paths['chr']==chrom]
echr = ex[ex['chr']==chrom]
exnames = set(echr['name'].values)
#e2gname = UT.df2dict(echr,'name','gname')
idx = [all([x in exnames for x in y.split('|')]) for y in pchr['name']]
npchrs.append(pchr[idx])
return PD.concat(npchrs, ignore_index=True)
paths = GGB.read_bed(mdstpre+'.paths.withse.bed.gz')
npaths = select_chromwise(paths, ex)
GGB.write_bed(npaths, rdstpre+'.paths.withse.bed.gz', ncols=12)
paths = GGB.read_bed(mdstpre+'.paths.txt.gz')
npaths = select_chromwise(paths, ex)
GGB.write_bed(npaths, rdstpre+'.paths.txt.gz', ncols=12)
def filter_sjexdf(mdstpre, rdstpre):
exdf = UT.read_pandas(mdstpre+'.exdf.txt.gz', names=A3.EXDFCOLS)
sedf = UT.read_pandas(mdstpre+'.sedf.txt.gz', names=A3.EXDFCOLS)
exdf = PD.concat([exdf, sedf], ignore_index=True)
sjdf = UT.read_pandas(mdstpre+'.sjdf.txt.gz', names=A3.SJDFCOLS)
ex = UT.read_pandas(rdstpre+'.ex.txt.gz')
sj = UT.read_pandas(rdstpre+'.sj.txt.gz')
def select_chromwise_df(exdf, ex):
npchrs = []
for chrom in exdf['chr'].unique():
pchr = exdf[exdf['chr']==chrom]
echr = ex[ex['chr']==chrom]
exnames = set(echr['name'].values)
idx = [x in exnames for x in pchr['name']]
npchrs.append(pchr[idx])
return | PD.concat(npchrs, ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 25 10:11:28 2019
@author: yazdsous
"""
import numpy as np
import pyodbc
import pandas as pd
import datetime
#conn = pyodbc.connect('Driver={SQL Server};'
# 'Server=DSQL23CAP;'
# 'Database=Regulatory_Untrusted;'
# 'Trusted_Connection=yes;')
conn0 = pyodbc.connect('Driver={SQL Server};'
'Server=Hosaka\Sqlp2;'
'Database=Eforms;'
'Trusted_Connection=yes;')
conn1 = pyodbc.connect('Driver={SQL Server};'
'Server=ndb-a1;'
'Database=Core;'
'Trusted_Connection=yes;')
###############################################################################
#This function accepts the filigid of the application + the pyodbc object
#joins the Form and FormField tables from Eform DB (on FormId) and returns the
#corresponding dataframe with formId, Name, FilingIdm, ASPFieldIdName, and
#ASPFiledIdValue
#Output: A tuple with (Dataframe, FormId)
###############################################################################
def formfields_by_filingId(filingid:str, conn0) -> pd.DataFrame:
query = "SELECT f.[FormId]\
,f.[AddedOn]\
,[Name]\
,[FilingId]\
,[ASPFieldIdName]\
,[ASPFieldIdValue]\
FROM [Eforms].[dbo].[Form] f\
JOIN [FormField] ff\
ON f.FormId = ff.FormId\
WHERE FilingId IS NOT NULL AND [FilingId] = \'{}\'\
ORDER BY ff.FormId DESC".format(filingid)
df_filingid = pd.read_sql(query,conn0)
return df_filingid, df_filingid.FormId[0]
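###############################################################################
#Illustrative sketch (not part of the original script): pulling the form fields
#for one application. The FilingId value below is a made-up placeholder.
###############################################################################
def _example_formfields(filingid='A12345-1'):
    df_fields, form_id = formfields_by_filingId(filingid, conn0)
    return form_id, df_fields[['ASPFieldIdName', 'ASPFieldIdValue']].head()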
###############################################################################
###############################################################################
#This function accepts the FilingId of the application + the pyodbc object
#and extracts the contact information filled by the applicant.
#Output: A dataframe with the information corresponding to each contact type
#for the application (with the FilingId passed as the argument)
###############################################################################
def contact_info(filingid:str, conn0) -> pd.DataFrame:
query = "SELECT [ContactId]\
,ct.Name Contact_Type\
,ct.ContactTypeId\
,[FormId]\
,[FirstName]\
,[LastName]\
,[Salutation]\
,[Title]\
,[Organization]\
,[Email]\
,Country.Name Country\
,Province.Name Province\
,[Address]\
,[City]\
,[PostalCode]\
,[PhoneNumber]\
,[PhoneExt]\
,[FaxNumber]\
FROM [Eforms].[dbo].[Contact] c\
JOIN ContactType ct\
ON c.ContactTypeId = ct.ContactTypeId\
JOIN Country\
ON Country.CountryId = c.CountryId\
JOIN Province\
ON Province.ProvinceId = c.ProvinceId WHERE FormId = (SELECT FormId FROM [Eforms].[dbo].[Form] WHERE FilingId = \'{}\')".format(filingid)
df_fid = pd.read_sql(query,conn0)
return df_fid
###############################################################################
###############################################################################
#Input: FilingId of the application + the pyodbc object
#Output: A dataframe with the information in CORE corresponding to the apps
#joins of CORE tables and filtering by the FilingId
###############################################################################
def rts_by_filingid(filingid:str, conn1) -> pd.DataFrame:
query = "SELECT f.[FileId], f.[FileNumber],f.[RecordsTitle],f.[RecordsDescription],\
a.[ActivityId],a.[EnglishTitle],a.[FrenchTitle],a.[Description] ActivityDescription,\
a.[ApplicationDate],a.[ReceivedDate],a.[ExpectedCompletionDate],a.[InternalProjectFlag],\
a.[StatusId],a.[CompletedDate],a.[DeactivationDate] ActivityDeactivationDate,\
a.[LegacyProjectXKey],a.[LetterForCommentFlag],a.[LetterForCommentExpireDate],\
a.[BusinessUnitId],a.[FederalLandId],a.[DecisionOnCompleteness],a.[EnglishProjectShortName],\
a.[FrenchProjectShortName],a.[FrenchDescription] FrenchDescriptionOfActivity,\
aa.[ActivityAttachmentId] ,aa.[LivelinkCompoundDocumentId],\
be.[BoardEventId] ,be.[NextTimeToBoard] ,be.[PurposeId],be.[Description],\
be.[PrimaryContactId],be.[SecondaryContactId] ,\
di.[DecisionItemId],di.[DecisionItemStatusId],di.[RegulatoryInstrumentNumber],\
di.[IssuedDate],di.[EffectiveDate],di.[ExpireDate],di.[SunsetDate],\
di.[IssuedToNonRegulatedCompanyFlag],di.[LetterOnly],di.[Comments],di.[AddedBy],\
di.[AddedOn],di.[ModifiedBy],di.[ModifiedOn],di.[WalkaroundFolderId],\
di.[BoardDecisionDate],di.[ReasonForCancelling],di.[GicApproval],di.[SentToMinisterDate],\
di.[MinisterToPrivyCouncilOfficeDate],di.[PrivyCouncilOfficeApprovalNumber],\
di.[PrivyCouncilOfficeApprovalDate],di.[RegulatoryOfficerAssignedId],di.[IsNGLLicence],\
di.[FrenchComments],fc.[FileCompanyId] ,fc.[CompanyId],\
c.[CompanyId] CompanyIdC,c.[CompanyCode] ,c.[LegalName],c.[DeactivationDate],c.[IsGroup1]\
FROM [File] f\
JOIN [Activity] a\
ON f.FileId = a.FileId\
JOIN [ActivityAttachment] aa\
ON a.ActivityId = aa.ActivityId\
FULL JOIN [BoardEvent] be\
ON be.ActivityId = a.ActivityId\
FULL JOIN [DecisionItem] di\
ON be.BoardEventId = di.BoardEventId\
JOIN [FileCompany] fc \
ON fc.FileId = a.FileId\
JOIN [Company] c\
ON c.CompanyId = fc.CompanyId\
WHERE aa.LivelinkCompoundDocumentId = \'{}\'".format(filingid)
df_filingid = pd.read_sql(query,conn1)
return df_filingid, df_filingid.shape[0]
###############################################################################
###############################################################################
#A DATAFRAME is passed as the argument to this function. Input Dataframe is the
#output of function formfileds_by_filingId(...)
#Output: Commodity type (one commodity or a multiple commodities, depending on
# the application) and whether it is export or in the case of gas applications
#it is export or both
###############################################################################
def application_type(df:pd.DataFrame) -> str:
try:
#GAS
app_name = df.Name[0]
df_fields = df.loc[:,['ASPFieldIdName','ASPFieldIdValue']]
if app_name == 's15ab_ShrtTrmNtrlGs_ImprtExprt':
gas_import = df_fields.loc[df_fields['ASPFieldIdName'] == 'chkbx_s15ab_ShrtTrmNtrlGs_ImprtExprt_Athrztns_ImportOrder','ASPFieldIdValue'].values[0]
gas_export = df_fields.loc[df_fields['ASPFieldIdName'] == 'chkbx_s15ab_ShrtTrmNtrlGs_ImprtExprt_Athrztns_ExportOrder','ASPFieldIdValue'].values[0]
if all(map((lambda value: value == 'True'), (gas_import,gas_export))):
return 'gas','gas_export_import'
elif gas_import == 'False' and gas_export == 'True':
return 'gas','gas_export'
elif gas_import == 'True' and gas_export == 'False':
return 'gas','gas_import'
#NGL
elif app_name == 's22_ShrtTrmNgl_Exprt':
propane_export = df_fields.loc[df_fields['ASPFieldIdName'] == 'chkbx_s22_ShrtTrmNgl_Exprt_Athrztns_ProductType_Propane','ASPFieldIdValue'].values[0]
butanes_export = df_fields.loc[df_fields['ASPFieldIdName'] == 'chkbx_s22_ShrtTrmNgl_Exprt_Athrztns_ProductType_Butanes','ASPFieldIdValue'].values[0]
if all(map((lambda value: value == 'True'), (propane_export,butanes_export))):
return 'ngl','propane_butanes_export'
elif propane_export == 'False' and butanes_export == 'True':
return 'ngl','butanes_export'
elif propane_export == 'True' and butanes_export == 'False':
return 'ngl','propane_export'
#OIL
elif app_name == 's28_ShrtTrmLghtHvCrdRfnd_Exprt':
light_heavy_crude_export = df_fields.loc[df_fields['ASPFieldIdName'] == 'chkbx_s28_ShrtTrmLghtHvCrdRfnd_Exprt_Athrztns_HeavyCrude','ASPFieldIdValue'].values[0]
refined_products_export = df_fields.loc[df_fields['ASPFieldIdName'] == 'chkbx_s28_ShrtTrmLghtHvCrdRfnd_Exprt_Athrztns_RefinedProducts','ASPFieldIdValue'].values[0]
if all(map((lambda value: value == 'True'), (light_heavy_crude_export,refined_products_export))):
return 'oil','lightheavycrude_refinedproducts_export'
elif light_heavy_crude_export == 'False' and refined_products_export == 'True':
return 'oil','lightheavycrude_export'
elif light_heavy_crude_export == 'True' and refined_products_export == 'False':
return 'oil','refinedproducts_export'
elif app_name == 's28_ShrtTrmHvCrd_Exprt':
return 'oil','heavycrude_export'
else:
return 'this is not a gas, ngl, or oil order'
except ValueError:
return 'Value'
except TypeError:
return 'Type'
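###############################################################################
#Illustrative sketch (not part of the original script): application_type returns
#a (commodity, subtype) pair such as ('gas', 'gas_export') or
#('oil', 'heavycrude_export'), based on the checkbox fields of the application.
#The FilingId below is a made-up placeholder.
###############################################################################
def _example_application_type(filingid='A12345-1'):
    df_fields, _ = formfields_by_filingId(filingid, conn0)
    return application_type(df_fields)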
###############################################################################
# NOTE:
###############################################################################
#GasType -> 1 -> Natural Gas
#GasType -> 2 -> Natural Gas, in the form of Liquefied Natural Gas
#GasType -> 3 -> Natural Gas, in the form of Compressed Natural Gas
###############################################################################
#Input: Commodity name in english
#Output: Commodity name in French
###############################################################################
def comm_type_english_french(df:pd.DataFrame) -> list:
try:
if application_type(df)[0] == 'gas':
gas_en,gas_fr = str(),str()
gas_type = df.loc[df['ASPFieldIdName'] == 'rbl_s15ab_ShrtTrmNtrlGs_ImprtExprt_Athrztns_ExportOrder_GasType','ASPFieldIdValue'].values[0]
if gas_type == '2':
gas_en = 'natural gas, in the form of Liquefied Natural Gas'
gas_fr = 'gaz, sous la forme de gaz naturel liquéfié seulement'
elif gas_type == '3':
gas_en = 'natural gas, in the form of compressed natural gas'
gas_fr = 'gaz, sous la forme de gaz naturel comprimé'
return gas_en , gas_fr
if application_type(df)[0] == 'oil':
oil_en,oil_fr = str(),str()
oil_type = application_type(df)[1]
if oil_type == 'lightheavycrude_refinedproducts_export':
                oil_en = 'light and heavy crude oil and refined petroleum products'
oil_fr = 'pétrole brut léger et lourd et produits pétroliers raffinés'
elif oil_type == 'lightheavycrude_export':
oil_en = 'light and heavy crude oil'
oil_fr = 'pétrole brut léger et lourd'
elif oil_type == 'refinedproducts_export':
oil_en = 'refined petroleum products'
oil_fr = 'produits pétroliers raffinés'
elif oil_type == 'heavycrude_export':
oil_en = 'heavy crude oil'
oil_fr = 'pétrole brut lourd'
return oil_en , oil_fr
if application_type(df)[0] == 'ngl':
ngl_en,ngl_fr = str(),str()
return ngl_en , ngl_fr
else:
return ('other comms....')
exit
except ValueError:
return 'Value'
except TypeError:
return 'Type'
#**************************************************************************************************
# input: month of the year in English in full version
# output: French months
# This function converts English months to French
#**************************************************************************************************
def month_to_french(month):
fr_months = ['janvier','février','mars','avril','mai','juin','juillet','août','septembre','octobre','novembre','décembre']
switcher = {
"January": fr_months[0],
"February": fr_months[1],
"March": fr_months[2],
"April": fr_months[3],
"May": fr_months[4],
"June": fr_months[5],
"July": fr_months[6],
"August": fr_months[7],
"September": fr_months[8],
"October": fr_months[9],
"November": fr_months[10],
"December": fr_months[11],
}
# get() method of dictionary data type returns
# value of passed argument if it is present
# in dictionary otherwise second argument will
# be assigned as default value of passed argument
return switcher.get(month, "nothing")
#**************************************************************************************************
# input: Date in the form of XX Month(English) XXXX
# output: French version
# This function converts English date to French
#**************************************************************************************************
def date_french(date_en:str)-> str:
try:
return(date_en.split()[0]) + ' '+ month_to_french(date_en.split()[1]) + ' ' + str(date_en.split()[2])
except ValueError:
return 'Value'
except TypeError:
return 'Type'
except:
return 'Wrong date format'
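#**************************************************************************************************
# Illustrative sketch (not part of the original script): date_french swaps the
# English month name for its French equivalent, keeping day and year untouched.
#**************************************************************************************************
def _example_date_french():
    return date_french('01 March 2020')  # -> '01 mars 2020'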
#**************************************************************************************************
#Skip the Weekends
#reference: https://stackoverflow.com/questions/12691551/add-n-business-days-to-a-given-date-ignoring-holidays-and-weekends-in-python/23352801
#**************************************************************************************************
def add_business_days(from_date, ndays):
business_days_to_add = abs(ndays)
current_date = from_date
sign = ndays/abs(ndays)
while business_days_to_add > 0:
current_date += datetime.timedelta(sign * 1)
weekday = current_date.weekday()
if weekday >= 5: # 5 = Saturday, 6 = Sunday
continue
business_days_to_add -= 1
return current_date
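# Illustrative usage (added sketch; the _demo_ helper and the sample date are assumptions,
# not part of the original script). add_business_days walks forward one calendar day at a
# time and only counts weekdays, so 2 business days after Friday 01 January 2021 lands on
# Tuesday 05 January 2021.
def _demo_add_business_days():
    return add_business_days(pd.to_datetime("01 January 2021"), 2)   # Timestamp('2021-01-05')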
###############################################################################
#Input: index[0] of output tuple function formfileds_by_filingId(...)
#Output: Order start and end date
###############################################################################
def commence_end_order_gas(ctype:str, df:pd.DataFrame) -> list:
export_order_commence_date = str()
export_order_termination_date = str()
import_order_commence_date =str()
import_order_termination_date = str()
export_order_commence_date_fr = str()
export_order_termination_date_fr = str()
import_order_commence_date_fr = str()
import_order_termination_date_fr = str()
dt = df.AddedOn[0].date()
application_date = dt.strftime("%d %B %Y")
try:
if ctype[0] == 'gas':
#For a period of two years less one day commencing upon approval of the Board
if df.loc[df['ASPFieldIdName'] == 'rbl_s15ab_ShrtTrmNtrlGs_ImprtExprt_Athrztns_ExportOrder_TimeFrame','ASPFieldIdValue'].values[0] == '1':
#commences the day after application received date
ex_order_commence_date = add_business_days(pd.to_datetime(application_date),2)
export_order_commence_date = ex_order_commence_date.strftime("%d %B %Y")
export_order_commence_date_fr = date_french(export_order_commence_date) if len(export_order_commence_date.split()) == 3 else 'NULL'
ex_order_termination_date = ex_order_commence_date + pd.DateOffset(years=2) - pd.DateOffset(days=1)
export_order_termination_date = ex_order_termination_date.strftime("%d %B %Y")
export_order_termination_date_fr = date_french(export_order_termination_date) if len(export_order_termination_date.split()) == 3 else 'NULL'
if df.loc[df['ASPFieldIdName'] == 'rbl_s15ab_ShrtTrmNtrlGs_ImprtExprt_Athrztns_ImportOrder_TimeFrame','ASPFieldIdValue'].values[0] == '1':
#commences the day after application received date
im_order_commence_date = add_business_days( | pd.to_datetime(application_date) | pandas.to_datetime |
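# Worked sketch of the "two years less one day" term computed above (added illustration;
# the sample dates are assumptions, not original code):
def _demo_two_year_term():
    commence = pd.to_datetime("05 January 2021")
    # commence + 2 years = 2023-01-05; subtracting one day gives the termination date
    return commence + pd.DateOffset(years=2) - pd.DateOffset(days=1)   # Timestamp('2023-01-04')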
# D:\Users\kozgen\PROJECT\log_channel.log file contains logs of discord server
# D:\Users\kozgen\PROJECT\bans-filtered.json file contains filtered logs of log_channel.log
# D:\Users\kozgen\PROJECT\welcome.log file contains member join logs of discord server
# Read files and import them to dataframes and visualize them
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json
import datetime
# import traceback so stack traces can be printed on errors
import traceback
# Read bans-filtered.json file and convert it to a dataframe
print("Reading bans-filtered.json file...")
# Maximum timestamp gap (in milliseconds) allowed between two consecutive (serial) welcome logs:
diff = 1300
# Number of such consecutive joins that must be exceeded to flag a burst:
threshold = 15
bf = None
wf = None
bf_f = None
with open('D:\\Users\\kozgen\\PROJECT\\bans-filtered.json', encoding="utf8") as f:
#Also add welcome.log file
with open('D:\\Users\\kozgen\\PROJECT\\welcome.log', encoding="utf8") as f2:
try:
# File content is: {"1": {"channelId":"772827177804365885","guildId":"534402283149197319","deleted":false,"id":"923517752239620156","createdTimestamp":1640254190264,"type":"GUILD_MEMBER_JOIN","system":true,"content":"","authorId":"739610256614883439","pinned":false,"tts":false,"nonce":null,"embeds":[],"components":[],"attachments":[],"stickers":[],"editedTimestamp":null,"webhookId":null,"groupActivityApplicationId":null,"applicationId":null,"activity":null,"flags":0,"reference":null,"interaction":null,"cleanContent":""},
#"2": {"channelId":"772827177804365885","guildId":"534402283149197319","deleted":false,"id":"923495624203198485","createdTimestamp":1640248914529,"type":"GUILD_MEMBER_JOIN","system":true,"content":"","authorId":"8<PASSWORD>","pinned":false,"tts":false,"nonce":null,"embeds":[],"components":[],"attachments":[],"stickers":[],"editedTimestamp":null,"webhookId":null,"groupActivityApplicationId":null,"applicationId":null,"activity":null,"flags":0,"reference":null,"interaction":null,"cleanContent":""}}
# Load the json file into a dataframe
bf = pd.read_json(f, orient='index')
wf = pd.read_json(f2, orient='index')
# Columns are #Index(['channelId', 'guildId', 'deleted', 'id', 'createdTimestamp', 'type','system', 'content', 'authorId', 'pinned', 'tts', 'nonce', 'embeds','components', 'attachments', 'stickers', 'editedTimestamp', 'webhookId','groupActivityApplicationId', 'applicationId', 'activity', 'flags','reference', 'interaction', 'cleanContent'],dtype='object')
# Filter the welcome logs with this rule: if more than `threshold` logs arrive within `diff` milliseconds of one another, add them to the filtered dataframe. createdTimestamp is in epoch milliseconds.
bf_f = pd.DataFrame()
# Add a new column createdTimestamp_formatted to the dataframe. It is in format of MM/DD/YYYY.
bf['createdTimestamp_formatted'] = pd.to_datetime(bf['createdTimestamp'], unit='ms').dt.strftime('%m/%d/%Y')
# Print createdTimestamp_formatted column
wf['createdTimestamp_formatted'] = pd.to_datetime(wf['createdTimestamp'], unit='ms').dt.strftime('%m/%d/%Y')
print(bf['createdTimestamp_formatted'])
except Exception as e:
print("Error: " + str(e))
traceback.print_exc()
# Create a histogram and show createdTimestamp_formatted column with counts
plt.figure(figsize=(30,10))
plt.hist(bf['createdTimestamp_formatted'], bins=len(bf['createdTimestamp_formatted'].unique()))
plt.xticks(rotation=90)
plt.show()
# Create a histogram and show createdTimestamp_formatted
plt.figure(figsize=(30,10))
plt.hist(wf['createdTimestamp_formatted'], bins=len(wf['createdTimestamp_formatted'].unique()))
plt.xticks(wf['createdTimestamp_formatted'][::400], rotation=90)
plt.show()
# Print banned user count for each day
print(bf['createdTimestamp_formatted'].value_counts())
combined = pd.DataFrame()
# Add empty date, bans, and welcome logs columns to the combined dataframe
combined['date'] = ""
combined['bans'] = 0
combined['welcome'] = 0
# Visualize both dataframes together in a stacked bar chart.
# For each unique date in the welcome logs, count the number of bans and welcome logs.
for i in wf['createdTimestamp_formatted'].unique():
# Insert row to combined dataframe. Date, banned count and welcome logs count.
combined.loc[len(combined)] = [i, bf[bf['createdTimestamp_formatted'] == i]['id'].count(), wf[wf['createdTimestamp_formatted'] == i]['id'].count()]
# Visualize combined dataframe in a stacked bar chart. X axis is date, Y axis is bans and welcome logs.
plt.figure(figsize=(30,10))
plt.bar(combined['date'], combined['bans'], color='red', label='Bans')
plt.bar(combined['date'], combined['welcome'], bottom=combined['bans'], color='green', label='Welcome logs')
plt.xticks(combined['date'][::20], rotation=90)
plt.legend()
plt.show()
# Create a new wf_f dataframe with same columns as wf dataframe.
wf_f = pd.DataFrame( columns=wf.columns )
# Print column names
print(wf_f.columns)
print(wf.columns)
current_date = ""
# For each unique MM/DD/YYYY in createdTimestamp_formatted, compare consecutive rows of that date; if more than `threshold` logs follow one another within `diff` milliseconds, add them to the filtered dataframe. createdTimestamp is in epoch milliseconds.
for i in wf['createdTimestamp_formatted'].unique():
# Create a temporary dataframe to store the consecutive logs that come after each other.
tmp = pd.DataFrame( columns=wf.columns )
# For all rows in wf that have same createdTimestamp_formatted as i
for j in wf[wf['createdTimestamp_formatted'] == i].index:
# If this is the last row in wf, then break
if j == len(wf) - 1:
break
# If the gap between the current row and the next row is less than diff milliseconds, add the current row to the temporary frame.
if abs(wf.iloc[j]['createdTimestamp'] - wf.iloc[j+1]['createdTimestamp']) < diff:
# Append elements to temporary dataframe
tmp.loc[len(tmp)] = wf.loc[j]
# If the length of the temporary dataframe is more than threshold number of logs, then add the temporary dataframe to the filtered dataframe.
if len(tmp) > threshold:
# Add the temporary dataframe to the filtered dataframe.
wf_f = wf_f.append(tmp)
# Print length of tmp for date i
print("Date: " + str(i) + " Length: " + str(len(tmp)))
# Create a new dataframe storing, for each date, the raw welcome frequency, the filtered frequency, and the ban count.
wf_f_f = pd.DataFrame( columns= ['date', 'frequency', 'filtered_frequency', 'bans'] )
# For all unique dates in wf_f dataframe, count the number of logs.
for i in wf_f['createdTimestamp_formatted'].unique():
# Insert row to wf_f_f dataframe. Date, frequency, filtered frequency and ban count.
frequency = wf[wf['createdTimestamp_formatted'] == i]['id'].count()
filtered_frequency = wf_f[wf_f['createdTimestamp_formatted'] == i]['id'].count()
bans = bf[bf['createdTimestamp_formatted'] == i]['id'].count()
wf_f_f.loc[len(wf_f_f)] = [i, frequency, filtered_frequency, bans]
wf_f_f.loc[len(wf_f_f)] = ['11/06/2021', wf[wf['createdTimestamp_formatted'] == '11/06/2021']['id'].count(), wf_f[wf_f['createdTimestamp_formatted'] == '11/06/2021']['id'].count(), bf[bf['createdTimestamp_formatted'] == '11/06/2021']['id'].count()]
try:
# Add 00:00:00 to the end of each date in date column
wf_f_f['date'] = wf_f_f['date'] + ' 00:00:00'
# Convert date column to datetime format then sort by date
wf_f_f['date'] = pd.to_datetime(wf_f_f['date'])
wf_f_f = wf_f_f.sort_values(by='date')
# Convert date column to string format as MM/DD/YYYY
wf_f_f['date'] = wf_f_f['date'].dt.strftime('%m/%d/%Y')
except Exception as e:
print("Error: " + str(e))
traceback.print_exc()
# Visualize as a grouped bar chart. X axis is date; filtered frequency is red, frequency is green, bans are blue.
fig, ax = plt.subplots()
fig.set_size_inches(30,10)
x = np.arange(len(wf_f_f['date']))
width = 0.3
ax.bar(x - width, wf_f_f['filtered_frequency'], width, label='Filtered frequency', color='r')
ax.bar(x, wf_f_f['frequency'], width, label='Frequency', color='g')
ax.bar(x + width, wf_f_f['bans'], width, label='Bans', color='b')
ax.set_xticks(x)
ax.set_xticklabels(wf_f_f['date'], rotation=90)
ax.legend()
plt.show()
# A dataframe to store banned user names
banned_users = pd.DataFrame( columns= ['username', 'label'] )
# Get user ids and usernames of users banned on 11/02/2021, 11/03/2021 and 11/06/2021.
# username is at "embeds":[{"type":"rich","title":null,"description":"<@373940551818412035> godxdchan#3351","url":null,"color":16729871,"timestamp":1633375294360,"fields":[],"image":null,"video":null,"provider":null}] godxdchan#3351 is username in this case and 373940551818412035 is user id.
try:
for bans in bf['createdTimestamp_formatted'].unique():
if bans == '11/02/2021' or bans == '11/03/2021' or bans == '11/06/2021':
# Convert createdTimestamp in milliseconds epoch to time and print id and username of users that have banned on 11/02/2021, 11/03/2021 and 11/06/2021.
desc = bf[bf['createdTimestamp_formatted'] == bans]['embeds'].apply(lambda x: x[0]['description'])
# Extract the user id and username from the description inside embeds: "<@373940551818412035> godxdchan#3351" -> 373940551818412035 godxdchan#3351
# Get description from embeds column
for i in desc:
# From "<@373940551818412035> godxdchan#3351" get 373940551818412035
id = i.split('@')[1].split('>')[0]
# Get username from description
name = i.split('>')[1].split('#')[0]
banned_users.loc[len(banned_users)] = [name, 1]
# Print user id and username
print(bans + " " + id + " " + banned_users.loc[len(banned_users)-1]['username'])
except Exception as e:
print("Error: " + str(e))
traceback.print_exc()
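# Minimal parsing sketch (added example; parse_ban_description is a hypothetical helper,
# not in the original script) for the embed description format documented above:
# "<@373940551818412035> godxdchan#3351" -> ("373940551818412035", "godxdchan")
def parse_ban_description(description):
    user_id = description.split('@')[1].split('>')[0]
    username = description.split('>')[1].split('#')[0].strip()
    return user_id, username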
# Print 5 top and bottom banned users.
print(banned_users.sort_values(by='label', ascending=False).head(5))
print(banned_users.sort_values(by='label', ascending=True).head(5))
try:
all_users = pd.DataFrame( columns= ['username', 'label'] )
# Read allusers.txt from D:\Users\kozgen\PROJECT\allusers.txt
# File contents is like [{"id":"772805089035419678","username":"Yasin başar","discriminator":"3912","createdTimestamp":1604321491250,"avatarURL":null,"defaultAvatarURL":"https://cdn.discordapp.com/embed/avatars/2.png"},{"id":"324880771393519618","username":"Irresistable","discriminator":"0189","createdTimestamp":1497528011893,"avatarURL":"https://cdn.discordapp.com/avatars/324880771393519618/450ce5b6dcda02e993ca7f4f2bcfb0a2.webp","defaultAvatarURL":"https://cdn.discordapp.com/embed/avatars/4.png"}]
with open('D:\\Users\\kozgen\\PROJECT\\allusers.txt', 'r', encoding="utf8") as f:
# Read allusers.txt
data = json.load(f)
# Convert array to dataframe
df = pd.DataFrame(data)
# Check defaultAvatarURL and avatarURL and if they are not null and not equal print id and username
print("Finished reading allusers.txt")
# print(all_users)
except Exception as e:
print("Error: " + str(e))
traceback.print_exc()
# Print unique usernames from df
print(df['username'].unique())
# Import scikit-learn and use it to create the classifiers.
import sklearn
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import (AdaBoostClassifier, AdaBoostRegressor,
                              BaggingClassifier, BaggingRegressor,
                              ExtraTreesClassifier, ExtraTreesRegressor,
                              GradientBoostingClassifier, GradientBoostingRegressor,
                              IsolationForest, RandomForestClassifier,
                              RandomForestRegressor, RandomTreesEmbedding,
                              StackingClassifier, StackingRegressor,
                              VotingClassifier, VotingRegressor)
from sklearn import preprocessing
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
from sklearn.svm import SVC
from IPython.display import Javascript
# Print size of df
print(len(df))
# Create a training set.
# We will create a training set by creating a dataframe with user names. Add a label column to the dataframe.
train_users = pd.DataFrame( columns= ['username', 'label'] )
# Create a test set with remaining user names.
test_users = pd.DataFrame( columns= ['username', 'label'] )
regular_ones = pd.DataFrame( columns= ['username', 'label'] )
irregular_ones = pd.DataFrame( columns= ['username', 'label'] )
# Add user names from the df dataframe to regular_ones with label 0.
for i in df['username'].unique():
# Insert userid to all_users dataframe
regular_ones.loc[len(regular_ones)] = [i, 0]
# Add banned user names from the banned_users dataframe to irregular_ones with label 1.
for i in banned_users['username'].unique():
# Insert userid to all_users dataframe
irregular_ones.loc[len(irregular_ones)] = [i, 1]
try:
# Print length of all_users dataframe
print(len(all_users))
# Print length of irregular_ones dataframe
#print("Irregular ones: " + str(len(irregular_ones)))
rat_increase = 0.05
rat = 0.05
mispreds_df = | pd.DataFrame(columns=['classifier','array', 'training_ratio', 'array_size'] ) | pandas.DataFrame |
from flask import Flask, render_template, url_for, request, redirect
import pickle
import math
import pandas as pd
import numpy as np
app = Flask(__name__)
predicted_score = None
@app.route('/')
def index():
return render_template('index.html', score=predicted_score)
@app.route('/predict', methods=['GET', 'POST'])
def prediction():
with open('models/model.pkl' , 'rb') as f:
lr = pickle.load(f)
global predicted_score
if request.method == 'POST':
req = request.form
predict = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import warnings
from numpy import cumsum, log, polyfit, sqrt, std, subtract
from datetime import datetime, timedelta
import scipy.stats as st
import statsmodels.api as sm
import math
import matplotlib
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.stats import norm
from scipy import poly1d
warnings.simplefilter(action='ignore', category=Warning)
import plotly.express as px
import plotly.graph_objects as go
import scipy.stats as stats
from pandas.tseries.offsets import BDay
from plotly.subplots import make_subplots
matplotlib.rcParams['figure.figsize'] = (25.0, 15.0)
matplotlib.style.use('ggplot')
pd.set_option('display.float_format', lambda x: '%.4f' % x)
import plotly.io as pio
from numpy import median, mean
pio.templates.default = "plotly_white"
from functools import reduce
class Broker():
def __init__(self,
instrument=None,
strategy_obj=None,
min_tick_increment = 0.0001,
tick_value = 4.2,
entry_slippage_ticks = 12,
exit_slippage_ticks = 12,
default_lot_per_trade = 1,
use_default_lot_size = True,
trading_stop_day = 28,
overnight = True,
transaction_cost = 1,
pass_history = 1,
bid_data = None,
ask_data = None,
classifier = None,
classifier_type = 'keras'):
self.instrument = instrument
self.bid_data = bid_data
self.ask_data = ask_data
self.min_tick_increment = min_tick_increment
self.tick_value = tick_value
self.entry_slippage_ticks = entry_slippage_ticks
self.exit_slippage_ticks = exit_slippage_ticks
self.strategy_obj = strategy_obj
self.trading_stop_day = trading_stop_day
self.overnight = overnight
self.transaction_cost = transaction_cost
self.pass_history = pass_history
self.classifier = classifier
self.classifier_type = classifier_type
self.entry_price = None
self.exit_price = None
self.stop_price = None
self.target_price = None
self.position = 0
self.pnl = 0
self.lot_size = 0
self.default_lot_size = default_lot_per_trade
self.use_default_lot_size = use_default_lot_size
self.entry_bid_open = None
self.entry_bid_high = None
self.entry_bid_low = None
self.entry_bid_close = None
self.entry_bid_volume = None
self.exit_bid_open = None
self.exit_bid_high = None
self.exit_bid_low = None
self.exit_bid_close = None
self.exit_bid_volume = None
self.entry_ask_open = None
self.entry_ask_high = None
self.entry_ask_low = None
self.entry_ask_close = None
self.entry_ask_volume = None
self.exit_ask_open = None
self.exit_ask_high = None
self.exit_ask_low = None
self.exit_ask_close = None
self.exit_ask_volume = None
self.cumulative_pnl_array = []
self.pnl_array = []
self.cumulative_pnl = 0
self.trade_id = -1
self.TSL_logs = None
self.TSL_time_logs = None
self.trade_type = None
self.entry_time = None
self.exit_time = None
self.exit_type = None
self.max_adverse_excursion = None
self.max_favor_excursion = None
self.tradeLog = pd.DataFrame(columns=['Trade ID',
'Trade Type',
'Entry Bid Params',
'Entry Ask Params',
'Entry Time',
'Entry Price',
'Lots',
'Target Price',
'TSL',
'TSL time',
'Stop Price',
'Exit Bid Params',
'Exit Ask Params',
'Exit Time',
'Exit Price',
'PNL',
'Holding Time',
'Exit Type',
'Transaction Cost',
'MFE',
'MAE',
'Entry Efficiency',
'Exit Efficiency',
'Total Efficiency',
'ETD'
])
def tradeExit(self):
self.tradeLog.loc[self.trade_id, 'Trade ID'] = self.trade_id
self.tradeLog.loc[self.trade_id, 'Trade Type'] = self.trade_type
self.tradeLog.loc[self.trade_id, 'Entry Bid Params'] = (round(self.entry_bid_open,4), round(self.entry_bid_high,4), round(self.entry_bid_low,4), round(self.entry_bid_close,4), self.entry_bid_volume)
self.tradeLog.loc[self.trade_id, 'Entry Ask Params'] = (round(self.entry_ask_open,4), round(self.entry_ask_high,4), round(self.entry_ask_low,4), round(self.entry_ask_close,4), self.entry_ask_volume)
self.tradeLog.loc[self.trade_id, 'Entry Time'] = pd.to_datetime(self.entry_time, infer_datetime_format= True)
self.tradeLog.loc[self.trade_id, 'Entry Price'] = self.entry_price
self.tradeLog.loc[self.trade_id, 'Lots'] = self.lot_size
self.tradeLog.loc[self.trade_id, 'Target Price'] = self.target_price
self.tradeLog.loc[self.trade_id, 'TSL'] = self.TSL_logs
self.tradeLog.loc[self.trade_id, 'TSL time'] = self.TSL_time_logs
self.tradeLog.loc[self.trade_id, 'Stop Price'] = self.stop_price
self.tradeLog.loc[self.trade_id, 'Exit Bid Params'] = (round(self.exit_bid_open,4), round(self.exit_bid_high,4), round(self.exit_bid_low,4), round(self.exit_bid_close,4), self.exit_bid_volume)
self.tradeLog.loc[self.trade_id, 'Exit Ask Params'] = (round(self.exit_ask_open,4), round(self.exit_ask_high,4), round(self.exit_ask_low,4), round(self.exit_ask_close,4), self.exit_ask_volume)
self.tradeLog.loc[self.trade_id, 'Exit Time'] = pd.to_datetime(self.exit_time, infer_datetime_format= True)
self.tradeLog.loc[self.trade_id, 'Exit Price'] = self.exit_price
self.tradeLog.loc[self.trade_id, 'PNL'] = self.pnl - (self.transaction_cost * self.lot_size)
self.tradeLog.loc[self.trade_id, 'Holding Time'] = (self.exit_time - self.entry_time)
self.tradeLog.loc[self.trade_id, 'Exit Type'] = self.exit_type
self.tradeLog.loc[self.trade_id, 'Transaction Cost'] = self.transaction_cost * self.lot_size
if self.max_favor_excursion is not None:
self.tradeLog.loc[self.trade_id, 'MFE'] = abs(self.max_favor_excursion / self.min_tick_increment)
elif self.max_favor_excursion is None:
self.tradeLog.loc[self.trade_id, 'MFE'] = 0
if self.max_adverse_excursion is not None:
self.tradeLog.loc[self.trade_id, 'MAE'] = abs(self.max_adverse_excursion / self.min_tick_increment)
elif self.max_adverse_excursion is None:
self.tradeLog.loc[self.trade_id, 'MAE'] = 0
if self.max_favor_excursion is not None and self.max_adverse_excursion is not None and self.exit_price is not None:
movement_range = (self.max_favor_excursion + self.max_adverse_excursion)
if self.trade_type == 1:
minimum_price_seen = self.entry_price - self.max_adverse_excursion
maximum_price_seen = self.entry_price + self.max_favor_excursion
if self.trade_type == -1:
minimum_price_seen = self.entry_price + self.max_adverse_excursion
maximum_price_seen = self.entry_price - self.max_favor_excursion
self.tradeLog.loc[self.trade_id, 'Entry Efficiency'] = abs((maximum_price_seen- self.entry_price)/(movement_range))*100
self.tradeLog.loc[self.trade_id, 'Exit Efficiency'] = abs((self.exit_price - minimum_price_seen)/(movement_range))*100
self.tradeLog.loc[self.trade_id, 'ETD'] = abs(self.max_favor_excursion - abs(self.entry_price-self.exit_price))*self.min_tick_increment
if self.trade_type == 1:
self.tradeLog.loc[self.trade_id, 'Total Efficiency'] = abs((self.exit_price - self.entry_price)/(movement_range))*100
elif self.trade_type == -1:
self.tradeLog.loc[self.trade_id, 'Total Efficiency'] = abs((self.entry_price - self.exit_price)/(movement_range))*100
elif self.max_favor_excursion is None or self.max_adverse_excursion is None or self.exit_price is None:
self.tradeLog.loc[self.trade_id, 'Entry Efficiency'] = 0
self.tradeLog.loc[self.trade_id, 'Exit Efficiency'] = 0
self.tradeLog.loc[self.trade_id, 'Total Efficiency'] = 0
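# Worked example of the efficiency bookkeeping above (added comment; the prices are
# illustrative, not from real data): a long trade entered at 1.1000 that saw a low of
# 1.0950 (MAE = 0.0050) and a high of 1.1100 (MFE = 0.0100) and exited at 1.1050 has
# movement_range = 0.0150, entry efficiency = 0.0100/0.0150*100 ≈ 66.7%,
# exit efficiency = (1.1050-1.0950)/0.0150*100 ≈ 66.7%, and
# total efficiency = (1.1050-1.1000)/0.0150*100 ≈ 33.3%.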
def testerAlgo(self):
def takeEntry():
current_month = self.bid_data.index[i].month
current_day_of_month = self.bid_data.index[i].day
if self.classifier_type=='keras':
if len(self.tradeLog) > 5:
secondary_df = self.tradeLog
temp_tradelog = pd.DataFrame()
temp_tradelog['PNL'] = secondary_df['PNL']
temp_tradelog['Trade Type'] = secondary_df['Trade Type']
temp_tradelog['Month'] = pd.to_datetime(secondary_df['Entry Time']).dt.month
temp_tradelog['Entry Hour'] = | pd.to_datetime(secondary_df['Entry Time']) | pandas.to_datetime |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp(
'2011-01-02 10:00', tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00',
tz=tz)])
self.assert_series_equal(expected, result)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2013-01-01'), Timestamp('2011-01-03 10:00', tz=tz), Timestamp(
'2013-01-01')])
self.assert_series_equal(expected, result)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
self.assert_series_equal(expected, result)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(value=5), exp)
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = | Series([999, np.nan, np.nan], index=[0, 1, 2]) | pandas.Series |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import mars.dataframe as md
import mars.tensor as mt
from mars.tests.core import require_cudf, require_cupy
from mars.utils import lazy_import
cupy = lazy_import('cupy', globals=globals())
cudf = lazy_import('cudf', globals=globals())
def test_dataframe_initializer(setup):
# from tensor
raw = np.random.rand(100, 10)
tensor = mt.tensor(raw, chunk_size=7)
r = md.DataFrame(tensor)
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, pd.DataFrame(raw))
r = md.DataFrame(tensor, chunk_size=13)
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, pd.DataFrame(raw))
# from Mars dataframe
raw = pd.DataFrame(np.random.rand(100, 10), columns=list('ABCDEFGHIJ'))
df = md.DataFrame(raw, chunk_size=15) * 2
r = md.DataFrame(df, num_partitions=11)
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, raw * 2)
# from tileable dict
raw_dict = {
'C': np.random.choice(['u', 'v', 'w'], size=(100,)),
'A': pd.Series(np.random.rand(100)),
'B': np.random.randint(0, 10, size=(100,)),
}
m_dict = raw_dict.copy()
m_dict['A'] = md.Series(m_dict['A'])
m_dict['B'] = mt.tensor(m_dict['B'])
r = md.DataFrame(m_dict, columns=list('ABC'))
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, pd.DataFrame(raw_dict, columns=list('ABC')))
# from tileable list
raw_list = [
np.random.choice(['u', 'v', 'w'], size=(3,)),
pd.Series(np.random.rand(3)),
np.random.randint(0, 10, size=(3,))
]
m_list = raw_list.copy()
m_list[1] = md.Series(m_list[1])
m_list[2] = mt.tensor(m_list[2])
r = md.DataFrame(m_list, columns=list('ABC'))
result = r.execute(extra_config={'check_dtypes': False}).fetch()
pd.testing.assert_frame_equal(result, pd.DataFrame(raw_list, columns=list('ABC')))
# from raw pandas initializer
raw = pd.DataFrame(np.random.rand(100, 10), columns=list('ABCDEFGHIJ'))
r = md.DataFrame(raw, num_partitions=10)
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, raw)
# from mars series
raw_s = np.random.rand(100)
s = md.Series(raw_s, chunk_size=20)
r = md.DataFrame(s, num_partitions=10)
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, pd.DataFrame(raw_s))
# test check instance
r = r * 2
assert isinstance(r, md.DataFrame)
@require_cudf
@require_cupy
def test_dataframe_gpu_initializer(setup_gpu):
# from raw cudf initializer
raw = cudf.DataFrame(cupy.random.rand(100, 10), columns=list('ABCDEFGHIJ'))
r = md.DataFrame(raw, chunk_size=13)
result = r.execute().fetch()
pd.testing.assert_frame_equal(result.to_pandas(), raw.to_pandas())
raw = cupy.random.rand(100, 10)
r = md.DataFrame(raw, columns=list('ABCDEFGHIJ'), chunk_size=13)
result = r.execute().fetch()
expected = cudf.DataFrame(raw, columns=list('ABCDEFGHIJ'))
pd.testing.assert_frame_equal(result.to_pandas(), expected.to_pandas())
def test_series_initializer(setup):
# from tensor
raw = np.random.rand(100)
tensor = mt.tensor(raw, chunk_size=7)
r = md.Series(tensor)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.Series(raw))
r = md.Series(tensor, chunk_size=13)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, | pd.Series(raw) | pandas.Series |
import scanpy as sc
import pandas as pd
import numpy as np
import scipy
import os
from anndata import AnnData,read_csv,read_text,read_mtx
from scipy.sparse import issparse
def prefilter_cells(adata,min_counts=None,max_counts=None,min_genes=200,max_genes=None):
if min_genes is None and min_counts is None and max_genes is None and max_counts is None:
raise ValueError('Provide one of min_counts, min_genes, max_counts or max_genes.')
id_tmp=np.asarray([True]*adata.shape[0],dtype=bool)
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,min_genes=min_genes)[0]) if min_genes is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,max_genes=max_genes)[0]) if max_genes is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,min_counts=min_counts)[0]) if min_counts is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,max_counts=max_counts)[0]) if max_counts is not None else id_tmp
adata._inplace_subset_obs(id_tmp)
adata.raw=sc.pp.log1p(adata,copy=True) #check the rowname
print("the var_names of adata.raw: adata.raw.var_names.is_unique=:",adata.raw.var_names.is_unique)
def prefilter_genes(adata,min_counts=None,max_counts=None,min_cells=10,max_cells=None):
if min_cells is None and min_counts is None and max_cells is None and max_counts is None:
raise ValueError('Provide one of min_counts, min_genes, max_counts or max_genes.')
id_tmp=np.asarray([True]*adata.shape[1],dtype=bool)
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,min_cells=min_cells)[0]) if min_cells is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,max_cells=max_cells)[0]) if max_cells is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,min_counts=min_counts)[0]) if min_counts is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,max_counts=max_counts)[0]) if max_counts is not None else id_tmp
adata._inplace_subset_var(id_tmp)
def prefilter_specialgenes(adata,Gene1Pattern="ERCC",Gene2Pattern="MT-"):
id_tmp1=np.asarray([not str(name).startswith(Gene1Pattern) for name in adata.var_names],dtype=bool)
id_tmp2=np.asarray([not str(name).startswith(Gene2Pattern) for name in adata.var_names],dtype=bool)
id_tmp=np.logical_and(id_tmp1,id_tmp2)
adata._inplace_subset_var(id_tmp)
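# Typical preprocessing chain built from the helpers above (added sketch; `adata` is an
# AnnData object assumed to be loaded elsewhere, and the cutoffs are illustrative):
def _demo_prefilter(adata):
    prefilter_genes(adata, min_cells=3)      # drop genes detected in very few cells
    prefilter_specialgenes(adata)            # drop ERCC spike-ins and MT- genes
    prefilter_cells(adata, min_genes=200)    # drop low-complexity cells; also sets adata.raw
    return adata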
def relative_func(expres):
#expres: an array counts expression for a gene
maxd = np.max(expres) - np.min(expres)
min_exp=np.min(expres)
rexpr = (expres - min_exp)/maxd
return rexpr
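# Illustrative usage of relative_func (added sketch; the sample counts are assumptions):
def _demo_relative_func():
    counts = np.array([2.0, 5.0, 8.0])
    return relative_func(counts)   # expected: array([0. , 0.5, 1. ])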
def plot_relative_exp(input_adata, gene, x_name, y_name,color,use_raw=False, spot_size=200000):
adata=input_adata.copy()
if use_raw:
X=adata.raw.X
else:
X=adata.X
if issparse(X):
X=pd.DataFrame(X.A)
else:
X=pd.DataFrame(X)
X.index=adata.obs.index
X.columns=adata.var.index
rexpr=relative_func(X.loc[:,gene])
adata.obs["rexpr"]=rexpr
fig=sc.pl.scatter(adata,x=x_name,y=y_name,color="rexpr",title=gene+"_rexpr",color_map=color,show=False,size=spot_size/adata.shape[0])
return fig
def plot_log_exp(input_adata, gene, x_name, y_name,color,use_raw=False):
adata=input_adata.copy()
if use_raw:
X=adata.X
else:
X=adata.raw.X
if issparse(X):
X=pd.DataFrame(X.A)
else:
X=pd.DataFrame(X)
X.index=adata.obs.index
X.columns=adata.var.index
adata.obs["log"]=np.log((X.loc[:,gene]+1).tolist())
fig=sc.pl.scatter(adata,x=x_name,y=y_name,color="log",title=gene+"_log",color_map=color,show=False,size=200000/adata.shape[0])
return fig
def refine_clusters(pred, resize_height, resize_width, threshold, radius):
pixel_num=pd.Series(pred).value_counts()
clusters=pixel_num.index.tolist()
reorder_map={}
for i in range(pixel_num.shape[0]):
reorder_map[clusters[i]]=i
pred_reordered= | pd.Series(pred) | pandas.Series |