| prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90) |
|---|---|---|
"""Tests for the sdv.constraints.tabular module."""
import uuid
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
        - dummy ``transform`` and ``reverse_transform`` functions + ``is_valid`` FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
        It is expected to create a new Constraint instance, receiving the names of
the columns that need to produce unique combinations.
Side effects:
        - instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
        It is expected to return the table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
        It is expected to return the table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
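# Editorial sketch (not part of the original test suite): the tests above
# exercise the full ``UniqueCombinations`` life cycle.  A minimal usage
# round-trip, assuming only the public API shown in those tests
# (``fit`` -> ``transform`` -> ``reverse_transform``), would look like this:
def _example_unique_combinations_round_trip():
    data = pd.DataFrame({
        'a': ['a', 'b', 'c'],
        'b': ['d', 'e', 'f'],
        'c': ['g', 'h', 'i']
    })
    constraint = UniqueCombinations(columns=['b', 'c'])
    constraint.fit(data)
    # 'b' and 'c' are replaced by a joint 'b#c' column of UUIDs.
    transformed = constraint.transform(data)
    # Reversing the transform recovers the original columns.
    restored = constraint.reverse_transform(transformed)
    pd.testing.assert_frame_equal(data, restored)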
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
assert instance._high_is_scalar is None
assert instance._low_is_scalar is None
assert instance._drop is None
def test___init___all_parameters_passed(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
- drop = 'high'
- high_is_scalar = True
- low_is_scalar = False
Side effects:
- instance._low == 'a'
- instance._high == 'b'
        - instance._strict == True
        - instance._drop == 'high'
        - instance._high_is_scalar == True
        - instance._low_is_scalar == False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True, drop='high',
high_is_scalar=True, low_is_scalar=False)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop == 'high'
def test_fit__low_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a scalar if ``_low_is_scalar`` is None.
Input:
- Table without ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is True
def test_fit__low_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a column name if ``_low_is_scalar`` is None.
Input:
- Table with ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is False
def test_fit__high_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a scalar if ``_high_is_scalar`` is None.
Input:
- Table without ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is True
def test_fit__high_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a column name if ``_high_is_scalar`` is None.
Input:
- Table with ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is False
def test_fit__high_is_scalar__low_is_scalar_raises_error(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should raise an error if
        ``_low_is_scalar`` and ``_high_is_scalar`` are both ``True``.
Input:
- Table with one column.
Side Effect:
- ``TypeError`` is raised.
"""
# Setup
instance = GreaterThan(low=1, high=2)
# Run / Asserts
table_data = pd.DataFrame({'a': [1, 2, 3]})
with pytest.raises(TypeError):
instance.fit(table_data)
def test_fit__column_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
        to ``instance._high`` if ``instance._drop`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
        to ``instance._low`` if ``instance._drop`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__column_to_reconstruct_default(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `low` if ``instance._high_is_scalar`` is ``True``.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__diff_column_one_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
        - ``_diff_column`` is the constrained column name followed by the token (``'a#'``).
"""
# Setup
instance = GreaterThan(low='a', high=3, high_is_scalar=True)
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#'
def test_fit__diff_column_multiple_columns(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
        to the two columns in ``instance.constraint_columns`` separated
        by a token if both columns are in that set.
        Input:
        - Table with two columns.
        Side Effect:
        - ``_diff_column`` is both column names joined by the token (``'a#b'``).
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#b'
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
        if ``_low_is_scalar`` and ``_high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
        if ``_low_is_scalar`` and ``_high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
        if ``_low_is_scalar`` and ``_high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': | pd.to_datetime(['2020-01-02']) | pandas.to_datetime |
import json
import logging
import timeit
import numpy as np
import pandas as pd
from .mapping import Map
from .mappingprofile import Map_Profile
from juneau.utils.utils import jaccard_similarity
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
registered_attribute = ['last_name', 'first_name', 'full_name', 'gender', 'age', 'email', 'ssn', 'address']
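# Editorial sketch: ``jaccard_similarity`` is imported from
# ``juneau.utils.utils`` and is not defined in this module.  The schema
# mapping below only relies on it returning the Jaccard coefficient
# (size of the intersection over size of the union) of two collections of
# column values.  A reference stand-in with that contract -- an assumption
# about the helper, not a copy of the real implementation -- is:
def _jaccard_similarity_reference(col_a, col_b):
    set_a, set_b = set(col_a), set(col_b)
    if not set_a or not set_b:
        return 0.0
    return float(len(set_a & set_b)) / float(len(set_a | set_b))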
class SchemaMapping(Map):
def __init__(self, sim_thres = 0.5):
self.sim_thres = sim_thres
self.map_class_profile = Map_Profile()
def load_index(self):
if self.map_class_profile:
self.map_class_profile.loading_index()
def dump_index(self):
if self.map_class_profile:
self.map_class_profile.dump_index()
    def mapping_naive(self, tableA, tableB, mapped=None):
        # ``mapped`` defaults to None to avoid Python's shared mutable
        # default-argument pitfall; a dict passed in is still used in place.
        Mpair = mapped if mapped is not None else {}
MpairR = {}
for i in Mpair.keys():
MpairR[Mpair[i]] = i
matching = self.mapping(tableA, tableB, Mpair, MpairR)
for i in range(len(matching)):
if matching[i][2] < self.sim_thres:
break
else:
if matching[i][0] not in Mpair and matching[i][1] not in MpairR:
Mpair[matching[i][0]] = matching[i][1]
MpairR[matching[i][1]] = matching[i][0]
if len(matching) == 0:
rv = 0
else:
rv = matching[0][2]
return Mpair, rv
# Do full schema mapping
    def mapping_naive_incremental(self, tableA, tableB, gid, meta_mapping, schema_linking, unmatched, mapped=None):
        start_time = timeit.default_timer()
        time1 = 0
        # As above, avoid a mutable default argument for ``mapped``.
        Mpair = mapped if mapped is not None else {}
MpairR = {}
for i in Mpair.keys():
MpairR[Mpair[i]] = i
matching = []
t_mapping = {}
for i in tableA.columns.tolist():
if i in Mpair:
continue
if i not in meta_mapping[gid]:
continue
t_mapping[schema_linking[gid][meta_mapping[gid][i]]] = i
for i in tableB.columns.tolist():
if i in MpairR:
continue
if schema_linking[gid][i] in t_mapping:
if tableB[i].dtype != tableA[t_mapping[schema_linking[gid][i]]].dtype:
continue
Mpair[t_mapping[schema_linking[gid][i]]] = i
MpairR[i] = t_mapping[schema_linking[gid][i]]
scma = tableA.columns.tolist()
scmb = tableB.columns.tolist()
shmal = len(scma)
shmbl = len(scmb)
acol_set = {}
for i in range(shmal):
nameA = scma[i]
if nameA in Mpair:
continue
if nameA == "Unnamed: 0" or "index" in nameA:
continue
if nameA not in acol_set:
colA = tableA[scma[i]][~pd.isnull(tableA[scma[i]])].values
acol_set[nameA] = list(set(colA))
else:
colA = acol_set[nameA]
for j in range(shmbl):
nameB = scmb[j] # .split('_')[0].lower()
if nameB in MpairR:
continue
if nameB == "Unnamed: 0" or "index" in nameB:
continue
if tableA[nameA].dtype != tableB[nameB].dtype:
continue
if nameB in unmatched[gid][nameA]:
continue
colB = tableB[scmb[j]][~pd.isnull(tableB[scmb[j]])].values
                # Drop NaN/None entries from colB; ``np.isnan`` raises on
                # non-numeric data, in which case fall back to filtering None.
                try:
                    colB = colB[~np.isnan(colB)]
                except Exception:
                    try:
                        colB = colB[colB != np.array(None)]
                    except Exception:
                        colB = colB
s1 = timeit.default_timer()
sim_col = jaccard_similarity(colA, colB)
e1 = timeit.default_timer()
time1 += e1 - s1
if sim_col < self.sim_thres:
unmatched[gid][nameA][nameB] = ''
matching.append((nameA, nameB, sim_col))
matching = sorted(matching, key=lambda d: d[2], reverse=True)
for i in range(len(matching)):
if matching[i][2] < self.sim_thres:
break
else:
if matching[i][0] not in Mpair and matching[i][1] not in MpairR:
Mpair[matching[i][0]] = matching[i][1]
MpairR[matching[i][1]] = matching[i][0]
for i in tableA.columns.tolist():
if i in Mpair:
if i not in meta_mapping[gid]:
meta_mapping[gid][i] = Mpair[i]
for j in tableB.columns.tolist():
if j != Mpair[i]:
unmatched[gid][i][j] = ''
end_time = timeit.default_timer()
time_total = end_time - start_time
#print('full schema mapping: ', time_total)
return Mpair, meta_mapping, unmatched, time_total
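    # NOTE (editorial, inferred from ``mapping_naive_incremental`` above --
    # treat as an assumption about the expected shapes, not a spec):
    #   meta_mapping[gid][colA]      -> cached column previously matched to colA
    #   schema_linking[gid][cached]  -> group-level schema id of a cached column
    #   unmatched[gid][colA][colB]   -> marker that the pair scored below
    #                                   sim_thres, so it is skipped later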
# Do schema mapping for tables when looking for similar tables
def mapping_naive_tables(self, tableA, valid_keys, schema_element, schema_dtype, tflag = False):
start_time = timeit.default_timer()
time1 = 0
Mpair = {}
MpairR = {}
scma = tableA.columns.values
shmal = len(scma)
acol_set = {}
for group in schema_element.keys():
Mpair[group] = {}
MpairR[group] = {}
matching = []
for i in range(shmal):
nameA = scma[i]
if nameA == "Unnamed: 0" or "index" in nameA:
continue
if nameA not in valid_keys:
continue
if nameA not in acol_set:
colA = tableA[scma[i]][~pd.isnull(tableA[scma[i]])].values
acol_set[nameA] = list(set(colA))
else:
colA = acol_set[nameA]
for j in schema_element[group].keys():
nameB = j
if nameB == "Unnamed: 0" or "index" in nameB:
continue
colB = np.array(schema_element[group][nameB])
if schema_dtype[group][j] is not tableA[nameA].dtype:
continue
try:
colB = colB[~np.isnan(colB)]
except:
try:
colB = colB[colB != np.array(None)]
except:
colB = colB
s1 = timeit.default_timer()
sim_col = jaccard_similarity(colA, colB)
e1 = timeit.default_timer()
time1 += e1 - s1
matching.append((nameA, nameB, sim_col))
matching = sorted(matching, key=lambda d: d[2], reverse=True)
for i in range(len(matching)):
if matching[i][2] < self.sim_thres:
break
else:
if matching[i][0] not in Mpair[group] and matching[i][1] not in MpairR[group]:
Mpair[group][matching[i][0]] = matching[i][1]
MpairR[group][matching[i][1]] = matching[i][0]
end_time = timeit.default_timer()
if tflag:
print('Schema Mapping Before Search: %s Seconds.'%(end_time - start_time))
return Mpair
# Do schema mapping for tables when looking for joinable tables
def mapping_naive_tables_join(self, tableA, valid_keys, schema_element_sample, schema_element, schema_dtype, unmatched, tflag = False):
start_time = timeit.default_timer()
time1 = 0
Mpair = {}
MpairR = {}
scma = tableA.columns.values
shmal = len(scma)
acol_set = {}
for group in schema_element.keys():
Mpair[group] = {}
MpairR[group] = {}
matching = []
for i in range(shmal):
nameA = scma[i]
if nameA == "Unnamed: 0" or "index" in nameA:
continue
if nameA not in valid_keys:
continue
if nameA not in acol_set:
A_index = ~pd.isnull(tableA[nameA])
colA = (tableA[nameA][A_index]).values
acol_set[nameA] = list(set(colA))
else:
colA = acol_set[nameA]
for j in schema_element[group].keys():
nameB = j
if nameB == "Unnamed: 0" or "index" in nameB:
continue
if schema_dtype[group][j] is not tableA[nameA].dtype:
continue
colB = np.array(schema_element[group][nameB])
try:
colB = colB[~np.isnan(colB)]
except:
try:
colB = colB[colB != np.array(None)]
except:
colB = colB
s1 = timeit.default_timer()
                    try:
                        sim_col = jaccard_similarity(colA, colB)
                    except Exception:
                        # Fall back to zero similarity so ``sim_col`` is never
                        # undefined (or stale) when the comparison fails.
                        print(colA)
                        print(colB)
                        sim_col = 0
if sim_col < self.sim_thres:
unmatched[group][nameA][nameB] = ''
e1 = timeit.default_timer()
time1 += e1 - s1
matching.append((nameA, nameB, sim_col))
for i in schema_element_sample[group].keys():
nameB = i
if nameB == "Unnamed: 0" or "index" in nameB:
continue
colB = np.array(schema_element_sample[group][nameB])
try:
colB = colB[~np.isnan(colB)]
except:
try:
colB = colB[colB != np.array(None)]
except:
colB = colB
for j in range(shmal):
nameA = scma[j]
if nameA == "Unnamed: 0" or "index" in nameA:
continue
if nameB in unmatched[group][nameA]:
continue
if nameA not in acol_set:
colA = tableA[nameA][~pd.isnull(tableA[nameA])].values
acol_set[nameA] = list(set(colA))
else:
colA = acol_set[nameA]
if schema_dtype[group][nameB] is not tableA[nameA].dtype:
continue
s1 = timeit.default_timer()
sim_col = jaccard_similarity(colA, colB)
e1 = timeit.default_timer()
time1 += e1 - s1
if sim_col < self.sim_thres:
unmatched[group][nameA][nameB] = ''
matching.append((nameA, nameB, sim_col))
matching = sorted(matching, key=lambda d: d[2], reverse=True)
for i in range(len(matching)):
if matching[i][2] < self.sim_thres:
break
else:
if matching[i][0] not in Mpair[group] and matching[i][1] not in MpairR[group]:
Mpair[group][matching[i][0]] = matching[i][1]
MpairR[group][matching[i][1]] = matching[i][0]
end_time = timeit.default_timer()
if tflag:
print('raw schema mapping: ', end_time - start_time)
print('sim schema mapping: ', time1)
return Mpair, unmatched
# Do schema mapping on Groups
def mapping_naive_groups(self, tableA, tableA_valid, schema_element):
start_time = timeit.default_timer()
time1 = 0
Mpair = {}
MpairR = {}
scma = tableA.columns.values
shmal = len(scma)
acol_set = {}
group_list = []
for group in schema_element.keys():
Mpair[group] = {}
MpairR[group] = {}
matching = []
for i in range(shmal):
nameA = scma[i]
if nameA not in tableA_valid:
continue
if nameA == "Unnamed: 0" or "index" in nameA:
continue
colA = tableA[scma[i]][~pd.isnull(tableA[scma[i]])].values
if nameA not in acol_set:
acol_set[nameA] = list(set(colA))
#try:
# colA = colA[~np.isnan(colA)]
#except:
# try:
# colA = colA[colA != np.array(None)]
# except:
# colA = colA
for j in schema_element[group].keys():
nameB = j
colB = np.array(schema_element[group][nameB])
try:
colB = colB[~np.isnan(colB)]
except:
try:
colB = colB[colB != np.array(None)]
except:
colB = colB
s1 = timeit.default_timer()
sim_col = jaccard_similarity(acol_set[nameA], colB)
e1 = timeit.default_timer()
time1 += e1 - s1
#c1 += 1
matching.append((nameA, nameB, sim_col))
matching = sorted(matching, key=lambda d: d[2], reverse=True)
if len(matching) == 0:
continue
if matching[0][2] < self.sim_thres:
continue
else:
group_list.append(group)
end_time = timeit.default_timer()
return group_list
def mapping_to_columns_index2(self, tableA, tableAname, thres):
tableA = tableA.head(1000)
mapped_pairs = self.map_class_profile.compute_candidate_pairs_index(tableA, thres)
logging.info("matching paris: ")
logging.info(mapped_pairs)
return_mapp_keys = {}
rev_return_mapp_keys = {}
for i, j, k in mapped_pairs:
if i not in return_mapp_keys and j not in rev_return_mapp_keys:
return_mapp_keys[i] = j
rev_return_mapp_keys[j] = i
for colA in tableA.columns.tolist():
if colA not in return_mapp_keys:
if tableA[colA].dtype == object:
if len(set(tableA[colA].dropna().values)) > 20:
return_mapp_keys[colA] = colA
#logging.info(self.bigtable.keys())
self.map_class_profile.update_cached_columns2(return_mapp_keys, tableA, tableAname)
#logging.info("update cache")
#logging.info(self.bigtable.keys())
#logging.info("Update Profile Column Time: " + str(time_updatec_profile))
#times = timeit.default_timer()
self.map_class_profile.update_cached_tindex2(return_mapp_keys, tableA, tableAname)
#logging.info("update dependencies")
#timee = timeit.default_timer()
#time_updatei_profile = time_updatei_profile + timee - times
#logging.info("Update Profile Index Time: " + str(time_updatei_profile))
#self.psql_eng = None
#logging.info("Create Profile Time: " + str(time_create_profile))
#logging.info("Match Profile Time: " + str(time_match_profile))
self.map_class_profile.table_stored_str[tableA.to_string()] = tableAname
return mapped_pairs
def mapping_to_columns_search(self, tableA, thres):
tableA = tableA.dropna().head(500)
mapped_pairs = self.map_class_profile.compute_candidate_pairs_index(tableA, thres)
return_mapp_keys = {}
rev_return_mapp_keys = {}
for i, j, k in mapped_pairs:
if i not in return_mapp_keys and j not in rev_return_mapp_keys:
return_mapp_keys[i] = j
rev_return_mapp_keys[j] = i
return_mapps = {}
return_mapps_cached = {}
for mkey in return_mapp_keys.keys():
if return_mapp_keys[mkey] in self.map_class_profile.bigtable_table:
return_mapps[mkey] = self.map_class_profile.bigtable_table[return_mapp_keys[mkey]]
for mkey in return_mapp_keys.keys():
return_mapps_cached[mkey] = return_mapp_keys[mkey]
return return_mapps, return_mapps_cached
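    # NOTE (editorial, inferred from the code above): ``return_mapps`` maps a
    # query column to the ``bigtable_table`` entry of its matched cached
    # column (i.e. which stored tables contain it), while
    # ``return_mapps_cached`` maps the query column directly to the cached
    # column name.  This is an inferred description, not an authoritative one.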
def add_mapping_by_workflow(self, query_name, partial_mapping, real_tables, table_group):
#get groups
query = real_tables[query_name]
schema_map_partial = {}
connected_tables = []
connected_tables_mapped_col = {}
for pkey in partial_mapping.keys():
for pt in partial_mapping[pkey]:
connected_tables.append(pt[0][6:])
if pt[0][6:] not in connected_tables_mapped_col:
connected_tables_mapped_col[pt[0][6:]] = pt[1]
if pt[0][6:] not in schema_map_partial:
schema_map_partial[pt[0][6:]] = {}
schema_map_partial[pt[0][6:]][pkey] = pt[1]
print(table_group)
print(query_name)
connected_groups = []
connected_groups.append(table_group[query_name[6:]])
for ct in connected_tables:
connected_groups.append(table_group[ct])
connected_groups = list(set(connected_groups))
#do search
for i in real_tables.keys():
if i == query_name:
continue
tname = i[6:]
tableB = real_tables[i]
gid = table_group[tname]
if gid not in connected_groups:
continue
if tname not in schema_map_partial:
better_mapping = self.map_class_profile.schema_mapping_float_prov(query, tableB, {})
else:
better_mapping = self.map_class_profile.schema_mapping_float_prov(query, tableB, schema_map_partial[tname])
schema_map_partial[tname] = better_mapping
return schema_map_partial
def mapping_to_columns_keys_search(self, tableA, thres, num = 1):
mapping, mapping_cached = self.mapping_to_columns_search(tableA, thres)
mapping_key = {}
for keyA in mapping.keys():
mapping_key[keyA] = self.map_class_profile.bigtable_keyscore[mapping_cached[keyA]]
mapping_key = sorted(mapping_key.items(), key = lambda d:d[1], reverse=True)
return_key = []
if len(mapping_key) < num:
for k, s in mapping_key:
return_key.append(k)
return mapping, return_key, mapping_cached
else:
for k, s in mapping_key:
return_key.append(k)
if len(return_key) == num:
break
return mapping, return_key, mapping_cached
#def mapping_to_columns_keys_search_incremental(self, tableA, thres, pre_mapping = {}, num = 1):
def detect_key_constraints(self, tableA_name, tableB_name, mapping, mapping2c, candidate_keys, tableA, tableB,
key_thres=0.01, dep_thres=0.01):
key_rank = []
key_depen = {}
key_indexA = {}
key_indexB = {}
cached_dep_key = []
for ck in candidate_keys:
if ck not in mapping:
continue
if ck not in mapping2c:
continue
if mapping2c[ck] in self.cached_table_depend:
cached_dep_key.append(ck)
if len(cached_dep_key) > 0:
cached_dep_key_rank = []
for ck in cached_dep_key:
return_key = ck
return_dep = []
possible_dep = self.cached_table_depend[mapping2c[ck]]
for ckey in mapping2c.keys():
if ckey == ck:
continue
if ckey not in mapping:
continue
if mapping2c[ckey] in possible_dep:
return_dep.append(ckey)
cached_dep_key_rank.append((return_key, return_dep, len(return_dep)))
cached_dep_key_rank = sorted(cached_dep_key_rank, key=lambda d: d[2], reverse=True)
cdk = mapping2c[cached_dep_key_rank[0][0]]
            cdk_columns = self.cached_group_tables[cdk].columns
            if tableA_name in cdk_columns and tableB_name in cdk_columns:
join_index = self.cached_group_tables[cdk][[tableA_name, tableB_name]].dropna()
key_indexC = []
key_indexD = []
identical_cnt = 0
for iter, row in join_index.iterrows():
indexA_list = json.loads(row[tableA_name])
indexB_list = json.loads(row[tableB_name])
if len(indexA_list) != 1 or len(indexB_list) != 1:
continue
else:
key_indexC.append(indexA_list[0])
key_indexD.append(indexB_list[0])
identical_cnt = identical_cnt + 1
if float(identical_cnt) / float(join_index.shape[0]) > key_thres:
return cached_dep_key_rank[0][0], cached_dep_key_rank[0][1], key_indexC, key_indexD
for ck in candidate_keys:
if ck not in mapping:
continue
if tableA[ck].dropna().shape[0] == 0:
continue
if tableB[mapping[ck]].dropna().shape[0] == 0:
continue
pruning_scoreA = float(len(set(tableA[ck].dropna().values))) / float(tableA[ck].dropna().shape[0])
pruning_scoreB = float(len(set(tableB[mapping[ck]].dropna().values))) / float(
tableB[mapping[ck]].dropna().shape[0])
if tableA[ck].dtype == np.float64:
continue
if pruning_scoreA < 0.01:
continue
if pruning_scoreB < 0.01:
continue
# logging.info(pruning_scoreA)
# logging.info(pruning_scoreB)
if ck in mapping2c:
# logging.info("here keys")
key_depen[ck] = []
keyA = ck
keyB = mapping[keyA]
keyC = mapping2c[keyA]
key_indexA[keyA] = []
key_indexB[keyB] = []
# joinA = None
# for it in range(self.cached_group_cnt[keyC]):
# if tableA_name in self.cached_group_tables[keyC][it].columns:
# joinA = self.cached_group_tables[keyC][it][[tableA_name]]
# break
# joinB = None
# for it in range(self.cached_group_cnt[keyC]):
# if tableB_name in self.cached_group_tables[keyC][it].columns:
# joinB = self.cached_group_tables[keyC][it][[tableB_name]]
# break
nameC = keyC # + "_index0"
tableC = self.cached_group_tables[nameC]
if tableA_name not in tableC.columns or tableB_name not in tableC.columns:
join_index = pd.DataFrame(columns=[tableA_name, tableB_name])
else:
join_index = self.cached_group_tables[nameC][[tableA_name, tableB_name]].dropna()
# join_index = pd.concat([joinA, joinB], axis=1).dropna()
index_pair = []
identical_num = 0
for iter, row in join_index.iterrows():
indexA_list = json.loads(row[tableA_name])
indexB_list = json.loads(row[tableB_name])
if len(indexA_list) != 1 or len(indexB_list) != 1:
continue
else:
index_pair.append((indexA_list[0], indexB_list[0]))
key_indexA[keyA].append(indexA_list[0])
key_indexB[keyB].append(indexB_list[0])
# logging.info("here3")
# logging.info(float(len(index_pair))/float(join_index.shape[0]))
if join_index.shape[0] == 0:
continue
if float(len(index_pair)) / float(join_index.shape[0]) < key_thres:
continue
for cck in mapping.keys():
if cck == ck:
continue
cckb = mapping[cck]
identical_cnt = 0
for a, b in index_pair:
if tableA[cck].loc[a] == tableB[cckb].loc[b]:
identical_num = identical_num + 1
identical_cnt = identical_cnt + 1
identical_flt = float(identical_cnt) / float(len(index_pair))
logging.info(identical_flt)
if identical_flt > dep_thres:
key_depen[ck].append(cck)
key_rank.append((ck, identical_num))
else:
key_depen[ck] = []
keyA = ck
keyB = mapping[keyA]
key_indexA[keyA] = []
key_indexB[keyB] = []
join_index = {}
value_all = np.union1d(tableA[keyA].dropna().values, tableB[keyB].dropna().values)
for v in value_all:
join_index[v] = {}
join_index[v]['A'] = []
join_index[v]['B'] = []
for iter, row in tableA.iterrows():
if row[keyA] in join_index:
join_index[row[keyA]]['A'].append(iter)
for iter, row in tableB.iterrows():
if row[keyB] in join_index:
join_index[row[keyB]]['B'].append(iter)
index_pair = []
identical_num = 0
for v in join_index.keys():
indexA_list = join_index[v]['A']
indexB_list = join_index[v]['B']
if len(indexA_list) != 1 or len(indexB_list) != 1:
continue
else:
index_pair.append((indexA_list[0], indexB_list[0]))
key_indexA[keyA].append(indexA_list[0])
key_indexB[keyB].append(indexB_list[0])
# logging.info("here2")
# logging.info(float(len(index_pair)) / float(len(join_index.keys())))
if float(len(index_pair)) / float(len(join_index.keys())) < key_thres:
continue
for cck in mapping.keys():
if cck == ck:
continue
cckb = mapping[cck]
identical_cnt = 0
for a, b in index_pair:
if tableA[cck].loc[a] == tableB[cckb].loc[b]:
identical_num = identical_num + 1
identical_cnt = identical_cnt + 1
identical_flt = float(identical_cnt) / float(len(index_pair))
# logging.info(identical_flt)
if identical_flt > dep_thres:
key_depen[ck].append(cck)
key_rank.append((ck, identical_num))
if len(key_rank) == 0:
return [], {}, [], []
key_rank = sorted(key_rank, key=lambda d: d[1], reverse=True)
logging.info(key_rank)
key_return = key_rank[0][0]
col_depend = key_depen[key_return]
if key_return in mapping2c:
key_return_cached = mapping2c[key_return]
col_depend_cached = []
for col in col_depend:
if col in mapping2c:
col_depend_cached.append(mapping2c[col])
if key_return_cached not in self.cached_table_depend:
self.cached_table_depend[key_return_cached] = col_depend_cached
else:
a_cached = self.cached_table_depend[key_return_cached]
for col_insert in col_depend_cached:
if col_insert not in a_cached:
a_cached.append(col_insert)
self.cached_table_depend[key_return_cached] = a_cached
store_dep = self.dumps_json(self.cached_table_depend)
col = []
col_value = []
for t_key in store_dep.keys():
col.append(t_key)
col_value.append(store_dep[t_key])
dict_df = {'col': col, 'value': col_value}
df_store = pd.DataFrame.from_dict(dict_df)
df_store.to_sql("key_dep", self.psql_eng, schema="table_profile", if_exists='replace', index=False)
return [key_return], col_depend, key_indexA[key_return], key_indexB[mapping[key_return]]
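    # NOTE (editorial): the method name below looks like a typo of
    # ``detect_fkey_constraints``; it detects key / foreign-key style
    # dependencies where the join key is unique on only one side.  The name
    # is left unchanged so that any existing callers keep working.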
def detect_kfjey_constraints(self, tableA_name, tableB_name, mapping, mapping2C, candidate_keys, tableA, tableB,
key_thres=0.1, dep_thres=0.05):
key_rank = []
key_depen = {}
cached_dep_key = []
for ck in candidate_keys:
if ck not in mapping:
continue
if ck not in mapping2C:
continue
if mapping2C[ck] in self.cached_table_fdepen:
cached_dep_key.append(ck)
if len(cached_dep_key) > 0:
cached_dep_key_rank = []
for ck in cached_dep_key:
return_key = ck
return_dep = []
possible_dep = self.cached_table_fdepen[mapping2C[ck]]
for ckey in mapping2C.keys():
if ckey == ck:
continue
if mapping2C[ckey] in possible_dep:
return_dep.append(ckey)
cached_dep_key_rank.append((return_key, return_dep, len(return_dep)))
cached_dep_key_rank = sorted(cached_dep_key_rank, key=lambda d: d[2], reverse=True)
return cached_dep_key_rank[0][0], cached_dep_key_rank[0][1]
# logging.info(candidate_keys)
for ck in candidate_keys:
if ck not in mapping:
continue
if tableA[ck].dtype == np.float64:
continue
key_depen[ck] = []
keyA = ck
keyB = mapping[keyA]
try:
prunningA = float(len(set(tableA[keyA].dropna().values))) / float(len(tableA[keyA].dropna().values))
except:
prunningA = 0
try:
prunningB = float(len(set(tableB[keyB].dropna().values))) / float(len(tableB[keyB].dropna().values))
except:
prunningB = 0
if prunningA < 0.5 and prunningB < 0.5:
continue
if keyA in mapping2C:
keyC = mapping2C[keyA]
try:
join_index = self.cached_group_tables[keyC][[tableA_name, tableB_name]].dropna()
index_pairA = []
index_pairB = []
for iter, row in join_index.iterrows():
indexA_list = json.loads(row[tableA_name])
indexB_list = json.loads(row[tableB_name])
if len(indexA_list) == 1:
index_pairA.append((indexA_list, indexB_list))
if len(indexB_list) == 1:
index_pairB.append((indexA_list, indexB_list))
except:
index_pairA = []
index_pairB = []
else:
join_index = {}
value_all = np.union1d(tableA[keyA].dropna().values, tableB[keyB].dropna().values)
for v in value_all:
join_index[v] = {}
join_index[v]['A'] = []
join_index[v]['B'] = []
for iter, row in tableA.iterrows():
if row[keyA] in join_index:
join_index[row[keyA]]['A'].append(iter)
for iter, row in tableB.iterrows():
if row[keyB] in join_index:
join_index[row[keyB]]['B'].append(iter)
index_pairA = []
index_pairB = []
for v in join_index.keys():
indexA_list = join_index[v]['A']
indexB_list = join_index[v]['B']
if len(indexA_list) == 1:
index_pairA.append((indexA_list, indexB_list))
if len(indexB_list) == 1:
index_pairB.append((indexA_list, indexB_list))
index_pair = []
if len(index_pairA) > len(index_pairB):
index_pair = index_pairA
if len(index_pair) == 0:
continue
identical_num = 0
for cck in mapping.keys():
if cck == keyA:
continue
cckb = mapping[cck]
identical_cnt = 0
for a, b in index_pair:
try:
if tableA[cck].loc[a[0]] in tableB[cckb].loc[b].tolist():
identical_num = identical_num + 1
identical_cnt = identical_cnt + 1
except:
logging.info(a)
logging.info(b)
if len(index_pair) == 0:
identical_flt = 0
else:
identical_flt = float(identical_cnt) / float(len(index_pair))
# print(identical_flt)
if identical_flt > dep_thres:
if ck not in key_depen:
key_depen[ck] = []
key_depen[ck].append(cck)
key_rank.append((ck, identical_num))
else:
index_pair = index_pairB
if len(index_pair) == 0:
continue
identical_num = 0
for cck in mapping.keys():
if cck == keyA:
continue
cckb = mapping[cck]
identical_cnt = 0
for a, b in index_pair:
if tableB[cckb].loc[b[0]] in tableA[cck].loc[a].tolist():
identical_num = identical_num + 1
identical_cnt = identical_cnt + 1
if len(index_pair) == 0:
identical_flt = 0
else:
identical_flt = float(identical_cnt) / float(len(index_pair))
# print(identical_flt)
if identical_flt > dep_thres:
if ck not in key_depen:
key_depen[ck] = []
key_depen[ck].append(cck)
key_rank.append((ck, identical_num))
key_rank = sorted(key_rank, key=lambda d: d[1], reverse=True)
# print(key_rank)
if len(key_rank) == 0:
return [], {}
key_return = key_rank[0][0]
col_depend = key_depen[key_return]
if key_return in mapping2C:
key_return_cached = mapping2C[key_return]
col_depend_cached = []
for col in col_depend:
col_depend_cached.append(mapping2C[col])
if key_return_cached not in self.cached_table_fdepen:
self.cached_table_fdepen[key_return_cached] = col_depend_cached
else:
a_cached = self.cached_table_fdepen[key_return_cached]
for col_insert in col_depend_cached:
if col_insert not in a_cached:
a_cached.append(col_insert)
self.cached_table_fdepen[key_return_cached] = a_cached
store_dep = self.dumps_json(self.cached_table_fdepen)
col = []
col_value = []
for t_key in store_dep.keys():
col.append(t_key)
col_value.append(store_dep[t_key])
dict_df = {'col': col, 'value': col_value}
df_store = | pd.DataFrame.from_dict(dict_df) | pandas.DataFrame.from_dict |
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas.util._test_decorators as td
from pandas import DataFrame, Series, Timedelta, concat, date_range
import pandas._testing as tm
from pandas.api.indexers import BaseIndexer
@td.skip_if_no_scipy
def test_constructor(frame_or_series):
# GH 12669
c = frame_or_series(range(5)).rolling
# valid
c(win_type="boxcar", window=2, min_periods=1)
c(win_type="boxcar", window=2, min_periods=1, center=True)
c(win_type="boxcar", window=2, min_periods=1, center=False)
@pytest.mark.parametrize("w", [2.0, "foo", np.array([2])])
@td.skip_if_no_scipy
def test_invalid_constructor(frame_or_series, w):
# not valid
c = frame_or_series(range(5)).rolling
with pytest.raises(ValueError, match="min_periods must be an integer"):
c(win_type="boxcar", window=2, min_periods=w)
with pytest.raises(ValueError, match="center must be a boolean"):
c(win_type="boxcar", window=2, min_periods=1, center=w)
@pytest.mark.parametrize("wt", ["foobar", 1])
@td.skip_if_no_scipy
def test_invalid_constructor_wintype(frame_or_series, wt):
c = frame_or_series(range(5)).rolling
with pytest.raises(ValueError, match="Invalid win_type"):
c(win_type=wt, window=2)
@td.skip_if_no_scipy
def test_constructor_with_win_type(frame_or_series, win_types):
# GH 12669
c = frame_or_series(range(5)).rolling
c(win_type=win_types, window=2)
@pytest.mark.parametrize("method", ["sum", "mean"])
def test_numpy_compat(method):
# see gh-12811
w = Series([2, 4, 6]).rolling(window=2)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(w, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(w, method)(dtype=np.float64)
@td.skip_if_no_scipy
@pytest.mark.parametrize("arg", ["median", "kurt", "skew"])
def test_agg_function_support(arg):
df = DataFrame({"A": np.arange(5)})
roll = df.rolling(2, win_type="triang")
msg = f"'{arg}' is not a valid function for 'Window' object"
with pytest.raises(AttributeError, match=msg):
roll.agg(arg)
with pytest.raises(AttributeError, match=msg):
roll.agg([arg])
with pytest.raises(AttributeError, match=msg):
roll.agg({"A": arg})
@td.skip_if_no_scipy
def test_invalid_scipy_arg():
# This error is raised by scipy
msg = r"boxcar\(\) got an unexpected"
with pytest.raises(TypeError, match=msg):
Series(range(3)).rolling(1, win_type="boxcar").mean(foo="bar")
@td.skip_if_no_scipy
def test_constructor_with_win_type_invalid(frame_or_series):
# GH 13383
c = frame_or_series(range(5)).rolling
msg = "window must be an integer 0 or greater"
with pytest.raises(ValueError, match=msg):
c(-1, win_type="boxcar")
@td.skip_if_no_scipy
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
def test_window_with_args():
# make sure that we are aggregating window functions correctly with arg
r = Series(np.random.randn(100)).rolling(
window=10, min_periods=1, win_type="gaussian"
)
expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1)
expected.columns = ["<lambda>", "<lambda>"]
result = r.aggregate([lambda x: x.mean(std=10), lambda x: x.mean(std=0.01)])
tm.assert_frame_equal(result, expected)
def a(x):
return x.mean(std=10)
def b(x):
return x.mean(std=0.01)
expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1)
expected.columns = ["a", "b"]
result = r.aggregate([a, b])
tm.assert_frame_equal(result, expected)
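# Editorial sketch (not part of the original suite): the test above checks
# that window-type specific arguments are forwarded to the aggregation.  A
# minimal illustration, assuming scipy is installed, is:
def _example_gaussian_rolling_mean():
    s = Series(np.random.randn(100))
    # ``std`` is forwarded to scipy's gaussian window, not to ``mean`` itself.
    return s.rolling(window=10, min_periods=1, win_type="gaussian").mean(std=3)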
@td.skip_if_no_scipy
def test_win_type_with_method_invalid():
with pytest.raises(
NotImplementedError, match="'single' is the only supported method type."
):
Series(range(1)).rolling(1, win_type="triang", method="table")
@td.skip_if_no_scipy
@pytest.mark.parametrize("arg", [2000000000, "2s", | Timedelta("2s") | pandas.Timedelta |
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
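# Editorial sketch (not part of the original suite): the pattern exercised by
# ``test_select`` -- append with ``data_columns`` and query with a ``where``
# string -- in its minimal form.  The file name is illustrative only.
def _example_hdf_where_query(path="example_store.h5"):
    df = DataFrame({"A": np.random.randn(10), "B": np.random.randn(10)})
    with HDFStore(path) as store:
        store.append("df", df, data_columns=["A"])  # 'A' becomes queryable
        return store.select("df", where="A > 0", columns=["A", "B"])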
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = f"index >= '{beg_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = f"index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = f"index > '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = 10_000
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = f"index <= '{beg_dt}' & index >= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
def test_frame_select(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
import torch
import random
import numpy as np
import sys
import torch.nn as nn
import platalea.basic as basic
import platalea.encoders as encoders
import platalea.attention
import platalea.config
import os.path
import logging
import json
from plotnine import *
import pandas as pd
import ursa.similarity as S
import ursa.util as U
import pickle
_device = platalea.config.device(0)
## Models
### Local
def local_diagnostic(config):
directory = config['directory']
logging.getLogger().setLevel('INFO')
output = []
data_mfcc = pickle.load(open('{}/local_input.pkl'.format(directory), 'rb'))
#for mode in ['trained', 'random']:
for mode in ['random', 'trained']:
logging.info("Fitting local classifier for mfcc")
result = local_classifier(data_mfcc['features'], data_mfcc['labels'], epochs=config['epochs'], device=_device, hidden=config['hidden'])
logging.info("Result for {}, {} = {}".format(mode, 'mfcc', result['acc']))
result['model'] = mode
result['layer'] = 'mfcc'
output.append(result)
for layer in config['layers']:
data = pickle.load(open('{}/local_{}_{}.pkl'.format(directory, mode, layer), 'rb'))
logging.info("Fitting local classifier for {}, {}".format(mode, layer))
result = local_classifier(data[layer]['features'], data[layer]['labels'], epochs=config['epochs'], device=_device, hidden=config['hidden'])
logging.info("Result for {}, {} = {}".format(mode, layer, result['acc']))
result['model'] = mode
result['layer'] = layer
output.append(result)
json.dump(output, open("local_diagnostic.json", "w"), indent=True)
def local_rsa(config):
logging.getLogger().setLevel('INFO')
if config['matrix']:
raise NotImplementedError
#result = framewise_RSA_matrix(directory, layers=config['layers'], size=config['size'])
else:
del config['matrix']
result = framewise_RSA(**config)
json.dump(result, open('local_rsa.json', 'w'), indent=2)
### Global
def global_rsa(config):
logging.getLogger().setLevel('INFO')
result = weighted_average_RSA(**config)
json.dump(result, open('global_rsa.json', 'w'), indent=2)
def global_rsa_partial(config):
logging.getLogger().setLevel('INFO')
result = weighted_average_RSA_partial(**config)
json.dump(result, open('global_rsa_partial.json', 'w'), indent=2)
def global_diagnostic(config):
logging.getLogger().setLevel('INFO')
result = weighted_average_diagnostic(**config)
json.dump(result, open('global_diagnostic.json', 'w'), indent=2)
def plots():
local_diagnostic_plot()
global_diagnostic_plot()
local_rsa_plot()
global_rsa_plot()
## Plotting
def local_diagnostic_plot():
data = pd.read_json("local_diagnostic.json", orient='records')
order = list(data['layer'].unique())
data['layer_id'] = [ order.index(x) for x in data['layer'] ]
data['rer'] = rer(data['acc'], data['baseline'])
g = ggplot(data, aes(x='layer_id', y='rer', color='model')) + geom_point(size=2) + geom_line(size=2) + ylim(0, 1) + ggtitle("Local diagnostic")
ggsave(g, 'local_diagnostic.png')
def global_diagnostic_plot():
data = pd.read_json("global_diagnostic.json", orient='records')
"""dynaPreprocessing Class"""
#!/usr/bin/env python
import itertools
from optimalflow.funcPP import PPtools
import pandas as pd
import joblib
import datetime
import numpy as np
from time import time
from optimalflow.utilis_func import update_progress,delete_old_log_files
import warnings
import os
path = os.getcwd()
def warn(*args, **kwargs):
pass
warnings.warn = warn
import logging
LOG_TS = datetime.datetime.now().strftime("%Y.%m.%d.%H.%M.%S")
logs_folder = os.path.join(os.getcwd(),'logs')
if not os.path.exists(logs_folder):
os.makedirs(logs_folder)
log_name = os.path.join(logs_folder, f'{os.path.basename(__file__).split(".")[0]}_log_{LOG_TS}.log')
LOG_LEVEL = logging.DEBUG
DELETE_FLAG = True
TS = time()
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s','%d/%m %H:%M:%S')
fh = logging.FileHandler(filename = log_name)
fh.setLevel(LOG_LEVEL)
fh.setFormatter(formatter)
logger.addHandler(fh)
Test_case = f'Optimal Flow - autoCV - Auto PreProcessing :: {LOG_TS}'
Test_comment = '-' * len(Test_case) * 3
Start_log = '#' * len(Test_case) * 3
logger.info(Start_log)
logger.info(Test_case)
logger.info(Start_log)
delete_old_log_files(directory = logs_folder ,delete_flag = DELETE_FLAG, logger = logger, extension_list = ['.log'],filename_list = ['autoPP_log'],log_ts = LOG_TS)
logger.info(Test_comment)
class dynaPreprocessing:
"""Automated feature preprocessing including imputation, winsorization, encoding, and scaling in ensemble algorithms, to generate permutation input datasets for further pipeline components.
Parameters
----------
custom_parameters: dictionary, default = None
Custom parameters settings input.
NOTE: default_parameters
= {
"scaler" : ["standard", "minmax", "maxabs", "robust"],
"encode_band" : [10],
"low_encode" : ["onehot","label"],
"high_encode" : ["frequency", "mean"],
"winsorizer" : [(0.01,0.01),(0.05,0.05)],
"sparsity" : [0.50],
"cols" : [100]
}
label_col: str, default = None
Name of label column.
model_type: str, default = "reg"
"reg" for regression problem or "cls" for classification problem - Default: "reg".
export_output_files: bool, default = False
Export qualified permuted datasets to the ./dfs folder.
Example
-------
.. [Example] https://Optimal-Flow.readthedocs.io/en/latest/demos.html#feature-preprocessing-for-a-regression-problem-using-autopp
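A minimal usage sketch (illustrative only; the import path and the tiny
frame below are assumptions, not taken from the linked demo):

>>> import pandas as pd
>>> from optimalflow.autoPP import dynaPreprocessing
>>> df = pd.DataFrame({"x1": [1.0, 2.0, 3.0, 4.0],
...                    "cat": ["a", "b", "a", "b"],
...                    "label": [0, 1, 0, 1]})
>>> dyna = dynaPreprocessing(custom_parameters=None, label_col="label", model_type="cls")
>>> result = dyna.fit(input_data=df)  # doctest: +SKIP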
References
----------
None
"""
def __init__(self, custom_parameters = None, label_col = None, model_type = "reg",export_output_files = False):
default_parameters = {
"scaler" : ["standard", "minmax", "maxabs", "robust"],
"encode_band" : [10],
"low_encode" : ["onehot","label"],
"high_encode" : ["frequency", "mean"],
"winsorizer" : [(0.01,0.01),(0.05,0.05)],
"sparsity" : [0.50],
"cols" : [100]
}
if(custom_parameters is None):
self.parameters = default_parameters
else:
self.parameters = custom_parameters
self.model_type = model_type
self.export_output_files = export_output_files
self.label_col = label_col
def fit(self, input_data = None):
"""Fits and transforms a pandas dataframe to non-missing values, outlier excluded, categories encoded and scaled datasets by all algorithms permutation.
Parameters
----------
input_data : pandas dataframe, shape = [n_samples, n_features]
NOTE:
The input_data should be a dataset that has already gone through basic data cleaning and feature reduction; the more features are involved, the more column permutations will be produced.
Returns
-------
DICT_PREP_DF : dictionary
Each key is the # of output preprocessed dataset, each value stores the dataset
DICT_PREP_INFO : dictionary
Dictionary for reference. Each key is the # of the output preprocessed dataset, each value stores the column names of the dataset
NOTE - Log records are generated and saved to the ./logs folder automatically.
"""
if (self.export_output_files):
df_folder = os.path.join(os.getcwd(),'dfs')
if not os.path.exists(df_folder):
os.makedirs(df_folder)
for l in os.listdir(df_folder):
os.remove(os.path.join(df_folder,l))
DICT_DFS={}
for i in range(len(self.parameters.get("winsorizer"))):
pp = PPtools(label_col = self.label_col, data = input_data, model_type = self.model_type)
pp.split_category_cols()
initial_num_cols = pp.num_df.columns
pp.impute_tool()
pp.winsorize_tool(lower_ban = self.parameters.get("winsorizer")[i][0],upper_ban = self.parameters.get("winsorizer")[i][1])
winsorized_df_cols_list = list(pp.num_df.columns)
encoded_cols_list = {}
for col in pp.cat_df.columns:
encoded_cols_list[col] = []
if(pp.cat_df[col].nunique() < self.parameters.get("encode_band")[0]):
for en_type in self.parameters.get("low_encode"):
encoded_col = pp.encode_tool(en_type = en_type ,category_col = col)
encoded_cols_list[col].append(list(encoded_col.columns))
pp.num_df = pd.concat([pp.num_df, encoded_col], axis=1)
# coding: utf-8
"""基于HDF文件的数据库"""
import pandas as pd
import numpy as np
import os
import warnings
from multiprocessing import Lock
from ..utils.datetime_func import Datetime2DateStr, DateStr2Datetime
from ..utils.tool_funcs import ensure_dir_exists
from ..utils.disk_persist_provider import DiskPersistProvider
from .helpers import handle_ids, FIFODict
from pathlib import Path
from FactorLib.utils.tool_funcs import is_non_string_iterable
pd.options.compute.use_numexpr = True
lock = Lock()
warnings.simplefilter('ignore', category=FutureWarning)
def append_along_index(df1, df2):
df1, df2 = df1.align(df2, axis='columns')
new = pd.DataFrame(np.vstack((df1.values, df2.values)),
columns=df1.columns,
index=df1.index.append(df2.index))
new.sort_index(inplace=True)
return new
def auto_increase_keys(_dict, keys):
if _dict:
max_v = max(_dict.values())
else:
max_v = 0
for key in keys:
if key not in _dict:
max_v += 1
_dict[key] = max_v
return _dict
class H5DB(object):
def __init__(self, data_path, max_cached_files=30):
self.data_path = str(data_path)
self.feather_data_path = os.path.abspath(self.data_path+'/../feather')
self.csv_data_path = os.path.abspath(self.data_path+'/../csv')
self.data_dict = None
self.cached_data = FIFODict(max_cached_files)
self.max_cached_files = max_cached_files
# self._update_info()
def _update_info(self):
factor_list = []
for root, subdirs, files in os.walk(self.data_path):
relpath = "/%s/"%os.path.relpath(root, self.data_path).replace("\\", "/")
for file in files:
if file.endswith(".h5"):
factor_list.append([relpath, file[:-3]])
self.data_dict = pd.DataFrame(
factor_list, columns=['path', 'name'])
def _read_h5file(self, file_path, key):
if file_path in self.cached_data:
return self.cached_data[file_path]
lock.acquire()
try:
data = pd.read_hdf(file_path, key)
except KeyError:
data = pd.read_hdf(file_path, 'data')
finally:
lock.release()
# update at 2020.02.15: support wide dataframes
columns_mapping = self._read_columns_mapping(file_path)
if not columns_mapping.empty:
data.rename(
columns=pd.Series(columns_mapping.index, index=columns_mapping.to_numpy()),
inplace=True
)
if self.max_cached_files > 0:
self.cached_data[file_path] = data
return data
def _read_columns_mapping(self, file_path):
try:
data = pd.read_hdf(file_path, 'column_name_mapping')
except KeyError:
data = pd.Series()
return data
def _normalize_columns(self, input, column_mapping):
return column_mapping[column_mapping.index.isin(input)].tolist()
def _save_h5file(self, data, file_path, key,
complib='blosc', complevel=9,
mode='w', **kwargs):
try:
lock.acquire()
# update at 2020.02.15: support wide dataframes
if data.shape[1] > 1000:
columns_mapping = {x:y for x, y in zip(data.columns, range(data.shape[1]))}
data2 = data.rename(columns=columns_mapping)
else:
data2 = data
columns_mapping = {}
with pd.HDFStore(file_path, mode=mode, complevel=complevel,
complib=complib) as f:
f.put(key, data2, **kwargs)
f.put('column_name_mapping', pd.Series(columns_mapping))
if file_path in self.cached_data:
self.cached_data.update({file_path: data})
lock.release()
except Exception as e:
lock.release()
raise e
def _read_pklfile(self, file_path):
if file_path in self.cached_data:
return self.cached_data[file_path]
lock.acquire()
try:
d = pd.read_pickle(file_path)
if self.max_cached_files > 0:
self.cached_data[file_path] = d
lock.release()
except Exception as e:
lock.release()
raise e
return d
def _save_pklfile(self, data, file_dir, name, protocol=-1):
dumper = DiskPersistProvider(
os.path.join(self.data_path, file_dir.strip('/')))
file_path = os.path.join(
self.data_path, file_dir.strip('/'), name+'.pkl'
)
lock.acquire()
try:
dumper.dump(data, name, protocol)
if file_path in self.cached_data:
self.cached_data[file_path] = data
except Exception as e:
lock.release()
raise e
lock.release()
def _delete_cached_factor(self, file_path):
if file_path in self.cached_data:
del self.cached_data[file_path]
def set_data_path(self, path):
self.data_path = path
# self._update_info()
# --------------------------- factor management ---------------------------------------
# check whether a factor exists
def check_factor_exists(self, factor_name, factor_dir='/'):
file_path = self.abs_factor_path(factor_dir, factor_name)
return os.path.isfile(file_path)
# delete a factor
def delete_factor(self, factor_name, factor_dir='/'):
factor_path = self.abs_factor_path(factor_dir, factor_name)
try:
os.remove(factor_path)
self._delete_cached_factor(factor_path)
except Exception as e:
print(e)
pass
self._update_info()
# list factor names
def list_factors(self, factor_dir):
dir_path = self.data_path + factor_dir
factors = [x[:-3] for x in os.listdir(dir_path) if x.endswith('.h5')]
return factors
# rename a factor
def rename_factor(self, old_name, new_name, factor_dir):
factor_path = self.abs_factor_path(factor_dir, old_name)
temp_factor_path = self.abs_factor_path(factor_dir, new_name)
factor_data = self._read_h5file(factor_path, old_name).rename(columns={old_name: new_name})
self._save_h5file(factor_data, temp_factor_path, new_name)
self.delete_factor(old_name, factor_dir)
# create a new factor directory
def create_factor_dir(self, factor_dir):
if not os.path.isdir(self.data_path+factor_dir):
os.makedirs(self.data_path+factor_dir)
# date range of a factor
def get_date_range(self, factor_name, factor_path):
try:
max_date = self.read_h5file_attr(factor_name, factor_path, 'max_date')
min_date = self.read_h5file_attr(factor_name, factor_path, 'min_date')
except Exception:
try:
panel = self._read_h5file(
self.abs_factor_path(factor_path, factor_name), key='data')
except KeyError:
panel = self._read_h5file(
self.abs_factor_path(factor_path, factor_name), key=factor_name)
if isinstance(panel, pd.Panel):
min_date = Datetime2DateStr(panel.major_axis.min())
max_date = Datetime2DateStr(panel.major_axis.max())
else:
min_date = panel.index.get_level_values('date').min()
max_date = panel.index.get_level_values('date').max()
return min_date, max_date
# read the attributes of a multi-column factor file
def read_h5file_attr(self, factor_name, factor_path):
attr_file_path = self.abs_factor_attr_path(factor_path, factor_name)
print(attr_file_path)
if os.path.isfile(attr_file_path):
return self._read_pklfile(attr_file_path)
else:
raise FileNotFoundError('Factor attribute file not found!')
def clear_cache(self):
self.cached_data = FIFODict(self.max_cached_files)
# -------------------------- data management -------------------------------------------
@handle_ids
def load_factor(self, factor_name, factor_dir=None, dates=None, ids=None, idx=None,
date_level=0):
"""
Load a single factor.
Factor format
-------------
Factors are stored as DataFrame(index=[date, IDs], columns=factor)
Parameters:
-----------
factor_name: str
    Factor name
factor_dir: str
    Factor directory
dates: list
    Dates to load
ids: list
    Security codes to load
idx: DataFrame or Series
    Index to align the result with
date_level: int
    Position of the date level in the MultiIndex
"""
if idx is not None:
dates = idx.index.get_level_values('date').unique()
return (self
.load_factor(factor_name, factor_dir=factor_dir, dates=dates)
.reindex(idx.index, copy=False)
)
factor_path = self.abs_factor_path(factor_dir, factor_name)
data = self._read_h5file(factor_path, factor_name)
query_str = ""
if ids is not None:
if isinstance(ids, list):
query_str += "IDs in @ids"
else:
query_str += "IDs == @ids"
if len(query_str) > 0:
query_str += " and "
if dates is not None:
if is_non_string_iterable(dates):
query_str += "date in @dates"
else:
query_str += "date == @dates"
if query_str.endswith(" and "):
query_str = query_str.strip(" and ")
if query_str:
df = data.query(query_str)
return df
else:
return data
def load_factor2(self, factor_name, factor_dir=None, dates=None, ids=None, idx=None,
stack=False, check_A=False):
"""加载另外一种类型的因子
因子的格式是一个二维DataFrame,行索引是DatetimeIndex,列索引是股票代码。
check_A: 过滤掉非A股股票
"""
if idx is not None:
dates = idx.index.get_level_values('date').unique().tolist()
ids = idx.index.get_level_values('IDs').unique().tolist()
factor_path = self.abs_factor_path(factor_dir, factor_name)
columns_mapping = self._read_columns_mapping(factor_path)
if not columns_mapping.empty and ids is not None:
ids_normalized = self._normalize_columns(ids, columns_mapping)
if not ids_normalized:
return pd.DataFrame(columns=ids)
else:
ids_normalized = ids
where_term = None
if dates is not None:
dates = pd.to_datetime(dates)
where_term = "index in dates"
with pd.HDFStore(factor_path, mode='r') as f:
try:
data = pd.read_hdf(f, key='data', where=where_term, columns=ids_normalized)
except NotImplementedError as e:
data = pd.read_hdf(f, key='data').reindex(index=dates, columns=ids)
except KeyError as e:
f.close()
data = self.load_factor(factor_name, factor_dir, dates, ids)[factor_name].unstack()
if ids_normalized is not None and data.shape[1] != len(ids_normalized):
data = data.reindex(columns=ids_normalized)
if not columns_mapping.empty:
data.rename(columns=pd.Series(columns_mapping.index, index=columns_mapping.to_numpy()), inplace=True)
data.name = factor_name
if check_A:
data = data.filter(regex='^[6,0,3]', axis=1)
if stack:
data = data.stack().to_frame(factor_name)
data.index.names = ['date', 'IDs']
if idx is not None:
data = data.reindex(idx.index)
return data
def show_symbol_name(self, factor_data=None, factor_name=None,
factor_dir=None, dates=None, data_source=None):
"""返回带有股票简称的因子数据
Note:
factor_data应为AST或者SAST数据
"""
if data_source is None:
data_source = 'D:/data/factors'
import pandas as pd
names = pd.read_csv(os.path.join(data_source,'base','ashare_list_delist_date.csv'),
header=0,index_col=0,usecols=[0,1,2],
converters={'IDs': lambda x: str(x).zfill(6)},
encoding='GBK')
names.set_index('IDs', inplace=True)
if factor_data is None:
factor_data = self.load_factor2(factor_name, factor_dir, dates=dates)
factor_data = factor_data.stack().to_frame(factor_data.name)
if isinstance(factor_data.index, pd.MultiIndex):
factor_data = factor_data.reset_index().join(names, on='IDs', how='left')
elif isinstance(factor_data, pd.Series):
factor_data = factor_data.reset_index().join(names, on='IDs', how='left')
else:
factor_data = factor_data.stack().reset_index().join(names, on='IDs', how='left')
return factor_data
def read_h5file(self, file_name, path, group='data', check_A=None):
file_path = self.abs_factor_path(path, file_name)
data = self._read_h5file(file_path, key=group)
if check_A is not None:
data = data[data[check_A].str.match('^[0,3,6]')]
return data
def save_h5file(self, data, name, path, group='data', ignore_index=True,
drop_duplicated_by_index=True, drop_duplicated_by_keys=None,
if_exists='append', sort_by_fields=None, sort_index=False,
append_axis=0, **kwargs):
"""直接把DataFrame保存成h5文件
Parameters
----------
use_index: bool
当文件已存在,去重处理时按照索引去重。
ignore_index: bool:
if_exists='append'时, 是否重新建立索引。
if_exists: str
文件已存在时的处理方式:'append', 'replace' or 'update'.
'append': 直接添加,不做去重处理
'update': 添加后做去重处理,当'use_index'为TRUE时,按照
Index去重。
'replace': 重写文件
sort_by_fields: None or list
写入之前,DataFrame先按照字段排序
sort_index: bool, 默认为False
写入之前,是否按照索引排序
kwargs: 传入_save_h5file
"""
file_path = self.abs_factor_path(path, name)
if self.check_factor_exists(name, path):
df = self.read_h5file(name, path, group=group)
if if_exists == 'append':
data = pd.concat([df, data], axis=append_axis, ignore_index=ignore_index)
elif if_exists == 'replace':
pass
elif if_exists=='update':
data = pd.concat([df, data], axis=append_axis)
if drop_duplicated_by_index:
if append_axis == 0:
data = data[~data.index.duplicated(keep='last')]
else:
data = data.iloc[:, ~data.columns.duplicated(keep='last')]
else:
data.drop_duplicates(subset=drop_duplicated_by_keys,
keep='last',
inplace=True)
data.reset_index(drop=True, inplace=True)
else:
raise NotImplementedError
if ignore_index and not drop_duplicated_by_index:
data.reset_index(drop=True, inplace=True)
if sort_by_fields is not None:
data.sort_values(sort_by_fields, inplace=True)
if sort_index:
data.sort_index(inplace=True)
self._save_h5file(data, file_path, group, **kwargs)
def list_h5file_factors(self, file_name, file_pth):
""""提取h5File的所有列名"""
attr_file_path = self.data_path + file_pth + file_name + '_attr.pkl'
file_path = self.abs_factor_path(file_pth, file_name)
if os.path.isfile(attr_file_path):
attr = pd.read_pickle(attr_file_path)
return attr['factors']
attr_file_path = self.data_path + file_pth + file_name + '_mapping.pkl'
try:
attr = pd.read_pickle(attr_file_path)
return attr
except FileNotFoundError:
df = self._read_h5file(file_path, "data")
return df.columns.tolist()
def load_factors(self, factor_names_dict, dates=None, ids=None):
_l = []
for factor_path, factor_names in factor_names_dict.items():
for factor_name in factor_names:
df = self.load_factor(factor_name, factor_dir=factor_path, dates=dates, ids=ids)
_l.append(df)
return pd.concat(_l, axis=1)
def load_factors2(self, factor_names_dict, dates=None, ids=None, idx=None,
merge=True, stack=True):
assert not (merge is True and stack is False)
_l = []
for factor_path, factor_names in factor_names_dict.items():
for factor_name in factor_names:
df = self.load_factor2(factor_name, factor_dir=factor_path, dates=dates, ids=ids,
idx=idx, stack=stack)
_l.append(df)
if merge:
return pd.concat(_l, axis=1)
return tuple(_l)
def load_factors3(self, factor_names_dict, dates=None, ids=None,
idx=None):
if (dates is None or ids is None) and (idx is None):
raise ValueError("idx must not be None, or both date and ids must not be None!")
l = []
factor_name_list = []
for factor_path, factor_names in factor_names_dict.items():
for factor_name in factor_names:
factor_name_list.append(factor_name)
df = self.load_factor2(factor_name, factor_dir=factor_path, dates=dates, ids=ids,
idx=idx, stack=False)
l.append(df.to_numpy())
K = len(factor_name_list)
T, N = l[0].shape
threeD = np.concatenate(l, axis=0).reshape((K, T*N)).T
df = pd.DataFrame(threeD,
index=pd.MultiIndex.from_product([df.index,df.columns], names=['date', 'IDs']),
columns=factor_name_list)
return df
def load_macro_factor(self, factor_name, factor_dir, ids=None, ann_dates=None, dates=None,
date_level=0, time='15:00'):
data = self.load_factor(factor_name, factor_dir, ids=ids, date_level=date_level)
if 'ann_dt' in data.columns and ann_dates is not None:
data = data.reset_index().set_index('ann_dt').sort_index()
dates = pd.to_datetime(ann_dates, format='%Y%m%d') + pd.Timedelta(hours=int(time[:2]), minutes=int(time[-2:]))
df = data.groupby('name').apply(lambda x: x.reindex(dates, method='ffill'))[['data']]
else:
if dates is None:
dates = slice(None)
else:
dates = pd.to_datetime(dates, format='%Y%m%d')
if date_level == 0:
df = data.loc[pd.IndexSlice[dates, :], ['data']]
else:
df = data.loc[pd.IndexSlice[:, dates], ['data']]
return df
def save_factor(self, factor_data, factor_dir, if_exists='update'):
"""往数据库中写数据
数据格式:DataFrame(index=[date,IDs],columns=data)
Parameters:
-----------
factor_data: DataFrame
"""
if isinstance(factor_data, pd.Series):
factor_data = factor_data.to_frame()
if factor_data.index.nlevels == 1:
if isinstance(factor_data.index, pd.DatetimeIndex):
factor_data['IDs'] = '111111'
factor_data.set_index('IDs', append=True, inplace=True)
else:
factor_data['date'] = DateStr2Datetime('19000101')
factor_data.set_index('date', append=True, inplace=True)
factor_data.sort_index(inplace=True)
self.create_factor_dir(factor_dir)
for column in factor_data.columns:
factor_path = self.abs_factor_path(factor_dir, column)
if not self.check_factor_exists(column, factor_dir):
self._save_h5file(factor_data[[column]].dropna(),
factor_path, column)
elif if_exists == 'update':
old_panel = self._read_h5file(factor_path, column)
new_frame = old_panel.append(factor_data[[column]].dropna())
new_panel = new_frame[~new_frame.index.duplicated(keep='last')].sort_index()
self._save_h5file(new_panel,
factor_path,
column
)
elif if_exists == 'replace':
self._save_h5file(factor_data[[column]].dropna(),
factor_path,
column
)
else:
raise KeyError("please make sure if_exists is validate")
def save_factor2(self, factor_data, factor_dir, if_exists='append',
fillvalue=None, fillmethod=None):
"""往数据库中写数据
数据格式:DataFrame(index=date, columns=IDs)
"""
if isinstance(factor_data, pd.Series):
if isinstance(factor_data.index, pd.MultiIndex):
factor_name = factor_data.name
factor_data = factor_data.unstack()
else:
raise ValueError("Format of factor_data is invalid.")
elif isinstance(factor_data, pd.DataFrame):
if factor_data.shape[1] > 1 and factor_data.index.nlevels > 1:
raise ValueError("Column of factor_data must be one.")
elif factor_data.index.nlevels > 1:
factor_name = factor_data.columns[0]
factor_data = factor_data[factor_name].unstack()
else:
factor_name = factor_data.name
else:
raise NotImplementedError
self.create_factor_dir(factor_dir)
factor_path = self.abs_factor_path(factor_dir, factor_name)
if not self.check_factor_exists(factor_name, factor_dir):
self._save_h5file(factor_data, factor_path, 'data', complevel=9,
format='table')
elif if_exists == 'append':
raw = self._read_h5file(factor_path, key='data')
new = factor_data[~factor_data.index.isin(raw.index)]
d = append_along_index(raw, new)
if fillvalue:
d = d.sort_index().fillna(fillvalue)
if fillmethod:
d = d.sort_index().fillna(method=fillmethod)
self._save_h5file(d, factor_path, 'data', complevel=0,
format='table')
elif if_exists == 'update':
raw = self._read_h5file(factor_path, key='data')
raw, factor_data = raw.align(factor_data, axis='columns')
raw.update(factor_data)
d = append_along_index(raw, factor_data[~factor_data.index.isin(raw.index)])
if fillvalue:
d = d.sort_index().fillna(fillvalue)
if fillmethod:
d = d.sort_index().fillna(method=fillmethod)
self._save_h5file(d, factor_path, 'data', complevel=0,
format='table')
elif if_exists == 'replace':
self._save_h5file(factor_data, factor_path, 'data', complevel=0,
format='table')
else:
pass
def save_as_dummy(self, factor_data, factor_dir, indu_name=None, if_exists='update'):
"""往数据库中存入哑变量数据
factor_data: pd.Series or pd.DataFrame
当factor_data是Series时,首先调用pd.get_dummy()转成行业哑变量
"""
if isinstance(factor_data, pd.Series):
assert factor_data.name is not None or indu_name is not None
factor_data.dropna(inplace=True)
indu_name = indu_name if indu_name is not None else factor_data.name
factor_data = pd.get_dummies(factor_data)
else:
assert isinstance(factor_data, pd.DataFrame) and indu_name is not None
factor_data = factor_data.drop('T00018', axis=0, level='IDs').fillna(0)
factor_data = factor_data.loc[(factor_data != 0).any(axis=1)]
file_pth = self.abs_factor_path(factor_dir, indu_name)
if self.check_factor_exists(indu_name, factor_dir) and if_exists=='update':
mapping = self._read_pklfile(file_pth.replace('.h5', '_mapping.pkl'))
factor_data = factor_data.reindex(columns=mapping)
new_saver = pd.DataFrame(np.argmax(factor_data.values, axis=1), columns=[indu_name],
index=factor_data.index)
else:
new_saver = pd.DataFrame(np.argmax(factor_data.values, axis=1), columns=[indu_name],
index=factor_data.index)
mapping = factor_data.columns.values.tolist()
self.save_factor(new_saver, factor_dir, if_exists=if_exists)
self._save_pklfile(mapping, factor_dir, indu_name+'_mapping', protocol=2)
def save_as_dummy2(self, factor_data, factor_dir, indu_name=None, if_exists='update'):
"""往数据库中存入哑变量数据
factor_data: pd.Series or pd.DataFrame
当factor_data是Series时,首先调用pd.get_dummy()转成行业哑变量
"""
if isinstance(factor_data, pd.Series):
assert factor_data.name is not None or indu_name is not None
factor_data.dropna(inplace=True)
indu_name = indu_name if indu_name is not None else factor_data.name
factor_data = pd.get_dummies(factor_data)
else:
assert isinstance(factor_data, pd.DataFrame) and indu_name is not None
factor_data = factor_data.drop('T00018', axis=0, level='IDs', errors='ignore').fillna(0)
factor_data = factor_data.loc[(factor_data != 0).any(axis=1)]
file_pth = self.abs_factor_path(factor_dir, indu_name)
if self.check_factor_exists(indu_name, factor_dir) and if_exists=='update':
mapping = self._read_pklfile(file_pth.replace('.h5', '_mapping.pkl'))
mapping = mapping + [x for x in factor_data.columns if x not in mapping]  # newly added dummy columns go at the end
factor_data = factor_data.reindex(columns=mapping, fill_value=0)
new_saver = pd.DataFrame(np.argmax(factor_data.values, axis=1), columns=[indu_name],
index=factor_data.index)
else:
new_saver = pd.DataFrame(np.argmax(factor_data.values, axis=1), columns=[indu_name],
index=factor_data.index)
mapping = factor_data.columns.values.tolist()
self.save_factor2(new_saver, factor_dir, if_exists=if_exists)
self._save_pklfile(mapping, factor_dir, indu_name+'_mapping', protocol=2)
def load_as_dummy(self, factor_name, factor_dir, dates=None, ids=None, idx=None):
"""读取行业哑变量"""
mapping_pth = self.data_path + factor_dir + factor_name + '_mapping.pkl'
mapping = self._read_pklfile(mapping_pth)
data = self.load_factor(factor_name, factor_dir, dates=dates, ids=ids, idx=idx).dropna()
dummy = np.zeros((len(data), len(mapping)))
dummy[np.arange(len(data)), data[factor_name].values.astype('int')] = 1
return pd.DataFrame(dummy, index=data.index, columns=mapping, dtype='int8')
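# ----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It is illustrative
# only: the data path is hypothetical and it assumes the rest of the class
# (e.g. ``abs_factor_path``) as defined in the full source file.
if __name__ == '__main__':
    demo_db = H5DB(data_path='D:/data/h5')  # hypothetical location
    demo_dates = pd.date_range('2021-01-04', periods=3)
    demo_wide = pd.DataFrame(np.random.randn(3, 2),
                             index=demo_dates, columns=['000001', '600000'])
    demo_series = demo_wide.stack()
    demo_series.name = 'demo_factor'
    demo_series.index.names = ['date', 'IDs']
    # save_factor2 stores DataFrame(index=date, columns=IDs) under /demo/
    demo_db.save_factor2(demo_series, '/demo/', if_exists='replace')
    print(demo_db.load_factor2('demo_factor', '/demo/', stack=True).head())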
"""Contains a collection of MTR equipment parsing.
These include:
* Version 3/4 (old version) [ ]
* Version 5 (MTRduino) [x]
"""
import pandas as pd
class rcm(object):
r""" Anderaa instruments (RCM 4, 7, 9, 11's
EcoFOCI QC procedure developed by <NAME>. and done within excel spreadsheet
<NAME>. usually performed the engr conversions and trimmed the data via excel
Also calculated parameters to monitor the health of the rcm during deployment
For now... stick to excel qc procedure, and just archive exceldata
TODO: This should be a replacement of the original rcm mooring analyis software"""
@staticmethod
def parse_excel(filename=None, datetime_index=True):
r"""
Basic Method to open and read rcm excel files
"""
rawdata_df = pd.read_excel(filename, skiprows=4, parse_dates=["date/time"], index_col="date/time")
#!/usr/bin/env python
# coding: utf-8
# # <<<<<<<<<<<<<<<<<<<< Assignment No. 4 >>>>>>>>>>>>>>>>>>>>>>>>
# ## Student: <NAME>
# ## Exercise 1
# In[1]:
import os
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
import numpy as np
from math import pi
# #### a) Load the SpotifyTop2018_40_V2 data table
# In[2]:
# Cargando datos
data = pd.read_csv("SpotifyTop2018_40_V2.csv", delimiter = ',', decimal = '.', index_col=0)
print(data)
print(data.head())
# In[3]:
# Normalizando y centrando la tabla
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(data)
data.loc[:,:] = scaled_values
print(data)
datos = data
# #### b) Run the k-means method with k = 3. We will modify the attributes of the KMeans(...) class as follows: max_iter : int, default: 300: maximum number of iterations of the k-means algorithm for a single run; for this exercise use max_iter = 1000. n_init : int, default: 10 (strong forms): number of times the k-means algorithm is run with different centroid seeds; the final result is the best output of n_init consecutive runs in terms of intra-class inertia; for this exercise use n_init = 100.
# In[4]:
# Helper function that draws the bar charts used to interpret the clusters
def bar_plot(centros, labels, cluster = None, var = None):
from math import ceil, floor
from seaborn import color_palette
colores = color_palette()
minimo = floor(centros.min()) if floor(centros.min()) < 0 else 0
def inside_plot(valores, labels, titulo):
plt.barh(range(len(valores)), valores, 1/1.5, color = colores)
plt.xlim(minimo, ceil(centros.max()))
plt.title(titulo)
if var is not None:
centros = np.array([n[[x in var for x in labels]] for n in centros])
colores = [colores[x % len(colores)] for x, i in enumerate(labels) if i in var]
labels = labels[[x in var for x in labels]]
if cluster is None:
for i in range(centros.shape[0]):
plt.subplot(1, centros.shape[0], i + 1)
inside_plot(centros[i].tolist(), labels, ('Cluster ' + str(i)))
plt.yticks(range(len(labels)), labels) if i == 0 else plt.yticks([])
else:
pos = 1
for i in cluster:
plt.subplot(1, len(cluster), pos)
inside_plot(centros[i].tolist(), labels, ('Cluster ' + str(i)))
plt.yticks(range(len(labels)), labels) if pos == 1 else plt.yticks([])
pos += 1
# In[5]:
# Helper function that draws the radar charts used to interpret the clusters
def radar_plot(centros, labels):
from math import pi
centros = np.array([((n - min(n)) / (max(n) - min(n)) * 100) if
max(n) != min(n) else (n/n * 50) for n in centros.T])
angulos = [n / float(len(labels)) * 2 * pi for n in range(len(labels))]
angulos += angulos[:1]
ax = plt.subplot(111, polar = True)
ax.set_theta_offset(pi / 2)
ax.set_theta_direction(-1)
plt.xticks(angulos[:-1], labels)
ax.set_rlabel_position(0)
plt.yticks([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
["10%", "20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%", "100%"],
color = "grey", size = 8)
plt.ylim(-10, 100)
for i in range(centros.shape[1]):
valores = centros[:, i].tolist()
valores += valores[:1]
ax.plot(angulos, valores, linewidth = 1, linestyle = 'solid',
label = 'Cluster ' + str(i))
ax.fill(angulos, valores, alpha = 0.3)
plt.legend(loc='upper right', bbox_to_anchor = (0.1, 0.1))
# #### Strong forms (n_init) and number of iterations (max_iter) [Default]
# In[11]:
# Default settings: k = 3 clusters, max_iter = 300 and n_init = 10 (strong forms).
kmedias = KMeans(n_clusters=3, max_iter=300, n_init=10) # create the instance
kmedias.fit(datos)
centros = np.array(kmedias.cluster_centers_)
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, datos.columns)
# #### Strong forms (n_init) and number of iterations (max_iter) [Modified]
# In[12]:
# Re-fitting k-means with the modified settings to see the final cluster assignment
kmedias = KMeans(n_clusters=3, max_iter=1000, n_init=100)
kmedias.fit(datos)
centros = np.array(kmedias.cluster_centers_)
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, datos.columns)
# #### c) Interpret the results of the previous exercise using bar charts and radar charts. Compare with the results obtained in the previous assignment, where hierarchical clustering was used.
# In[14]:
# Ejecuta k-medias con 3 clusters
kmedias = KMeans(n_clusters=3)
kmedias.fit(datos)
print(kmedias.predict(datos))
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[15]:
# Ploteando grafico de barras
plt.figure(1, figsize = (12, 8))
bar_plot(centros, datos.columns)
# ### Interpretacion
# In[15]:
# En cuanto a la interpretacion se puede ver lo siguiente:
# Despues de haber corrido el K-medias con el primer set de parametros, donde init = 10 y max_iter = 300:
# se obtiene:
# primer cluster de color azul, en el que spechiness, loudness, tempo y acoustiness son las variables mas altas, es decir
# las canciones clasificadas en un primer momento tienen registros de palabras en sus canciones, son acosticas y tienen
# los mayores tiempos por minutos expresados en beats, ademas de time_signature y danceability, es decir, las canciones
# son bailables y hay altos volumenes de beats en cada barra, las demas variables son bajas.
# Un segundo cluster naranja, tiene registros altos en cuanto a danceability, time signature, energy, loudness, valence
# e instrumentalness, es decir, estas canciones sosn buenas para bailar, hay altos beats por barra por minuto, tienen
# intensidades buenas, tienen alta sonoridad por pista, ademas de que son canciones bastante positivas asi como instrumen-
# tales, presentan cierto speechiness pero no mas de un 50% lo que quiere decir, es que hay moderada cantidad de palabras
# y la duracion en milisegundos de las canciones es muy baja, es decir, son canciones energeticas y buenas para bailar
# pero duran poco.
# En el cluster 3 (verde): se observa que son las canciones que tienen mayor duracion en milisegundos de todas, y
# presentan cierta acustica, asi como sonoridad y cierta intensidad pero es baja, en las demas variables son bajas.
# Segunda interpretacion con init = 100 y max_iter = 1000
# En este punto, se ve como las iteraciones estabilizan los clusters y estos cambian en algunas representaciones de
# variables ya que se tiene:
# Cluster 1 (azul): se mantienen spechiness, time_signature, danceability, acoustiness, y se agregan liveness y valance
# lo que quiere decir que las canciones en este cluster se caracterizan por tener niveles altos de beats por cada barra
# o medida, son canciones que registran altos registros de letras, son canciones bailables, son acusticas, y se detecta
# presencia de publica en ellas asi como alta positividad musical, es decir son canciones alegres y en la que la gente
# al escucharlas puede bailar y cantar, aunque por otro lado, son canciones cortas ya que presentan bajos registros de
# duration_ms, es decir su duracion en milisegundo es poca, al igual que su intensidad y su deteccion de instrumentalidad.
# Cluster 2 (naranja): se caracteriza por tener las variables mas altas en time_signature, danceability, energy, loudness,
# valence y liveness con respecto a los valores por default, no fue mucho el cambio que hubo y solo instrumentals fue el
# que se cambio, este cluster se caracteriza por tener canciones beats por barra de volumen muy altos, ser canciones
# aptas para bailar, poseen alta sonoridad en la pista medida en decibeles, son canciones que tienen alta presencia de
# positivismo en las letras y que presentan alta presencia de publico. En realidad este cluster es muy parecido al numero 1
# solo que presenta variables como energy y loudness que el cluster 1 no presenta, por otro lado en este cluster estan
# las canciones que registran baja presencia de palabras, acustica e instrumentalidad, y son canciones que tienen duraciones
# mayores en milisegundos que las del cluster 1, es decir, son aptas para bailar, son positivas pero quiza no son canciones
# aptas para cantar, porque registran indices bajos de esta variable.
# Cluster 3 (verde): con respecto al primer cluster por default, en este nuevo cluster ahora se presenta la variable
# de instrumentalidad, y otras como tempo y duration_ms siguen manteniendose, asi como ahora hay presencia moderada de
# energy y loudness. En este cluster va a estar representado por todas aquellas canciones que tienen lo registros mas
# altos de duracion por milisegundos, asi como las que poseen mayor instrumentalidad y tiempo aproximado por beats, asi
# como las que transmiten un relativo alto grado de positividad y presencia de publico pero bajos registros de intensidad
# y de sonoridad. Presenta bajos niveles de palabras en canciones y no son para nada bailables.
# Comparacion con Clustering Jerarquico:
# Se puede ver como el cluster 1 (azul) es bastante similar, habiendo solo uno ligero cambio a nivel de duration_ms ya que
# en Clustering Jerarquico se ve como mas de un 25% de los datos presentaban algo de duration_ms (duracion en milisegundos)
# sin embargo es apenas notorio.
# Con respecto al cluster 2 (naranja) hay muchis cambios, ya que en Jerarquico solo se tenian altas las variables de
# duration_ms, tempo y un poco acoustiness, mientras que en k-medias estas mismas variables no se encuentra altas
# y mas bien en k-medias estas estan bastante bajas y las que estaban bajas en Jerarquico aqui estan altas como es el caso
# de danceability, energy, etc.
# Con el cluster 3 (verde): las variables que se siguen manteniendo son intrsumentalness, tempo y un poco de loudness, aunque
# en Jerarquico instrumentalness estaba alta y en K-Medias esta en menos del 50% sin embargo este cluster sigue siendo
# caracterizado por canciones bastante instumentales y con beats por minuto bastante altos.
# #### d) Using colors, plot the clusters obtained with k-means (k = 3) over the first two components of the principal plane from Principal Component Analysis.
# In[22]:
pca = PCA(n_components=2)
componentes = pca.fit_transform(datos)
componentes
print(datos.shape)
print(componentes.shape)
plt.scatter(componentes[:, 0], componentes[:, 1],c=kmedias.predict(datos))
plt.xlabel('componente 1')
plt.ylabel('componente 2')
plt.title('3 Cluster K-Medias')
# #### e) Using 50 runs of the k-means method, plot the "elbow curve" (Codo de Jambu) for this example. Does the inter-class inertia stabilize at any point?
# In[7]:
# k = 3 clusters, 50 runs (n_init = 50) and the default max_iter = 300
kmedias = KMeans(n_clusters=3, max_iter=300, n_init=50) # create the instance
kmedias.fit(datos)
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[10]:
Nc = range(1, 20)
kmediasList = [KMeans(n_clusters=i) for i in Nc]
varianza = [kmediasList[i].fit(datos).inertia_ for i in range(len(kmediasList))]
plt.plot(Nc,varianza,'o-')
plt.xlabel('Número de clústeres')
plt.ylabel('Varianza explicada por cada cluster (Inercia Intraclases)')
plt.title('Codo de Jambu')
# #### Interpretation
# In[11]:
# In this case there is no clear answer: the curve never really flattens into a straight line,
# although K = 5, K = 7 or K = 13 could arguably be viable choices.
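# In[ ]:
# Illustrative sketch (not part of the original assignment): quantify the elbow by
# measuring how much the intra-class inertia drops with each additional cluster,
# reusing `Nc` and `varianza` from the cell above.
caidas = pd.Series(varianza, index=Nc).diff().abs()
print((caidas / caidas.max()).round(2))  # relative improvement per extra cluster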
# ## Exercise #2
# #### a) Repeat exercise 1 with k = 3 on this data table, using only the numeric variables. We will modify the attributes of the KMeans(...) class as follows: max_iter : int, default: 300: maximum number of iterations of the k-means algorithm for a single run; for this exercise use max_iter = 2000. n_init : int, default: 10 (strong forms): number of times the k-means algorithm is run with different centroid seeds; the final result is the best output of n_init consecutive runs in terms of intra-class inertia; for this exercise use n_init = 150.
# #### Loading the SAheart data table
# In[43]:
corazon = pd.read_csv('SAheart.csv', delimiter = ';', decimal = '.')
print(corazon)
# In[44]:
# Selecting only the numeric variables
corazon2 = pd.DataFrame(data = corazon, columns = (['sbp', 'tobacco', 'ldl', 'adiposity', 'typea', 'obesity',
'alcohol', 'age']))
print(corazon2)
corazon2.head()
# In[45]:
# Normalizando y centrando la tabla
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(corazon2)
corazon2.loc[:,:] = scaled_values
print(corazon2)
# In[25]:
# k = 3 clusters with the modified values max_iter = 2000 and n_init = 150
kmedias = KMeans(n_clusters=3, max_iter=2000, n_init=150) # create the instance
kmedias.fit(corazon2)
centros = np.array(kmedias.cluster_centers_)
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, corazon2.columns)
# In[46]:
# Ejecuta k-medias con 3 clusters
kmedias = KMeans(n_clusters=3)
kmedias.fit(corazon2)
print(kmedias.predict(corazon2))
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[27]:
# Bar chart
plt.figure(1, figsize = (12, 8))
bar_plot(centros, corazon2.columns)
# #### Interpret the results of the previous exercise using bar charts and radar charts. Compare with the results obtained in the previous assignment, where hierarchical clustering was used.
# In[41]:
# Comparando con el ejercicio pasado con Clustering Jerarquico se puede apreciar que en realidad el plot de radar con
# K - Means es practicamente identico al plot de radar pasado, se puede observar como los clusters mantienen igual
# casi todas sus variables, sin embargo el cambio mas grande que se tiene es en el numero de Cluster, ya que para el Jerarquico
# el Cluster 1, eran los individuos que tenian un alto typea A y las demas variables eran bastante bajas, en este caso
# con el k-means este paso a ser el cluster 2.
# El cluster 2 en el Jerarquico, representado por los indidvuos con un sbp alto, las edades mas altas, asi como presencia
# de alto colesterol, adiposidad y alto sobrepeso en el K - Means paso a ser el cluster 3 y ahora los individuos presentan
# mediciones mas altas de SBP y de adiposidad (llegando a lo mas alto) comparadi con el pasado.
# Finalmente el cluster 3 en el Jerarquico, ahora pasa a ser el cluster 1 en el K - medias y sigue teniendo las mismas variables
# originales, como alto colesterol, adiposidad, obesidad, relativamente alta presencia de mediciones alta de SBP y edad,
# pero ahora el K - medias incluyo a la variable typea A alta, y no en un estado medio como el clustering Jerarquico, haciendo
# que los individuos de este cluster sean los que presentan altas edades y enfermedades como obesidad, alto colesterol y
# adiposidad, pero ahora sumado con mayor medida un factor de tipo A asociado a personas mas competitivas y orientada a
# resultados pero que pasan mas estresadas y ansiosas.
# #### Using colors, plot the clusters obtained with k-means (k = 3) over the first two components of the principal plane from Principal Component Analysis.
# In[47]:
pca = PCA(n_components=2)
componentes = pca.fit_transform(corazon2)
componentes
print(corazon2.shape)
print(componentes.shape)
plt.scatter(componentes[:, 0], componentes[:, 1],c=kmedias.predict(corazon2))
plt.xlabel('componente 1')
plt.ylabel('componente 2')
plt.title('3 Cluster K-Medias')
# #### Using 50 runs of the k-means method, plot the "elbow curve" (Codo de Jambu) for this example. Does the inter-class inertia stabilize at any point?
# In[48]:
# k = 3 clusters, 50 runs (n_init = 50) and the default max_iter = 300
kmedias = KMeans(n_clusters=3, max_iter=300, n_init=50) # Declaración de la instancia
kmedias.fit(corazon2)
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[49]:
Nc = range(1, 20)
kmediasList = [KMeans(n_clusters=i) for i in Nc]
varianza = [kmediasList[i].fit(corazon2).inertia_ for i in range(len(kmediasList))]
plt.plot(Nc,varianza,'o-')
plt.xlabel('Número de clústeres')
plt.ylabel('Varianza explicada por cada cluster (Inercia Intraclases)')
plt.title('Codo de Jambu')
# ### Interpretation
# In[ ]:
# Again there is no clear elbow, but K = 2 or K = 6 could be considered viable choices.
# #### b) Repeat the previous exercises, but this time include the categorical variables using complete disjunctive (dummy) coding. Are the results better?
# In[28]:
# Recoding helper
def recodificar(col, nuevo_codigo):
col_cod = pd.Series(col, copy=True)
for llave, valor in nuevo_codigo.items():
col_cod.replace(llave, valor, inplace=True)
return col_cod
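# In[ ]:
# Illustrative sketch (not part of the original assignment): what complete
# disjunctive (dummy) coding produces for a single categorical column, before it
# is applied to the whole SAheart table in the next cell.
ejemplo = pd.Series(['Present', 'Absent', 'Present'], name='famhist')
print(pd.get_dummies(ejemplo))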
# #### Loading the numeric as well as the categorical variables and converting them to complete disjunctive coding
# In[54]:
# Converting the categorical variables into dummies
datos_dummies = pd.get_dummies(corazon)
print(datos_dummies.head())
print(datos_dummies.dtypes)
# In[57]:
# Centrando y normalizando los datos convertidos en dummies
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(datos_dummies)
datos_dummies.loc[:,:] = scaled_values
print(datos_dummies)
dummy = datos_dummies
# In[63]:
# Solo 3 iteraciones y valores modificables en max_iter = 2000 e init = 150
kmedias = KMeans(n_clusters=3, max_iter=2000, n_init=150) # Declaración de la instancia
kmedias.fit(dummy)
centros = np.array(kmedias.cluster_centers_)
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, dummy.columns)
# In[33]:
# Ejecuta k-medias con 3 clusters
kmedias = KMeans(n_clusters=3)
kmedias.fit(datos_dummies)
print(kmedias.predict(datos_dummies))
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[34]:
# Ploteando grafico de barras
plt.figure(1, figsize = (12, 8))
bar_plot(centros, datos_dummies.columns)
# #### Interpret the results of the previous exercise using bar charts and radar charts. Compare with the results obtained in the previous assignment, where hierarchical clustering was used.
# In[51]:
# Once again the clusters, together with the variables assigned to each one, look similar to the
# hierarchical solution, although the same issue appears: the cluster numbers got shuffled. For
# example, cluster 1 in k-means corresponds to cluster 3 in the hierarchical clustering. The
# variables chd_no and absence of family history remain high, and type-A behaviour is high in
# k-means whereas it was medium in the hierarchical solution. Another difference is that in the
# hierarchical clustering these individuals showed high alcohol intake, while in k-means they
# show low alcohol intake. Everything else matches, and the variables not mentioned stay low.
#
# Cluster 2 in k-means corresponds to cluster 1 in the hierarchical solution. It still gathers
# the individuals who have been diagnosed with heart disease, but family history is now somewhat
# higher. They are still the older individuals, and a new trait appears: high alcohol intake
# (which was lower in the hierarchical solution). Obesity remains high, and with k-means these
# individuals additionally show high adiposity and cholesterol, consume more tobacco and have
# higher blood-pressure readings; in the hierarchical solution these last four variables were
# essentially the same (except adiposity, which is slightly lower in k-means). With the inclusion
# of the categorical variables several variables tended to rise, and a strong correlation between
# the categorical and numeric variables becomes apparent.
#
# Finally, cluster 3 in k-means corresponds to cluster 2 in the hierarchical solution, and many
# variables stay the same: age remains high, as do adiposity, cholesterol and tobacco intake,
# while the blood-pressure measurement (sbp) is now higher with k-means than with the
# hierarchical method. In both solutions these individuals have not been diagnosed with heart
# disease. Family history was strongly present in the hierarchical solution, drops slightly in
# k-means, and obesity drops as well, but alcohol intake is higher here, whereas in the
# hierarchical solution it was low or almost null. In k-means a sizeable share of these
# individuals show no family history, which in the hierarchical solution was almost null or very
# low. In short, this k-means cluster contains people who have not been diagnosed with heart
# disease but who largely have a family history of it, are older, suffer other conditions and
# also report high alcohol intake.
# #### Using colors, plot the clusters obtained with k-means (k = 3) over the first two components of the principal plane from the Principal Component Analysis.
# In[59]:
pca = PCA(n_components=2)
componentes = pca.fit_transform(dummy)
componentes
print(dummy.shape)
print(componentes.shape)
plt.scatter(componentes[:, 0], componentes[:, 1],c=kmedias.predict(dummy))
plt.xlabel('Component 1')
plt.ylabel('Component 2')
plt.title('K-means with 3 clusters')
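# A small optional check (assumes the ``pca`` object fitted above is still in scope): how much
# of the total variance the first two principal components actually capture.
print('Variance explained by the first two components:', pca.explained_variance_ratio_.sum())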
# #### Using 50 runs of the k-means method, plot the "Jambu elbow" for this example. Does the between-class inertia stabilize at some point?
# In[60]:
# k-means with 3 clusters, using 50 runs (n_init = 50) and the default max_iter = 300
kmedias = KMeans(n_clusters=3, max_iter=300, n_init=50) # Instantiate the estimator
kmedias.fit(dummy)
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[61]:
Nc = range(1, 20)
kmediasList = [KMeans(n_clusters=i) for i in Nc]
varianza = [kmediasList[i].fit(dummy).inertia_ for i in range(len(kmediasList))]
plt.plot(Nc,varianza,'o-')
plt.xlabel('Number of clusters')
plt.ylabel('Within-cluster variance (intra-class inertia)')
plt.title('Jambu Elbow')
# ### Interpretation
# In[62]:
# The picture is not entirely clear here, but k = 5 or k = 8 look like viable choices, since
# that is where the elbow levels off.
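# A minimal follow-up sketch (assumes ``Nc`` and ``varianza`` from the elbow plot above are
# still in scope): printing the relative drop in inertia at each k makes the elbow easier to
# spot numerically.
drops = -np.diff(varianza)
for k, rel_drop in zip(Nc[1:], drops / np.array(varianza[:-1])):
    print('k = {}: inertia drops by {:.1%}'.format(k, rel_drop))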
# ### Interpretation: hierarchical clustering with categorical variables vs. k-means with categorical variables
# In[36]:
# With the categorical variables added, the results look much better and closely resemble what
# the hierarchical clustering produced. A first cluster (blue) represents the "healthy" people:
# those who do not have heart disease and have no family history of it, but who score high on
# type-A behaviour, meaning they are more results-oriented and competitive and therefore more
# stressed and tense.
#
# Cluster 2 (orange) is made up of people who have not been diagnosed with heart disease but who
# are obese, are among the oldest, show adiposity, trend high in cholesterol and in
# blood-pressure readings, drink heavily and also smoke; some have a family history of heart
# disease and others do not. This is the group with some inherited predisposition whose health
# is further aggravated by their diet and lifestyle.
#
# Finally, cluster 3 (green) holds the people who have already been diagnosed with heart
# disease. They are also among the oldest, have high blood-pressure readings, high cholesterol,
# adiposity and excess weight, show type-A behaviour, consume a lot of tobacco and alcohol, and
# a family history of heart disease is very strongly present. What distinguishes this cluster
# from cluster 2 is that these individuals have actually been diagnosed with heart disease,
# whereas those in cluster 2 have not, although they are clearly at risk.
#
# The first radar and bar charts, built only on the numeric variables, did provide information
# but it was very thin: they did not reveal a strong correlation between the variables, nor how
# heredity and the diagnosis itself played an important role in the analysis.
# ## Exercise 3
# ### Program the class hierarchy according to the following diagram
# In[39]:
# Settings for displaying the image
import pandas as pd
pd.options.display.max_rows = 10
from IPython.display import Image
Image(filename='/Users/heinerleivagmail.com/Jerarquia.png')
# In[1]:
import pandas as pd
import numpy as np
import scipy.linalg as la
from sklearn import preprocessing
import matplotlib.pyplot as plt
from math import sqrt
import os
import scipy.stats
from math import pi
from sklearn.datasets import make_blobs
from scipy.cluster.hierarchy import dendrogram, ward, single, complete,average,linkage, fcluster
import scipy.cluster.hierarchy as sch
from scipy.spatial.distance import pdist
from sklearn.preprocessing import StandardScaler
from math import ceil, floor
from seaborn import color_palette
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
# In[3]:
class exploratorio:
def __init__(self, datos = pd.DataFrame()):
self.__datos = pd.DataFrame(datos)
@property
def datos(self):
return self.__datos
    # Helper methods for exploratory data analysis
def head(self):
return self.__datos.head()
def dimension(self):
return self.__datos.shape
def estadisticas(self):
return self.__datos.describe()
def valores_atipicos(self):
boxplots = self.__datos.boxplot(return_type='axes')
return boxplots
def histograma(self):
plt.style.use('seaborn-white')
return plt.hist(self.__datos)
def grafico_densidad(self):
grafo = self.__datos.plot(kind='density')
return grafo
def test_normalidad(self):
X = self.__datos['Matematicas']
print(X)
shapiro_resultados = scipy.stats.shapiro(X)
print(shapiro_resultados)
p_value = shapiro_resultados[1]
print(p_value)
        # interpretation
alpha = 0.05
if p_value > alpha:
            print('Follows the normal distribution (H0 is not rejected)')
else:
            print('Does not follow the normal distribution (H0 is rejected)')
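# A minimal usage sketch of the class above (``notas`` is a hypothetical DataFrame, shown only
# for illustration; ``test_normalidad`` expects it to contain a 'Matematicas' column):
#   eda = exploratorio(notas)
#   print(eda.head())
#   print(eda.estadisticas())
#   eda.valores_atipicos()
#   eda.test_normalidad()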
# In[5]:
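# Note: the class below reuses the name PCA, shadowing sklearn.decomposition.PCA imported
# above; any later call to PCA(n_components=...) in this notebook resolves to this class
# instead of scikit-learn's.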
class PCA(exploratorio):
def __init__(self, datos = pd.DataFrame()):
super().__init__(datos = | pd.DataFrame() | pandas.DataFrame |
import csv
import pandas as pd
import logging
class OnetSkillImportanceExtractor(object):
"""
An object that creates a skills importance CSV based on ONET data
"""
def __init__(self, onet_source, output_filename, hash_function):
"""
Args:
output_filename: A filename to write the final dataset
onet_source: An object that is able to fetch ONET files by name
hash_function: A function that can hash a given string
"""
self.output_filename = output_filename
self.onet_source = onet_source
self.hash_function = hash_function
def onet_to_pandas(self, filename, col_name):
"""
Args:
filename: an unpathed filename referring to an ONET skill file
col_name: A list of columns to extract from the file
Returns:
A pandas DataFrame
"""
logging.info('Converting ONET %s to pandas', filename)
with self.onet_source.ensure_file(filename) as fullpath:
with open(fullpath) as f:
onet = [row for row in csv.DictReader(f, delimiter='\t')]
onet = | pd.DataFrame(onet) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Unit tests for cartoframes.data.services.Geocode"""
import unittest
import os
import sys
import json
import warnings
import pandas as pd
import geopandas as gpd
from carto.exceptions import CartoException
from cartoframes.data import Dataset
from cartoframes.auth import Credentials
from cartoframes.data.clients import SQLClient
from cartoframes.data.services import Geocoding
from cartoframes.utils.columns import normalize_name
from cartoframes.utils.geom_utils import RESERVED_GEO_COLUMN_NAME
from ...helpers import _UserUrlLoader, _ReportQuotas
warnings.filterwarnings('ignore')
class TestGeocoding(unittest.TestCase, _UserUrlLoader, _ReportQuotas):
"""Tests for cartoframes.data.service.Geocoding"""
def setUp(self):
if (os.environ.get('APIKEY') is None or
os.environ.get('USERNAME') is None):
try:
creds = json.loads(open('tests/e2e/secret.json').read())
self.apikey = creds['APIKEY']
self.username = creds['USERNAME']
except Exception: # noqa: E722
warnings.warn("Skipping Context tests. To test it, "
"create a `secret.json` file in test/ by "
"renaming `secret.json.sample` to `secret.json` "
"and updating the credentials to match your "
"environment.")
self.apikey = None
self.username = None
else:
self.apikey = os.environ['APIKEY']
self.username = os.environ['USERNAME']
# sets skip value
self.no_credentials = self.apikey is None or self.username is None
# table naming info
has_mpl = 'mpl' if os.environ.get('MPLBACKEND') else 'nonmpl'
pyver = sys.version[0:3].replace('.', '_')
buildnum = os.environ.get('TRAVIS_BUILD_NUMBER') or 'none'
# Skip tests checking quotas when running in TRAVIS
# since usually multiple tests will be running concurrently
# in that case
self.no_credits = self.no_credentials or buildnum != 'none'
self.test_slug = '{ver}_{num}_{mpl}'.format(
ver=pyver, num=buildnum, mpl=has_mpl
)
self.test_tables = []
self.base_url = self.user_url().format(username=self.username)
self.credentials = Credentials(self.username, self.apikey, self.base_url)
self.sql_client = SQLClient(self.credentials)
self.tearDown()
def skip(self, if_no_credits=False, if_no_credentials=False):
if self.no_credits and if_no_credits:
raise unittest.SkipTest("skipping this test to avoid consuming credits")
if self.no_credentials and if_no_credentials:
raise unittest.SkipTest("no carto credentials, skipping this test")
def get_test_table_name(self, name):
n = len(self.test_tables) + 1
table_name = normalize_name(
'cf_test_table_{name}_{n}_{slug}'.format(name=name, n=n, slug=self.test_slug)
)
self.test_tables.append(table_name)
return table_name
def tearDown(self):
"""restore to original state"""
sql_drop = 'DROP TABLE IF EXISTS {};'
for table in self.test_tables:
try:
Dataset(table, credentials=self.credentials).delete()
self.sql_client.query(sql_drop.format(table))
except CartoException:
warnings.warn('Error deleting tables')
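    # Records the metered geocoding quota through the shared test-quota helper so the
    # assertions below can check exactly how many credits each geocode call consumed.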
def used_quota(self, gc):
return TestGeocoding.update_quotas('geocode', gc.used_quota())
def test_invalid_arguments(self):
gc = Geocoding(credentials=self.credentials)
df = pd.DataFrame([['Gran Via 46', 'Madrid'], ['Ebro 1', 'Sevilla']], columns=['address', 'city'])
with self.assertRaises(ValueError):
gc.geocode(df, street='address', city={'columna': 'city'})
with self.assertRaises(ValueError):
gc.geocode(df, street='address', state={'columna': 'city'})
with self.assertRaises(ValueError):
gc.geocode(df, street='address', country={'columna': 'city'})
with self.assertRaises(ValueError):
gc.geocode(df, street='address', city={'column': 'ciudad'})
with self.assertRaises(ValueError):
gc.geocode(df, street='address', state={'column': 'ciudad'})
with self.assertRaises(ValueError):
gc.geocode(df, street='address', country={'column': 'ciudad'})
with self.assertRaises(ValueError):
gc.geocode(df, street='address', city='ciudad')
with self.assertRaises(ValueError):
gc.geocode(df, street='address', state='ciudad')
with self.assertRaises(ValueError):
gc.geocode(df, street='address', country='ciudad')
with self.assertRaises(ValueError):
gc.geocode(df, street='address', city="'city'")
with self.assertRaises(ValueError):
gc.geocode(df, street='address', city={'column': 'city', 'value': 'London'})
def test_geocode_dataframe(self):
self.skip(if_no_credits=True, if_no_credentials=True)
gc = Geocoding(credentials=self.credentials)
df = pd.DataFrame([['Gran Via 46', 'Madrid'], ['Ebro 1', 'Sevilla']], columns=['address', 'city'])
quota = self.used_quota(gc)
# Preview
info = gc.geocode(df, street='address', city='city', country={'value': 'Spain'}, dry_run=True).metadata
self.assertEqual(info.get('required_quota'), 2)
self.assertEqual(self.used_quota(gc), quota)
# Geocode
gc_df, info = gc.geocode(df, street='address', city='city', country={'value': 'Spain'})
self.assertTrue(isinstance(gc_df, gpd.GeoDataFrame))
self.assertEqual(info.get('required_quota'), 2)
self.assertEqual(info.get('successfully_geocoded'), 2)
self.assertEqual(info.get('final_records_with_geometry'), 2)
quota += 2
self.assertEqual(self.used_quota(gc), quota)
self.assertIsNotNone(gc_df.the_geom)
self.assertTrue('cartodb_id' in gc_df)
self.assertEqual(gc_df.index.name, 'cartodb_id')
# Preview, Geocode again (should do nothing)
info = gc.geocode(gc_df, street='address', city='city', country={'value': 'Spain'}, dry_run=True).metadata
self.assertEqual(info.get('required_quota'), 0)
self.assertEqual(self.used_quota(gc), quota)
info = gc.geocode(gc_df, street='address', city='city', country={'value': 'Spain'}).metadata
self.assertEqual(info.get('required_quota'), 0)
self.assertEqual(self.used_quota(gc), quota)
# Incremental geocoding: modify one row
gc_df.at[1, 'address'] = 'Gran Via 48'
info = gc.geocode(gc_df, street='address', city='city', country={'value': 'Spain'}, dry_run=True).metadata
self.assertEqual(info.get('required_quota'), 1)
self.assertEqual(self.used_quota(gc), quota)
info = gc.geocode(gc_df, street='address', city={'column': 'city'}, country={'value': 'Spain'}).metadata
self.assertEqual(info.get('required_quota'), 1)
quota += 1
self.assertEqual(self.used_quota(gc), quota)
def test_geocode_dataframe_preserves_input_cartodb(self):
self.skip(if_no_credits=True, if_no_credentials=True)
gc = Geocoding(credentials=self.credentials)
df = pd.DataFrame(
[[1, 'Gran Via 46', 'Madrid'], [2, 'Ebro 1', 'Sevilla']], columns=['cartodb_id', 'address', 'city'])
quota = self.used_quota(gc)
gc_df = gc.geocode(df, street='address', city='city', country={'value': 'Spain'}).data
self.assertTrue(isinstance(gc_df, gpd.GeoDataFrame))
quota += 2
self.assertEqual(self.used_quota(gc), quota)
self.assertTrue('cartodb_id' in gc_df)
def test_geocode_dataframe_as_new_table(self):
self.skip(if_no_credits=True, if_no_credentials=True)
gc = Geocoding(credentials=self.credentials)
df = pd.DataFrame([['Gran Via 46', 'Madrid'], ['Ebro 1', 'Sevilla']], columns=['address', 'city'])
quota = self.used_quota(gc)
table_name = self.get_test_table_name('gcdf')
# Preview
info = gc.geocode(df, street='address', city='city', country={'value': 'Spain'},
table_name=table_name, dry_run=True).metadata
self.assertEqual(info.get('required_quota'), 2)
self.assertEqual(self.used_quota(gc), quota)
# Geocode
gc_df, info = gc.geocode(df, street='address', city='city', country={'value': 'Spain'}, table_name=table_name)
self.assertTrue(isinstance(gc_df, pd.DataFrame))
self.assertEqual(info.get('required_quota'), 2)
self.assertEqual(info.get('successfully_geocoded'), 2)
self.assertEqual(info.get('final_records_with_geometry'), 2)
quota += 2
self.assertEqual(self.used_quota(gc), quota)
# This could change with provider:
# self.assertEqual(gc_df.the_geom[1], '0101000020E61000002F34D769A4A50DC0C425C79DD2354440')
# self.assertEqual(gc_df.the_geom[2], '0101000020E6100000912C6002B7EE17C0C45A7C0A80AD4240')
self.assertIsNotNone(gc_df.the_geom)
dataset = Dataset(table_name, credentials=self.credentials)
dl_df = dataset.download()
self.assertIsNotNone(dl_df.the_geom)
self.assertTrue(dl_df.equals(gc_df.drop(RESERVED_GEO_COLUMN_NAME, 1)))
self.assertTrue('cartodb_id' in dataset.get_column_names())
self.assertEqual(dl_df.index.name, 'cartodb_id')
def test_geocode_table(self):
self.skip(if_no_credits=True, if_no_credentials=True)
gc = Geocoding(credentials=self.credentials)
df = pd.DataFrame([['Gran Via 46', 'Madrid'], ['Ebro 1', 'Sevilla']], columns=['address', 'city'])
table_name = self.get_test_table_name('gctb')
Dataset(df).upload(table_name=table_name, credentials=self.credentials)
ds = Dataset(table_name, credentials=self.credentials)
quota = self.used_quota(gc)
# Preview
info = gc.geocode(ds, street='address', city='city', country={'value': 'Spain'}, dry_run=True).metadata
self.assertEqual(info.get('required_quota'), 2)
self.assertEqual(self.used_quota(gc), quota)
# Geocode
gc_ds, info = gc.geocode(ds, street='address', city='city', country={'value': 'Spain'})
self.assertTrue(isinstance(gc_ds, Dataset))
self.assertEqual(info.get('required_quota'), 2)
self.assertEqual(info.get('successfully_geocoded'), 2)
self.assertEqual(info.get('final_records_with_geometry'), 2)
quota += 2
self.assertEqual(self.used_quota(gc), quota)
self.assertEqual(gc_ds.table_name, table_name)
self.assertTrue('cartodb_id' in gc_ds.get_column_names())
# Preview, Geocode again (should do nothing)
info = gc.geocode(ds, street='address', city='city', country={'value': 'Spain'}, dry_run=True).metadata
self.assertEqual(info.get('required_quota'), 0)
self.assertEqual(self.used_quota(gc), quota)
info = gc.geocode(ds, street='address', city='city', country={'value': 'Spain'}).metadata
self.assertEqual(info.get('required_quota'), 0)
self.assertEqual(self.used_quota(gc), quota)
# Incremental geocoding: modify one row
self.sql_client.query("UPDATE {table} SET address='Gran Via 48' WHERE cartodb_id=1".format(table=table_name))
info = gc.geocode(ds, street='address', city='city', country={'value': 'Spain'}, dry_run=True).metadata
self.assertEqual(info.get('required_quota'), 1)
self.assertEqual(self.used_quota(gc), quota)
info = gc.geocode(ds, street='address', city='city', country={'value': 'Spain'}).metadata
self.assertEqual(info.get('required_quota'), 1)
quota += 1
self.assertEqual(self.used_quota(gc), quota)
def test_geocode_table_as_new_table(self):
self.skip(if_no_credits=True, if_no_credentials=True)
gc = Geocoding(credentials=self.credentials)
df = pd.DataFrame([['Gran Via 46', 'Madrid'], ['Ebro 1', 'Sevilla']], columns=['address', 'city'])
table_name = self.get_test_table_name('gctb')
Dataset(df).upload(table_name=table_name, credentials=self.credentials)
ds = Dataset(table_name, credentials=self.credentials)
new_table_name = self.get_test_table_name('gctb')
quota = self.used_quota(gc)
# Preview
info = gc.geocode(ds, street='address', city='city', country={'value': 'Spain'},
table_name=new_table_name, dry_run=True).metadata
self.assertEqual(info.get('required_quota'), 2)
self.assertEqual(self.used_quota(gc), quota)
# Geocode
gc_ds, info = gc.geocode(ds, street='address', city='city', country={'value': 'Spain'},
table_name=new_table_name)
self.assertTrue(isinstance(gc_ds, Dataset))
self.assertEqual(info.get('required_quota'), 2)
self.assertEqual(info.get('successfully_geocoded'), 2)
self.assertEqual(info.get('final_records_with_geometry'), 2)
quota += 2
self.assertEqual(self.used_quota(gc), quota)
self.assertEqual(gc_ds.table_name, new_table_name)
self.assertTrue('cartodb_id' in gc_ds.get_column_names())
# Original table should not have been geocoded
info = gc.geocode(ds, street='address', city='city', country={'value': 'Spain'}, dry_run=True).metadata
self.assertEqual(info.get('required_quota'), 2)
self.assertEqual(self.used_quota(gc), quota)
# Preview, Geocode again (should do nothing)
info = gc.geocode(gc_ds, street='address', city='city', country={'value': 'Spain'}, dry_run=True).metadata
self.assertEqual(info.get('required_quota'), 0)
self.assertEqual(self.used_quota(gc), quota)
info = gc.geocode(gc_ds, street='address', city='city', country={'value': 'Spain'}).metadata
self.assertEqual(info.get('required_quota'), 0)
self.assertEqual(self.used_quota(gc), quota)
def test_geocode_dataframe_dataset(self):
self.skip(if_no_credits=True, if_no_credentials=True)
gc = Geocoding(credentials=self.credentials)
df = pd.DataFrame([['Gran Via 46', 'Madrid'], ['Ebro 1', 'Sevilla']], columns=['address', 'city'])
ds = Dataset(df)
quota = self.used_quota(gc)
# Preview
info = gc.geocode(ds, street='address', city='city', country={'value': 'Spain'}, dry_run=True).metadata
self.assertEqual(info.get('required_quota'), 2)
self.assertEqual(self.used_quota(gc), quota)
# Geocode
gc_ds, info = gc.geocode(ds, street='address', city='city', country={'value': 'Spain'})
self.assertTrue(isinstance(gc_ds, Dataset))
self.assertEqual(info.get('required_quota'), 2)
self.assertEqual(info.get('successfully_geocoded'), 2)
self.assertEqual(info.get('final_records_with_geometry'), 2)
quota += 2
self.assertEqual(self.used_quota(gc), quota)
self.assertIsNotNone(gc_ds.dataframe.the_geom)
self.assertTrue('cartodb_id' in gc_ds.get_column_names())
self.assertTrue('cartodb_id' in gc_ds.dataframe)
self.assertEqual(gc_ds.dataframe.index.name, 'cartodb_id')
def test_geocode_dataframe_dataset_as_new_table(self):
self.skip(if_no_credits=True, if_no_credentials=True)
gc = Geocoding(credentials=self.credentials)
df = | pd.DataFrame([['Gran Via 46', 'Madrid'], ['Ebro 1', 'Sevilla']], columns=['address', 'city']) | pandas.DataFrame |
import pandas as pd
def get_concatenated_df(files, separator, fields_to_keep = None):
dfs = [ | pd.read_csv(file, sep=separator) | pandas.read_csv |
#dependencies
from sklearn.cross_decomposition import PLSRegression
from sklearn.model_selection import cross_validate
import pandas as pd
import numpy as np
from scipy.signal import savgol_filter
from sklearn.base import TransformerMixin, RegressorMixin, BaseEstimator
from scipy import sparse, signal
from BaselineRemoval import BaselineRemoval
from sklearn.model_selection import ShuffleSplit
from scipy.sparse.linalg import spsolve
#Import prep methods
import sklearn
from sklearn.preprocessing import StandardScaler, MinMaxScaler,MaxAbsScaler, RobustScaler
from sklearn.preprocessing import FunctionTransformer, PowerTransformer, QuantileTransformer
from sklearn.decomposition import PCA, KernelPCA
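# This module collects scikit-learn compatible spectral preprocessing transformers
# (Savitzky-Golay smoothing, several baseline-removal strategies, MSC, SNV and derivatives)
# meant to be chained in a Pipeline ahead of a PLS regression model.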
class SavgolFilter(BaseEstimator,TransformerMixin):
def __init__(self,window_length=5,polyorder=2,axis=1):
self.__name__='SavgolFilter'
self.window_length=window_length
self.polyorder=polyorder
self.axis=axis
self.output=None
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
self.output=savgol_filter(X,window_length=self.window_length,polyorder=self.polyorder,axis=self.axis)
return self.output
def fit_transform(self,X,y=None):
self.output=savgol_filter(X,window_length=self.window_length,polyorder=self.polyorder,axis=self.axis)
return self.output
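# BaselineASLS implements the asymmetric least squares baseline of Eilers & Boelens: it
# repeatedly solves (W + lam * D'D) z = W f, where D is a second-difference operator, and
# re-weights asymmetrically -- points above the current baseline (f > z) get weight p, points
# below get 1 - p -- so that z relaxes onto the baseline rather than onto the peaks.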
class BaselineASLS(BaseEstimator,TransformerMixin):
#Asymmetric Least Squares
def __init__(self, lam=1e5, p=1e-3, niter=10):
self.__name__='BaselineAsLS'
self.lam=lam
self.p=p
self.niter=niter
self.y=None
self.output=None
def fit(self,X,y=None):
self.y=y
def transform(self,X,y=None):
y=self.y
self.output=np.apply_along_axis(lambda x: self.line_remove(x), 0, X)
return self.output
def line_remove(self,f):
L = len(f)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
z = 0
for i in range(self.niter):
W = sparse.spdiags(w, 0, L, L)
Z=W + self.lam * D.dot(D.transpose())
z = spsolve(Z, w * f)
w = self.p * (f > z) + (1 - self.p) * (f < z)
return z
def fit_transform(self,X,y=None):
self.y=y
return self.transform(X,y)
class BaselineModpoly(BaseEstimator,TransformerMixin):
def __init__(self, degree=2):
self.__name__='BaselineModPoly'
self.degree=degree
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.ModPoly(self.degree)
del MP
return X_
def fit_transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.ModPoly(self.degree)
del MP
return X_
class BaselineZhangFit(BaseEstimator,TransformerMixin):
def __init__(self, itermax=50):
self.__name__='BaselineZhangFit'
self.itermax=itermax
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.ZhangFit(itermax=self.itermax)
del MP
return X_
def fit_transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.ZhangFit(itermax=self.itermax)
del MP
return X_
class BaselineIModPoly(BaseEstimator,TransformerMixin):
def __init__(self, degree=2):
self.__name__='BaselineImprovedModPoly'
self.degree=degree
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.IModPoly(self.degree)
del MP
return X_
def fit_transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.IModPoly(self.degree)
del MP
return X_
class BaselineLinear(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='BaselineLinear'
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
return signal.detrend(X)
def fit_transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
return signal.detrend(X)
class BaselineSecondOrder(BaseEstimator,TransformerMixin):
def __init__(self,degree=2):
self.__name__='BaselineSecondOrder'
self.degree=degree
def fit(self,X,y=None):
pass
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
t=np.arange(0,X.shape[1])
X_s= X.apply(lambda x: x- np.polyval(np.polyfit(t,x,self.degree), t),axis=1)
return X_s
def transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
t=np.arange(0,X.shape[1])
X_s= X.apply(lambda x: x- np.polyval(np.polyfit(t,x,self.degree), t),axis=1)
return X_s
class MSC(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='MSC'
self.mean=None
def fit(self,X,y=None):
self.mean= np.array(X.mean(axis=0))
def transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
#self.mean= np.array(X.mean(axis=0))
def transformMSC(x,mean):
m,b= np.polyfit(mean,x,1)
return (x-b)*m
return X.apply(transformMSC,args=(self.mean,),axis=1).values
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
self.mean= np.array(X.mean(axis=0))
def transformMSC(x,mean):
m,b= np.polyfit(mean,x,1)
return (x-b)*m
return X.apply(transformMSC,args=(self.mean,),axis=1).values
class FirstDerivative(BaseEstimator,TransformerMixin):
def __init__(self,d=2):
self.__name__='First Derivative'
self.d=d
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
X_=X.diff(self.d,axis=1)
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
return X_
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
X_=X.diff(self.d,axis=1)
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
return X_
# TO DO:
#Piecewise MSC (PMSC)
#Extended MSC (2nd order), Inverse MSC, EIMSC
#Weighted MSC, Loopy MSC (LMSC)
#Norris-Williams
#WhittakerSmooth
class SecondDerivative(BaseEstimator,TransformerMixin):
def __init__(self,d=2):
self.__name__='Second Derivative'
self.d=d
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
X_=X.diff(self.d,axis=1)
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
X_=X_.diff(self.d,axis=1) #second dev
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
return X_
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
X_=X.diff(self.d,axis=1)
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
X_=X_.diff(self.d,axis=1) #second dev
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
return X_
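# A minimal usage sketch (assumptions: spectra arrive as a 2-D array of shape
# (n_samples, n_wavelengths) with at least 11 points per spectrum, ``y`` holds reference
# values, and the hypothetical helper below is never called inside this module -- it only
# shows how these transformers are meant to be chained in front of a PLS model).
def example_preprocessing_pipeline(n_components=5):
    from sklearn.pipeline import Pipeline
    return Pipeline([
        ('smooth', SavgolFilter(window_length=11, polyorder=2)),   # denoise each spectrum
        ('baseline', BaselineSecondOrder(degree=2)),               # remove slow baseline drift
        ('scatter', MSC()),                                        # multiplicative scatter correction
        ('pls', PLSRegression(n_components=n_components)),         # final regression step
    ])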
class SNV(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='SNV'
self.mean=None
self.std=None
def fit(self,X):
try:
X=pd.DataFrame(X)
except:
pass
self.mean=X.mean(axis=0)
self.std=X.std(axis=0)
def transform(self,X, y=None):
try:
X= | pd.DataFrame(X) | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import skbio
import numpy as np
from biom.table import Table
import pandas as pd
import pandas.testing as pdt
from q2_feature_table import merge, merge_seqs, merge_taxa
from q2_feature_table._merge import _merge_feature_data, _get_overlapping
class MergeTableTests(unittest.TestCase):
def test_single_table(self):
t = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t])
self.assertEqual(t, obs)
def test_valid_overlapping_feature_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S4', 'S5', 'S6'])
obs = merge([t1, t2])
exp = Table(np.array([[0, 1, 3, 0, 2, 6], [1, 1, 2, 0, 0, 0],
[0, 0, 0, 2, 2, 4]]),
['O1', 'O2', 'O3'],
['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_valid_non_overlapping_feature_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O3', 'O4'],
['S4', 'S5', 'S6'])
obs = merge([t1, t2])
exp = Table(np.array([[0, 1, 3, 0, 0, 0], [1, 1, 2, 0, 0, 0],
[0, 0, 0, 0, 2, 6], [0, 0, 0, 2, 2, 4]]),
['O1', 'O2', 'O3', 'O4'],
['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_invalid_overlapping_feature_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S4', 'S5', 'S6'])
with self.assertRaisesRegex(ValueError, 'features are present'):
merge([t1, t2], 'error_on_overlapping_feature')
def test_valid_overlapping_sample_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O3', 'O4'],
['S1', 'S5', 'S6'])
obs = merge([t1, t2], 'error_on_overlapping_feature')
exp = Table(np.array([[0, 1, 3, 0, 0], [1, 1, 2, 0, 0],
[0, 0, 0, 2, 6], [2, 0, 0, 2, 4]]),
['O1', 'O2', 'O3', 'O4'],
['S1', 'S2', 'S3', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_invalid_overlapping_sample_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S1', 'S5', 'S6'])
with self.assertRaisesRegex(ValueError, 'samples.*S1'):
merge([t1, t2])
def test_invalid_overlap_method(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S1', 'S5', 'S6'])
with self.assertRaisesRegex(ValueError, 'overlap method'):
merge([t1, t2], 'peanut')
def test_sum_full_overlap(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t1, t2], 'sum')
exp = Table(np.array([[0, 3, 9], [3, 3, 6]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
self.assertEqual(obs, exp)
def test_sum_triple_overlap(self):
t1 = Table(np.array([[1, 1, 1], [1, 1, 1]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t1] * 3, 'sum')
exp = Table(np.array([[3, 3, 3], [3, 3, 3]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
self.assertEqual(obs, exp)
def test_sum_some_overlap(self):
        # Partial overlap: only some sample ids and feature ids are shared between the tables.
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S4', 'S2', 'S5'])
obs = merge([t1, t2], 'sum')
exp = Table(np.array([[0, 3, 3, 0, 6], [1, 1, 2, 0, 0],
[0, 2, 0, 2, 4]]),
['O1', 'O2', 'O3'],
['S1', 'S2', 'S3', 'S4', 'S5'])
self.assertEqual(obs, exp)
def test_sum_overlapping_sample_ids(self):
# This should produce the same result as `error_on_overlapping_feature`
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O3', 'O4'],
['S1', 'S5', 'S6'])
obs = merge([t1, t2], 'sum')
exp = Table(np.array([[0, 1, 3, 0, 0], [1, 1, 2, 0, 0],
[0, 0, 0, 2, 6], [2, 0, 0, 2, 4]]),
['O1', 'O2', 'O3', 'O4'],
['S1', 'S2', 'S3', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_sum_overlapping_feature_ids(self):
# This should produce the same result as `error_on_overlapping_sample`
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S4', 'S5', 'S6'])
obs = merge([t1, t2], 'sum')
exp = Table(np.array([[0, 1, 3, 0, 2, 6], [1, 1, 2, 0, 0, 0],
[0, 0, 0, 2, 2, 4]]),
['O1', 'O2', 'O3'],
['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_average(self):
t1 = Table(np.array([[1, 1, 1], [1, 1, 1]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t1] * 3, 'average')
exp = Table(np.array([[1, 1, 1], [1, 1, 1]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
self.assertEqual(obs, exp)
def test_average_relative_frequency(self):
t1 = Table(np.array([[0.75, 0.75, 0.75], [0.75, 0.75, 0.75]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0.25, 0.25, 0.25], [0.25, 0.25, 0.25]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t1, t2], 'average')
exp = Table(np.array([[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
self.assertEqual(obs, exp)
class UtilTests(unittest.TestCase):
def test_get_overlapping(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'], ['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'], ['S1', 'S5', 'S6'])
# samples
obs = _get_overlapping([t1, t2], 'sample')
self.assertEqual(set(['S1']), obs)
# features
obs = _get_overlapping([t1, t2], 'observation')
self.assertEqual(set(['O1']), obs)
def test_get_overlapping_no_overlap(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'], ['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O3', 'O4'], ['S4', 'S5', 'S6'])
# samples
obs = _get_overlapping([t1, t2], 'sample')
self.assertEqual(set(), obs)
# features
obs = _get_overlapping([t1, t2], 'observation')
self.assertEqual(set(), obs)
def test_get_overlapping_multiple(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'], ['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'], ['S1', 'S5', 'S6'])
t3 = Table(np.array([[3, 3, 1], [0, 2, 1]]),
['O1', 'O2'], ['S1', 'S3', 'S6'])
# samples
obs = _get_overlapping([t1, t2, t3], 'sample')
self.assertEqual({'S1', 'S3', 'S6'}, obs)
# features
obs = _get_overlapping([t1, t2, t3], 'observation')
self.assertEqual({'O1', 'O2'}, obs)
class MergeFeatureDataTests(unittest.TestCase):
def test_merge_single(self):
d = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
obs = _merge_feature_data([d])
pdt.assert_series_equal(obs, d)
def test_valid_overlapping_feature_ids(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGT', 'ACCA'], index=['f1', 'f3'])
obs = _merge_feature_data([d1, d2])
exp = pd.Series(['ACGT', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
def test_first_feature_data_retained(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGAAA', 'ACCA'], index=['f1', 'f3'])
obs = _merge_feature_data([d1, d2])
exp = | pd.Series(['ACGT', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3']) | pandas.Series |
import os
import sqlite3
from unittest import TestCase
import warnings
from contextlib2 import ExitStack
from logbook import NullHandler, Logger
import numpy as np
import pandas as pd
from six import with_metaclass, iteritems, itervalues
import responses
from toolz import flip, groupby, merge
from trading_calendars import (
get_calendar,
register_calendar_alias,
)
import h5py
import zipline
from zipline.algorithm import TradingAlgorithm
from zipline.assets import Equity, Future
from zipline.assets.continuous_futures import CHAIN_PREDICATES
from zipline.data.fx import DEFAULT_FX_RATE
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.utils.memoize import classlazyval
from zipline.pipeline import SimplePipelineEngine
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.data.testing import TestingDataSet
from zipline.pipeline.domain import GENERIC, US_EQUITIES
from zipline.pipeline.loaders import USEquityPricingLoader
from zipline.pipeline.loaders.testing import make_seeded_random_loader
from zipline.protocol import BarData
from zipline.utils.paths import ensure_directory, ensure_directory_containing
from .core import (
create_daily_bar_data,
create_minute_bar_data,
make_simple_equity_info,
tmp_asset_finder,
tmp_dir,
write_hdf5_daily_bars,
)
from .debug import debug_mro_failure
from ..data.adjustments import (
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
)
from ..data.bcolz_daily_bars import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
)
from ..data.data_portal import (
DataPortal,
DEFAULT_MINUTE_HISTORY_PREFETCH,
DEFAULT_DAILY_HISTORY_PREFETCH,
)
from ..data.fx import (
InMemoryFXRateReader,
HDF5FXRateReader,
HDF5FXRateWriter,
)
from ..data.hdf5_daily_bars import (
HDF5DailyBarReader,
HDF5DailyBarWriter,
MultiCountryDailyBarReader,
)
from ..data.loader import (
get_benchmark_filename,
)
from ..data.minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
US_EQUITIES_MINUTES_PER_DAY,
FUTURES_MINUTES_PER_DAY,
)
from ..data.resample import (
minute_frame_to_session_frame,
MinuteResampleSessionBarReader
)
from ..finance.trading import SimulationParameters
from ..utils.classproperty import classproperty
from ..utils.final import FinalMeta, final
from ..utils.memoize import remember_last
zipline_dir = os.path.dirname(zipline.__file__)
class DebugMROMeta(FinalMeta):
"""Metaclass that helps debug MRO resolution errors.
"""
def __new__(mcls, name, bases, clsdict):
try:
return super(DebugMROMeta, mcls).__new__(
mcls, name, bases, clsdict
)
except TypeError as e:
if "(MRO)" in str(e):
msg = debug_mro_failure(name, bases)
raise TypeError(msg)
else:
raise
class ZiplineTestCase(with_metaclass(DebugMROMeta, TestCase)):
"""
Shared extensions to core unittest.TestCase.
Overrides the default unittest setUp/tearDown functions with versions that
use ExitStack to correctly clean up resources, even in the face of
exceptions that occur during setUp/setUpClass.
Subclasses **should not override setUp or setUpClass**!
Instead, they should implement `init_instance_fixtures` for per-test-method
resources, and `init_class_fixtures` for per-class resources.
Resources that need to be cleaned up should be registered using
    either `enter_{class,instance}_context` or `add_{class,instance}_callback`.
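
    Examples
    --------
    A minimal sketch of the intended subclassing pattern (``MyTestCase`` and
    ``open_some_resource`` are illustrative names only)::

        class MyTestCase(ZiplineTestCase):

            @classmethod
            def init_class_fixtures(cls):
                super(MyTestCase, cls).init_class_fixtures()
                cls.tmp = cls.enter_class_context(tmp_dir())

            def init_instance_fixtures(self):
                super(MyTestCase, self).init_instance_fixtures()
                self.resource = self.enter_instance_context(open_some_resource())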
"""
_in_setup = False
@final
@classmethod
def setUpClass(cls):
# Hold a set of all the "static" attributes on the class. These are
# things that are not populated after the class was created like
# methods or other class level attributes.
cls._static_class_attributes = set(vars(cls))
cls._class_teardown_stack = ExitStack()
try:
cls._base_init_fixtures_was_called = False
cls.init_class_fixtures()
assert cls._base_init_fixtures_was_called, (
"ZiplineTestCase.init_class_fixtures() was not called.\n"
"This probably means that you overrode init_class_fixtures"
" without calling super()."
)
except BaseException: # Clean up even on KeyboardInterrupt
cls.tearDownClass()
raise
@classmethod
def init_class_fixtures(cls):
"""
Override and implement this classmethod to register resources that
should be created and/or torn down on a per-class basis.
Subclass implementations of this should always invoke this with super()
to ensure that fixture mixins work properly.
"""
if cls._in_setup:
raise ValueError(
'Called init_class_fixtures from init_instance_fixtures.'
' Did you write super(..., self).init_class_fixtures() instead'
' of super(..., self).init_instance_fixtures()?',
)
cls._base_init_fixtures_was_called = True
@final
@classmethod
def tearDownClass(cls):
# We need to get this before it's deleted by the loop.
stack = cls._class_teardown_stack
for name in set(vars(cls)) - cls._static_class_attributes:
# Remove all of the attributes that were added after the class was
# constructed. This cleans up any large test data that is class
# scoped while still allowing subclasses to access class level
# attributes.
delattr(cls, name)
stack.close()
@final
@classmethod
def enter_class_context(cls, context_manager):
"""
Enter a context manager to be exited during the tearDownClass
"""
if cls._in_setup:
raise ValueError(
'Attempted to enter a class context in init_instance_fixtures.'
'\nDid you mean to call enter_instance_context?',
)
return cls._class_teardown_stack.enter_context(context_manager)
@final
@classmethod
def add_class_callback(cls, callback, *args, **kwargs):
"""
Register a callback to be executed during tearDownClass.
Parameters
----------
callback : callable
The callback to invoke at the end of the test suite.
"""
if cls._in_setup:
raise ValueError(
'Attempted to add a class callback in init_instance_fixtures.'
'\nDid you mean to call add_instance_callback?',
)
return cls._class_teardown_stack.callback(callback, *args, **kwargs)
@final
def setUp(self):
type(self)._in_setup = True
self._pre_setup_attrs = set(vars(self))
self._instance_teardown_stack = ExitStack()
try:
self._init_instance_fixtures_was_called = False
self.init_instance_fixtures()
assert self._init_instance_fixtures_was_called, (
"ZiplineTestCase.init_instance_fixtures() was not"
" called.\n"
"This probably means that you overrode"
" init_instance_fixtures without calling super()."
)
except BaseException: # Clean up even on KeyboardInterrupt
self.tearDown()
raise
finally:
type(self)._in_setup = False
def init_instance_fixtures(self):
self._init_instance_fixtures_was_called = True
@final
def tearDown(self):
# We need to get this before it's deleted by the loop.
stack = self._instance_teardown_stack
for attr in set(vars(self)) - self._pre_setup_attrs:
delattr(self, attr)
stack.close()
@final
def enter_instance_context(self, context_manager):
"""
Enter a context manager that should be exited during tearDown.
"""
return self._instance_teardown_stack.enter_context(context_manager)
@final
def add_instance_callback(self, callback):
"""
Register a callback to be executed during tearDown.
Parameters
----------
callback : callable
The callback to invoke at the end of each test.
"""
return self._instance_teardown_stack.callback(callback)
def alias(attr_name):
"""Make a fixture attribute an alias of another fixture's attribute by
default.
Parameters
----------
attr_name : str
The name of the attribute to alias.
Returns
-------
p : classproperty
A class property that does the property aliasing.
Examples
--------
>>> class C(object):
... attr = 1
...
>>> class D(C):
... attr_alias = alias('attr')
...
>>> D.attr
1
>>> D.attr_alias
1
>>> class E(D):
... attr_alias = 2
...
>>> E.attr
1
>>> E.attr_alias
2
"""
return classproperty(flip(getattr, attr_name))
class WithDefaultDateBounds(with_metaclass(DebugMROMeta, object)):
"""
ZiplineTestCase mixin which makes it possible to synchronize date bounds
across fixtures.
This fixture should always be the last fixture in bases of any fixture or
test case that uses it.
Attributes
----------
START_DATE : datetime
END_DATE : datetime
The date bounds to be used for fixtures that want to have consistent
dates.
"""
START_DATE = pd.Timestamp('2006-01-03', tz='utc')
END_DATE = pd.Timestamp('2006-12-29', tz='utc')
class WithLogger(object):
"""
ZiplineTestCase mixin providing cls.log_handler as an instance-level
fixture.
After init_instance_fixtures has been called `self.log_handler` will be a
new ``logbook.NullHandler``.
Methods
-------
make_log_handler() -> logbook.LogHandler
A class method which constructs the new log handler object. By default
this will construct a ``NullHandler``.
"""
make_log_handler = NullHandler
@classmethod
def init_class_fixtures(cls):
super(WithLogger, cls).init_class_fixtures()
cls.log = Logger()
cls.log_handler = cls.enter_class_context(
cls.make_log_handler().applicationbound(),
)
class WithAssetFinder(WithDefaultDateBounds):
"""
ZiplineTestCase mixin providing cls.asset_finder as a class-level fixture.
After init_class_fixtures has been called, `cls.asset_finder` is populated
with an AssetFinder.
Attributes
----------
ASSET_FINDER_EQUITY_SIDS : iterable[int]
The default sids to construct equity data for.
ASSET_FINDER_EQUITY_SYMBOLS : iterable[str]
The default symbols to use for the equities.
ASSET_FINDER_EQUITY_START_DATE : datetime
The default start date to create equity data for. This defaults to
``START_DATE``.
ASSET_FINDER_EQUITY_END_DATE : datetime
The default end date to create equity data for. This defaults to
``END_DATE``.
ASSET_FINDER_EQUITY_NAMES: iterable[str]
The default names to use for the equities.
ASSET_FINDER_EQUITY_EXCHANGE : str
The default exchange to assign each equity.
ASSET_FINDER_COUNTRY_CODE : str
The default country code to assign each exchange.
Methods
-------
make_equity_info() -> pd.DataFrame
A class method which constructs the dataframe of equity info to write
to the class's asset db. By default this is empty.
make_futures_info() -> pd.DataFrame
A class method which constructs the dataframe of futures contract info
to write to the class's asset db. By default this is empty.
make_exchanges_info() -> pd.DataFrame
A class method which constructs the dataframe of exchange information
to write to the class's assets db. By default this is empty.
make_root_symbols_info() -> pd.DataFrame
A class method which constructs the dataframe of root symbols
information to write to the class's assets db. By default this is
empty.
make_asset_finder_db_url() -> string
A class method which returns the URL at which to create the SQLAlchemy
engine. By default provides a URL for an in-memory database.
    make_asset_finder() -> zipline.assets.AssetFinder
A class method which constructs the actual asset finder object to use
for the class. If this method is overridden then the ``make_*_info``
methods may not be respected.
See Also
--------
zipline.testing.make_simple_equity_info
zipline.testing.make_jagged_equity_info
zipline.testing.make_rotating_equity_info
zipline.testing.make_future_info
zipline.testing.make_commodity_future_info
"""
ASSET_FINDER_EQUITY_SIDS = ord('A'), ord('B'), ord('C')
ASSET_FINDER_EQUITY_SYMBOLS = None
ASSET_FINDER_EQUITY_NAMES = None
ASSET_FINDER_EQUITY_EXCHANGE = 'TEST'
ASSET_FINDER_EQUITY_START_DATE = alias('START_DATE')
ASSET_FINDER_EQUITY_END_DATE = alias('END_DATE')
ASSET_FINDER_FUTURE_CHAIN_PREDICATES = CHAIN_PREDICATES
ASSET_FINDER_COUNTRY_CODE = '??'
@classmethod
def _make_info(cls, *args):
return None
make_futures_info = _make_info
make_exchanges_info = _make_info
make_root_symbols_info = _make_info
make_equity_supplementary_mappings = _make_info
del _make_info
@classmethod
def make_equity_info(cls):
return make_simple_equity_info(
cls.ASSET_FINDER_EQUITY_SIDS,
cls.ASSET_FINDER_EQUITY_START_DATE,
cls.ASSET_FINDER_EQUITY_END_DATE,
cls.ASSET_FINDER_EQUITY_SYMBOLS,
cls.ASSET_FINDER_EQUITY_NAMES,
cls.ASSET_FINDER_EQUITY_EXCHANGE,
)
@classmethod
def make_asset_finder_db_url(cls):
return 'sqlite:///:memory:'
@classmethod
def make_asset_finder(cls):
"""Returns a new AssetFinder
Returns
-------
asset_finder : zipline.assets.AssetFinder
"""
equities = cls.make_equity_info()
futures = cls.make_futures_info()
root_symbols = cls.make_root_symbols_info()
exchanges = cls.make_exchanges_info(equities, futures, root_symbols)
if exchanges is None:
exchange_names = [
df['exchange']
for df in (equities, futures, root_symbols)
if df is not None
]
if exchange_names:
exchanges = pd.DataFrame({
'exchange': pd.concat(exchange_names).unique(),
'country_code': cls.ASSET_FINDER_COUNTRY_CODE,
})
return cls.enter_class_context(tmp_asset_finder(
url=cls.make_asset_finder_db_url(),
equities=equities,
futures=futures,
exchanges=exchanges,
root_symbols=root_symbols,
equity_supplementary_mappings=(
cls.make_equity_supplementary_mappings()
),
future_chain_predicates=cls.ASSET_FINDER_FUTURE_CHAIN_PREDICATES,
))
@classmethod
def init_class_fixtures(cls):
super(WithAssetFinder, cls).init_class_fixtures()
cls.asset_finder = cls.make_asset_finder()
@classlazyval
def all_assets(cls):
"""A list of Assets for all sids in cls.asset_finder.
"""
return cls.asset_finder.retrieve_all(cls.asset_finder.sids)
@classlazyval
def exchange_names(cls):
"""A list of canonical exchange names for all exchanges in this suite.
"""
infos = itervalues(cls.asset_finder.exchange_info)
return sorted(i.canonical_name for i in infos)
@classlazyval
def assets_by_calendar(cls):
"""A dict from calendar -> list of assets with that calendar.
"""
return groupby(lambda a: get_calendar(a.exchange), cls.all_assets)
@classlazyval
def all_calendars(cls):
"""A list of all calendars for assets in this test suite.
"""
return list(cls.assets_by_calendar)
# TODO_SS: The API here doesn't make sense in a multi-country test scenario.
class WithTradingCalendars(object):
"""
ZiplineTestCase mixin providing cls.trading_calendar,
cls.all_trading_calendars, cls.trading_calendar_for_asset_type as a
class-level fixture.
After ``init_class_fixtures`` has been called:
- `cls.trading_calendar` is populated with a default of the nyse trading
calendar for compatibility with existing tests
- `cls.all_trading_calendars` is populated with the trading calendars
keyed by name,
- `cls.trading_calendar_for_asset_type` is populated with the trading
calendars keyed by the asset type which uses the respective calendar.
Attributes
----------
TRADING_CALENDAR_STRS : iterable
iterable of identifiers of the calendars to use.
TRADING_CALENDAR_FOR_ASSET_TYPE : dict
A dictionary which maps asset type names to the calendar associated
with that asset type.
"""
TRADING_CALENDAR_STRS = ('NYSE',)
TRADING_CALENDAR_FOR_ASSET_TYPE = {Equity: 'NYSE', Future: 'us_futures'}
    # For backwards compatibility, existing tests and fixtures refer to
# `trading_calendar` with the assumption that the value is the NYSE
# calendar.
TRADING_CALENDAR_PRIMARY_CAL = 'NYSE'
@classmethod
def init_class_fixtures(cls):
super(WithTradingCalendars, cls).init_class_fixtures()
cls.trading_calendars = {}
for cal_str in (
set(cls.TRADING_CALENDAR_STRS) |
{cls.TRADING_CALENDAR_PRIMARY_CAL}
):
# Set name to allow aliasing.
calendar = get_calendar(cal_str)
setattr(cls,
'{0}_calendar'.format(cal_str.lower()), calendar)
cls.trading_calendars[cal_str] = calendar
type_to_cal = iteritems(cls.TRADING_CALENDAR_FOR_ASSET_TYPE)
for asset_type, cal_str in type_to_cal:
calendar = get_calendar(cal_str)
cls.trading_calendars[asset_type] = calendar
cls.trading_calendar = (
cls.trading_calendars[cls.TRADING_CALENDAR_PRIMARY_CAL]
)
_MARKET_DATA_DIR = os.path.join(zipline_dir, 'resources', 'market_data')
@remember_last
def read_checked_in_benchmark_data():
symbol = 'SPY'
filename = get_benchmark_filename(symbol)
source_path = os.path.join(_MARKET_DATA_DIR, filename)
benchmark_returns = pd.read_csv(
source_path,
parse_dates=[0],
index_col=0,
header=None,
).tz_localize('UTC')
return benchmark_returns.iloc[:, 0]
class WithBenchmarkReturns(WithDefaultDateBounds,
WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.benchmark_returns as a class-level
attribute.
"""
_default_treasury_curves = None
@classproperty
def BENCHMARK_RETURNS(cls):
benchmark_returns = read_checked_in_benchmark_data()
# Zipline ordinarily uses cached benchmark returns and treasury
# curves data, but when running the zipline tests this cache is not
# always updated to include the appropriate dates required by both
# the futures and equity calendars. In order to create more
# reliable and consistent data throughout the entirety of the
# tests, we read static benchmark returns and treasury curve csv
# files from source. If a test using this fixture attempts to run
# outside of the static date range of the csv files, raise an
# exception warning the user to either update the csv files in
# source or to use a date range within the current bounds.
static_start_date = benchmark_returns.index[0].date()
static_end_date = benchmark_returns.index[-1].date()
warning_message = (
'The WithBenchmarkReturns fixture uses static data between '
'{static_start} and {static_end}. To use a start and end date '
'of {given_start} and {given_end} you will have to update the '
'files in {resource_dir} to include the missing dates.'.format(
static_start=static_start_date,
static_end=static_end_date,
given_start=cls.START_DATE.date(),
given_end=cls.END_DATE.date(),
resource_dir=_MARKET_DATA_DIR,
)
)
if cls.START_DATE.date() < static_start_date or \
cls.END_DATE.date() > static_end_date:
raise AssertionError(warning_message)
return benchmark_returns
class WithSimParams(WithDefaultDateBounds):
"""
ZiplineTestCase mixin providing cls.sim_params as a class level fixture.
Attributes
----------
SIM_PARAMS_CAPITAL_BASE : float
SIM_PARAMS_DATA_FREQUENCY : {'daily', 'minute'}
SIM_PARAMS_EMISSION_RATE : {'daily', 'minute'}
Forwarded to ``SimulationParameters``.
SIM_PARAMS_START : datetime
SIM_PARAMS_END : datetime
Forwarded to ``SimulationParameters``. If not
explicitly overridden these will be ``START_DATE`` and ``END_DATE``
Methods
-------
make_simparams(**overrides)
Construct a ``SimulationParameters`` using the defaults defined by
fixture configuration attributes. Any parameters to
``SimulationParameters`` can be overridden by passing them by keyword.
See Also
--------
zipline.finance.trading.SimulationParameters
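
    Examples
    --------
    A minimal sketch (``MyTestCase`` is illustrative; a concrete test case also needs
    ``WithTradingCalendars`` in its bases because ``make_simparams`` reads
    ``cls.trading_calendar``)::

        class MyTestCase(WithSimParams, WithTradingCalendars, ZiplineTestCase):
            SIM_PARAMS_DATA_FREQUENCY = 'minute'

            def test_something(self):
                minute_params = self.make_simparams(emission_rate='minute')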
"""
SIM_PARAMS_CAPITAL_BASE = 1.0e5
SIM_PARAMS_DATA_FREQUENCY = 'daily'
SIM_PARAMS_EMISSION_RATE = 'daily'
SIM_PARAMS_START = alias('START_DATE')
SIM_PARAMS_END = alias('END_DATE')
@classmethod
def make_simparams(cls, **overrides):
kwargs = dict(
start_session=cls.SIM_PARAMS_START,
end_session=cls.SIM_PARAMS_END,
capital_base=cls.SIM_PARAMS_CAPITAL_BASE,
data_frequency=cls.SIM_PARAMS_DATA_FREQUENCY,
emission_rate=cls.SIM_PARAMS_EMISSION_RATE,
trading_calendar=cls.trading_calendar,
)
kwargs.update(overrides)
return SimulationParameters(**kwargs)
@classmethod
def init_class_fixtures(cls):
super(WithSimParams, cls).init_class_fixtures()
cls.sim_params = cls.make_simparams()
class WithTradingSessions(WithDefaultDateBounds, WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.trading_days, cls.all_trading_sessions
as a class-level fixture.
After init_class_fixtures has been called, `cls.all_trading_sessions`
is populated with a dictionary of calendar name to the DatetimeIndex
containing the calendar trading days ranging from:
(DATA_MAX_DAY - (cls.TRADING_DAY_COUNT) -> DATA_MAX_DAY)
`cls.trading_days`, for compatibility with existing tests which make the
assumption that trading days are equity only, defaults to the nyse trading
sessions.
Attributes
----------
DATA_MAX_DAY : datetime
The most recent trading day in the calendar.
TRADING_DAY_COUNT : int
The number of days to put in the calendar. The default value of
``TRADING_DAY_COUNT`` is 126 (half a trading-year). Inheritors can
override TRADING_DAY_COUNT to request more or less data.
"""
DATA_MIN_DAY = alias('START_DATE')
DATA_MAX_DAY = alias('END_DATE')
    # For backwards compatibility, existing tests and fixtures refer to
# `trading_days` with the assumption that the value is days of the NYSE
# calendar.
trading_days = alias('nyse_sessions')
@classmethod
def init_class_fixtures(cls):
super(WithTradingSessions, cls).init_class_fixtures()
cls.trading_sessions = {}
for cal_str in cls.TRADING_CALENDAR_STRS:
trading_calendar = cls.trading_calendars[cal_str]
sessions = trading_calendar.sessions_in_range(
cls.DATA_MIN_DAY, cls.DATA_MAX_DAY)
# Set name for aliasing.
setattr(cls,
'{0}_sessions'.format(cal_str.lower()), sessions)
cls.trading_sessions[cal_str] = sessions
class WithTmpDir(object):
"""
ZiplineTestCase mixing providing cls.tmpdir as a class-level fixture.
After init_class_fixtures has been called, `cls.tmpdir` is populated with
a `testfixtures.TempDirectory` object whose path is `cls.TMP_DIR_PATH`.
Attributes
----------
TMP_DIR_PATH : str
The path to the new directory to create. By default this is None
which will create a unique directory in /tmp.
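
    Examples
    --------
    A minimal sketch (``MyTestCase`` is illustrative; ``getpath`` comes from
    ``testfixtures.TempDirectory``)::

        class MyTestCase(WithTmpDir, ZiplineTestCase):

            def test_write_output(self):
                path = self.tmpdir.getpath('output.csv')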
"""
TMP_DIR_PATH = None
@classmethod
def init_class_fixtures(cls):
super(WithTmpDir, cls).init_class_fixtures()
cls.tmpdir = cls.enter_class_context(
tmp_dir(path=cls.TMP_DIR_PATH),
)
class WithInstanceTmpDir(object):
"""
ZiplineTestCase mixing providing self.tmpdir as an instance-level fixture.
After init_instance_fixtures has been called, `self.tmpdir` is populated
with a `testfixtures.TempDirectory` object whose path is
`cls.TMP_DIR_PATH`.
Attributes
----------
INSTANCE_TMP_DIR_PATH : str
The path to the new directory to create. By default this is None
which will create a unique directory in /tmp.
"""
INSTANCE_TMP_DIR_PATH = None
def init_instance_fixtures(self):
super(WithInstanceTmpDir, self).init_instance_fixtures()
self.instance_tmpdir = self.enter_instance_context(
tmp_dir(path=self.INSTANCE_TMP_DIR_PATH),
)
class WithEquityDailyBarData(WithAssetFinder, WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.make_equity_daily_bar_data.
Attributes
----------
EQUITY_DAILY_BAR_START_DATE : Timestamp
        The date at which to start creating data. This defaults to
        ``START_DATE``.
    EQUITY_DAILY_BAR_END_DATE : Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_equity_daily_bar_data` will read data from
the minute bars defined by `WithEquityMinuteBarData`.
The current default is `False`, but could be `True` in the future.
EQUITY_DAILY_BAR_COUNTRY_CODES : tuple
        The countries to create data for. By default this is populated
with all of the countries present in the asset finder.
Methods
-------
make_equity_daily_bar_data(country_code, sids)
make_equity_daily_bar_currency_codes(country_code, sids)
See Also
--------
WithEquityMinuteBarData
zipline.testing.create_daily_bar_data
""" # noqa
EQUITY_DAILY_BAR_START_DATE = alias('START_DATE')
EQUITY_DAILY_BAR_END_DATE = alias('END_DATE')
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = None
@classproperty
def EQUITY_DAILY_BAR_LOOKBACK_DAYS(cls):
# If we're sourcing from minute data, then we almost certainly want the
# minute bar calendar to be aligned with the daily bar calendar, so
# re-use the same lookback parameter.
if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls.EQUITY_MINUTE_BAR_LOOKBACK_DAYS
else:
return 0
@classproperty
def EQUITY_DAILY_BAR_COUNTRY_CODES(cls):
return cls.asset_finder.country_codes
@classmethod
def _make_equity_daily_bar_from_minute(cls):
assert issubclass(cls, WithEquityMinuteBarData), \
"Can't source daily data from minute without minute data!"
assets = cls.asset_finder.retrieve_all(cls.asset_finder.equities_sids)
minute_data = dict(cls.make_equity_minute_bar_data())
for asset in assets:
yield asset.sid, minute_frame_to_session_frame(
minute_data[asset.sid],
cls.trading_calendars[Equity])
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
"""
Create daily pricing data.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code. Data should be created for
this country.
sids : tuple[int]
The sids to include in the data.
Yields
------
(int, pd.DataFrame)
A sid, dataframe pair to be passed to a daily bar writer.
The dataframe should be indexed by date, with columns of
('open', 'high', 'low', 'close', 'volume', 'day', & 'id').
"""
# Requires a WithEquityMinuteBarData to come before in the MRO.
# Resample that data so that daily and minute bar data are aligned.
if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls._make_equity_daily_bar_from_minute()
else:
return create_daily_bar_data(cls.equity_daily_bar_days, sids)
@classmethod
def make_equity_daily_bar_currency_codes(cls, country_code, sids):
"""Create listing currencies.
Default is to list all assets in USD.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code. Data should be created for
this country.
sids : tuple[int]
The sids to include in the data.
Returns
-------
currency_codes : pd.Series[int, str]
Map from sids to currency for that sid's prices.
"""
return pd.Series(index=list(sids), data='USD')
@classmethod
def init_class_fixtures(cls):
super(WithEquityDailyBarData, cls).init_class_fixtures()
trading_calendar = cls.trading_calendars[Equity]
if trading_calendar.is_session(cls.EQUITY_DAILY_BAR_START_DATE):
first_session = cls.EQUITY_DAILY_BAR_START_DATE
else:
first_session = trading_calendar.minute_to_session_label(
pd.Timestamp(cls.EQUITY_DAILY_BAR_START_DATE)
)
if cls.EQUITY_DAILY_BAR_LOOKBACK_DAYS > 0:
first_session = trading_calendar.sessions_window(
first_session,
-1 * cls.EQUITY_DAILY_BAR_LOOKBACK_DAYS
)[0]
days = trading_calendar.sessions_in_range(
first_session,
cls.EQUITY_DAILY_BAR_END_DATE,
)
cls.equity_daily_bar_days = days
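# Override sketch (illustrative assumption): a subclass can replace
# make_equity_daily_bar_data to feed its own frames to a daily bar writer,
# as long as it yields (sid, DataFrame) pairs indexed by session with OHLCV
# columns. The constant prices below are made up for demonstration.
#
#     class ExampleDailyBarCase(WithEquityDailyBarData, ZiplineTestCase):
#         @classmethod
#         def make_equity_daily_bar_data(cls, country_code, sids):
#             for sid in sids:
#                 yield sid, pd.DataFrame(
#                     {'open': 10.0, 'high': 11.0, 'low': 9.0,
#                      'close': 10.5, 'volume': 1000},
#                     index=cls.equity_daily_bar_days,
#                 )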
class WithFutureDailyBarData(WithAssetFinder, WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.make_future_daily_bar_data.
Attributes
----------
FUTURE_DAILY_BAR_START_DATE : Timestamp
        The date at which to start creating data. This defaults to
        ``START_DATE``.
    FUTURE_DAILY_BAR_END_DATE : Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_future_daily_bar_data` will read data from
the minute bars defined by `WithFutureMinuteBarData`.
The current default is `False`, but could be `True` in the future.
Methods
-------
make_future_daily_bar_data() -> iterable[(int, pd.DataFrame)]
A class method that returns an iterator of (sid, dataframe) pairs
which will be written to the bcolz files that the class's
``BcolzDailyBarReader`` will read from. By default this creates
some simple synthetic data with
:func:`~zipline.testing.create_daily_bar_data`
See Also
--------
WithFutureMinuteBarData
zipline.testing.create_daily_bar_data
"""
FUTURE_DAILY_BAR_USE_FULL_CALENDAR = False
FUTURE_DAILY_BAR_START_DATE = alias('START_DATE')
FUTURE_DAILY_BAR_END_DATE = alias('END_DATE')
FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE = None
@classproperty
def FUTURE_DAILY_BAR_LOOKBACK_DAYS(cls):
# If we're sourcing from minute data, then we almost certainly want the
# minute bar calendar to be aligned with the daily bar calendar, so
# re-use the same lookback parameter.
if cls.FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls.FUTURE_MINUTE_BAR_LOOKBACK_DAYS
else:
return 0
@classmethod
def _make_future_daily_bar_from_minute(cls):
assert issubclass(cls, WithFutureMinuteBarData), \
"Can't source daily data from minute without minute data!"
assets = cls.asset_finder.retrieve_all(cls.asset_finder.futures_sids)
minute_data = dict(cls.make_future_minute_bar_data())
for asset in assets:
yield asset.sid, minute_frame_to_session_frame(
minute_data[asset.sid],
cls.trading_calendars[Future])
@classmethod
def make_future_daily_bar_data(cls):
# Requires a WithFutureMinuteBarData to come before in the MRO.
# Resample that data so that daily and minute bar data are aligned.
if cls.FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls._make_future_daily_bar_from_minute()
else:
return create_daily_bar_data(
cls.future_daily_bar_days,
cls.asset_finder.futures_sids,
)
@classmethod
def init_class_fixtures(cls):
super(WithFutureDailyBarData, cls).init_class_fixtures()
trading_calendar = cls.trading_calendars[Future]
if cls.FUTURE_DAILY_BAR_USE_FULL_CALENDAR:
days = trading_calendar.all_sessions
else:
if trading_calendar.is_session(cls.FUTURE_DAILY_BAR_START_DATE):
first_session = cls.FUTURE_DAILY_BAR_START_DATE
else:
first_session = trading_calendar.minute_to_session_label(
pd.Timestamp(cls.FUTURE_DAILY_BAR_START_DATE)
)
if cls.FUTURE_DAILY_BAR_LOOKBACK_DAYS > 0:
first_session = trading_calendar.sessions_window(
first_session,
-1 * cls.FUTURE_DAILY_BAR_LOOKBACK_DAYS
)[0]
days = trading_calendar.sessions_in_range(
first_session,
cls.FUTURE_DAILY_BAR_END_DATE,
)
cls.future_daily_bar_days = days
class WithBcolzEquityDailyBarReader(WithEquityDailyBarData, WithTmpDir):
"""
ZiplineTestCase mixin providing cls.bcolz_daily_bar_path,
cls.bcolz_daily_bar_ctable, and cls.bcolz_equity_daily_bar_reader
class level fixtures.
After init_class_fixtures has been called:
- `cls.bcolz_daily_bar_path` is populated with
`cls.tmpdir.getpath(cls.BCOLZ_DAILY_BAR_PATH)`.
- `cls.bcolz_daily_bar_ctable` is populated with data returned from
`cls.make_equity_daily_bar_data`. By default this calls
:func:`zipline.pipeline.loaders.synthetic.make_equity_daily_bar_data`.
- `cls.bcolz_equity_daily_bar_reader` is a daily bar reader
pointing to the directory that was just written to.
Attributes
----------
BCOLZ_DAILY_BAR_PATH : str
The path inside the tmpdir where this will be written.
EQUITY_DAILY_BAR_LOOKBACK_DAYS : int
The number of days of data to add before the first day. This is used
when a test needs to use history, in which case this should be set to
the largest history window that will be
requested.
EQUITY_DAILY_BAR_USE_FULL_CALENDAR : bool
If this flag is set the ``equity_daily_bar_days`` will be the full
set of trading days from the trading environment. This flag overrides
``EQUITY_DAILY_BAR_LOOKBACK_DAYS``.
BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD : int
If this flag is set, use the value as the `read_all_threshold`
parameter to BcolzDailyBarReader, otherwise use the default
value.
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_equity_daily_bar_data` will read data from
the minute bar reader defined by a `WithBcolzEquityMinuteBarReader`.
Methods
-------
make_bcolz_daily_bar_rootdir_path() -> string
A class method that returns the path for the rootdir of the daily
bars ctable. By default this is a subdirectory BCOLZ_DAILY_BAR_PATH in
the shared temp directory.
See Also
--------
WithBcolzEquityMinuteBarReader
WithDataPortal
zipline.testing.create_daily_bar_data
"""
BCOLZ_DAILY_BAR_PATH = 'daily_equity_pricing.bcolz'
BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD = None
BCOLZ_DAILY_BAR_COUNTRY_CODE = None
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = False
# allows WithBcolzEquityDailyBarReaderFromCSVs to call the
    # `write_csvs` method without needing to reimplement `init_class_fixtures`
_write_method_name = 'write'
# What to do when data being written is invalid, e.g. nan, inf, etc.
# options are: 'warn', 'raise', 'ignore'
INVALID_DATA_BEHAVIOR = 'warn'
@classproperty
def BCOLZ_DAILY_BAR_COUNTRY_CODE(cls):
return cls.EQUITY_DAILY_BAR_COUNTRY_CODES[0]
@classmethod
def make_bcolz_daily_bar_rootdir_path(cls):
return cls.tmpdir.makedir(cls.BCOLZ_DAILY_BAR_PATH)
@classmethod
def init_class_fixtures(cls):
super(WithBcolzEquityDailyBarReader, cls).init_class_fixtures()
cls.bcolz_daily_bar_path = p = cls.make_bcolz_daily_bar_rootdir_path()
days = cls.equity_daily_bar_days
sids = cls.asset_finder.equities_sids_for_country_code(
cls.BCOLZ_DAILY_BAR_COUNTRY_CODE
)
trading_calendar = cls.trading_calendars[Equity]
cls.bcolz_daily_bar_ctable = t = getattr(
BcolzDailyBarWriter(p, trading_calendar, days[0], days[-1]),
cls._write_method_name,
)(
cls.make_equity_daily_bar_data(
country_code=cls.BCOLZ_DAILY_BAR_COUNTRY_CODE,
sids=sids,
),
invalid_data_behavior=cls.INVALID_DATA_BEHAVIOR
)
if cls.BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD is not None:
cls.bcolz_equity_daily_bar_reader = BcolzDailyBarReader(
t, cls.BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD)
else:
cls.bcolz_equity_daily_bar_reader = BcolzDailyBarReader(t)
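# Tuning sketch (illustrative assumption): a subclass can forward a custom
# read_all_threshold to the reader simply by overriding the class attribute,
# e.g.
#
#     class ExampleThresholdCase(WithBcolzEquityDailyBarReader,
#                                ZiplineTestCase):
#         BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD = 1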
class WithBcolzFutureDailyBarReader(WithFutureDailyBarData, WithTmpDir):
"""
ZiplineTestCase mixin providing cls.bcolz_daily_bar_path,
cls.bcolz_daily_bar_ctable, and cls.bcolz_future_daily_bar_reader
class level fixtures.
After init_class_fixtures has been called:
- `cls.bcolz_daily_bar_path` is populated with
`cls.tmpdir.getpath(cls.BCOLZ_DAILY_BAR_PATH)`.
- `cls.bcolz_daily_bar_ctable` is populated with data returned from
`cls.make_future_daily_bar_data`. By default this calls
:func:`zipline.pipeline.loaders.synthetic.make_future_daily_bar_data`.
- `cls.bcolz_future_daily_bar_reader` is a daily bar reader
pointing to the directory that was just written to.
Attributes
----------
BCOLZ_DAILY_BAR_PATH : str
The path inside the tmpdir where this will be written.
FUTURE_DAILY_BAR_LOOKBACK_DAYS : int
The number of days of data to add before the first day. This is used
when a test needs to use history, in which case this should be set to
the largest history window that will be
requested.
FUTURE_DAILY_BAR_USE_FULL_CALENDAR : bool
If this flag is set the ``future_daily_bar_days`` will be the full
set of trading days from the trading environment. This flag overrides
``FUTURE_DAILY_BAR_LOOKBACK_DAYS``.
BCOLZ_FUTURE_DAILY_BAR_READ_ALL_THRESHOLD : int
If this flag is set, use the value as the `read_all_threshold`
parameter to BcolzDailyBarReader, otherwise use the default
value.
FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_future_daily_bar_data` will read data from
the minute bar reader defined by a `WithBcolzFutureMinuteBarReader`.
Methods
-------
make_bcolz_daily_bar_rootdir_path() -> string
A class method that returns the path for the rootdir of the daily
bars ctable. By default this is a subdirectory BCOLZ_DAILY_BAR_PATH in
the shared temp directory.
See Also
--------
WithBcolzFutureMinuteBarReader
WithDataPortal
zipline.testing.create_daily_bar_data
"""
BCOLZ_FUTURE_DAILY_BAR_PATH = 'daily_future_pricing.bcolz'
BCOLZ_FUTURE_DAILY_BAR_READ_ALL_THRESHOLD = None
FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE = False
# What to do when data being written is invalid, e.g. nan, inf, etc.
# options are: 'warn', 'raise', 'ignore'
BCOLZ_FUTURE_DAILY_BAR_INVALID_DATA_BEHAVIOR = 'warn'
BCOLZ_FUTURE_DAILY_BAR_WRITE_METHOD_NAME = 'write'
@classmethod
def make_bcolz_future_daily_bar_rootdir_path(cls):
return cls.tmpdir.makedir(cls.BCOLZ_FUTURE_DAILY_BAR_PATH)
@classmethod
def init_class_fixtures(cls):
super(WithBcolzFutureDailyBarReader, cls).init_class_fixtures()
p = cls.make_bcolz_future_daily_bar_rootdir_path()
cls.future_bcolz_daily_bar_path = p
days = cls.future_daily_bar_days
trading_calendar = cls.trading_calendars[Future]
cls.future_bcolz_daily_bar_ctable = t = getattr(
BcolzDailyBarWriter(p, trading_calendar, days[0], days[-1]),
cls.BCOLZ_FUTURE_DAILY_BAR_WRITE_METHOD_NAME,
)(
cls.make_future_daily_bar_data(),
invalid_data_behavior=(
cls.BCOLZ_FUTURE_DAILY_BAR_INVALID_DATA_BEHAVIOR
)
)
if cls.BCOLZ_FUTURE_DAILY_BAR_READ_ALL_THRESHOLD is not None:
cls.bcolz_future_daily_bar_reader = BcolzDailyBarReader(
t, cls.BCOLZ_FUTURE_DAILY_BAR_READ_ALL_THRESHOLD)
else:
cls.bcolz_future_daily_bar_reader = BcolzDailyBarReader(t)
class WithBcolzEquityDailyBarReaderFromCSVs(WithBcolzEquityDailyBarReader):
"""
ZiplineTestCase mixin that provides
cls.bcolz_equity_daily_bar_reader from a mapping of sids to CSV
file paths.
"""
_write_method_name = 'write_csvs'
def _trading_days_for_minute_bars(calendar,
start_date,
end_date,
lookback_days):
first_session = calendar.minute_to_session_label(start_date)
if lookback_days > 0:
first_session = calendar.sessions_window(
first_session,
-1 * lookback_days
)[0]
return calendar.sessions_in_range(first_session, end_date)
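# Worked example (illustrative): with lookback_days=2 the returned range
# starts two sessions before `start_date` on the given calendar, e.g. for an
# NYSE calendar and start_date=2016-01-05 the first returned session would be
# 2015-12-31 (the Jan 1st holiday and the weekend are skipped).
#
#     sessions = _trading_days_for_minute_bars(
#         nyse_calendar,                      # assumed trading calendar object
#         pd.Timestamp('2016-01-05'),
#         pd.Timestamp('2016-01-08'),
#         lookback_days=2,
#     )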
# TODO_SS: This currently doesn't define any relationship between country_code
# and calendar, which would be useful downstream.
class WithWriteHDF5DailyBars(WithEquityDailyBarData,
WithTmpDir):
"""
Fixture class defining the capability of writing HDF5 daily bars to disk.
Uses cls.make_equity_daily_bar_data (inherited from WithEquityDailyBarData)
to determine the data to write.
Methods
-------
write_hdf5_daily_bars(cls, path, country_codes)
Creates an HDF5 file on disk and populates it with pricing data.
Attributes
----------
    HDF5_DAILY_BAR_CHUNK_SIZE : int
        The date chunk size passed to ``HDF5DailyBarWriter`` when writing
        daily pricing data.
    """
HDF5_DAILY_BAR_CHUNK_SIZE = 30
@classmethod
def write_hdf5_daily_bars(cls, path, country_codes):
"""
Write HDF5 pricing data using an HDF5DailyBarWriter.
Parameters
----------
path : str
Location (relative to cls.tmpdir) at which to write data.
country_codes : list[str]
List of country codes to write.
Returns
-------
written : h5py.File
A read-only h5py.File pointing at the written data. The returned
file is registered to be closed automatically during class
teardown.
"""
ensure_directory_containing(path)
writer = HDF5DailyBarWriter(path, cls.HDF5_DAILY_BAR_CHUNK_SIZE)
write_hdf5_daily_bars(
writer,
cls.asset_finder,
country_codes,
cls.make_equity_daily_bar_data,
cls.make_equity_daily_bar_currency_codes,
)
# Open the file and mark it for closure during teardown.
return cls.enter_class_context(writer.h5_file(mode='r'))
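# Usage sketch (illustrative assumption): a fixture subclass typically calls
# write_hdf5_daily_bars once during init_class_fixtures and then opens readers
# on the returned h5py.File, as WithHDF5EquityMultiCountryDailyBarReader below
# does. A minimal caller might look like (file name is hypothetical):
#
#     path = cls.tmpdir.getpath('example_daily_bars.h5')
#     h5_file = cls.write_hdf5_daily_bars(path, ['US'])
#     reader = HDF5DailyBarReader.from_file(h5_file, 'US')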
class WithHDF5EquityMultiCountryDailyBarReader(WithWriteHDF5DailyBars):
"""
Fixture providing cls.hdf5_daily_bar_path and
cls.hdf5_equity_daily_bar_reader class level fixtures.
After init_class_fixtures has been called:
- `cls.hdf5_daily_bar_path` is populated with
`cls.tmpdir.getpath(cls.HDF5_DAILY_BAR_PATH)`.
- The file at `cls.hdf5_daily_bar_path` is populated with data returned
from `cls.make_equity_daily_bar_data`. By default this calls
:func:`zipline.pipeline.loaders.synthetic.make_equity_daily_bar_data`.
- `cls.hdf5_equity_daily_bar_reader` is a daily bar reader pointing
to the file that was just written to.
Attributes
----------
HDF5_DAILY_BAR_PATH : str
The path inside the tmpdir where this will be written.
    HDF5_DAILY_BAR_COUNTRY_CODES : list[str]
        The ISO 3166 alpha-2 country codes of the countries to write/read.
        Defaults to ``EQUITY_DAILY_BAR_COUNTRY_CODES``.
Methods
-------
make_hdf5_daily_bar_path() -> string
        A class method that returns the path of the HDF5 daily bar file to
        write. By default this is the file HDF5_DAILY_BAR_PATH inside the
        shared temp directory.
See Also
--------
WithDataPortal
zipline.testing.create_daily_bar_data
"""
HDF5_DAILY_BAR_PATH = 'daily_equity_pricing.h5'
HDF5_DAILY_BAR_COUNTRY_CODES = alias('EQUITY_DAILY_BAR_COUNTRY_CODES')
@classmethod
def make_hdf5_daily_bar_path(cls):
return cls.tmpdir.getpath(cls.HDF5_DAILY_BAR_PATH)
@classmethod
def init_class_fixtures(cls):
super(
WithHDF5EquityMultiCountryDailyBarReader,
cls,
).init_class_fixtures()
cls.hdf5_daily_bar_path = path = cls.make_hdf5_daily_bar_path()
f = cls.write_hdf5_daily_bars(path, cls.HDF5_DAILY_BAR_COUNTRY_CODES)
cls.single_country_hdf5_equity_daily_bar_readers = {
country_code: HDF5DailyBarReader.from_file(f, country_code)
for country_code in f
}
cls.hdf5_equity_daily_bar_reader = MultiCountryDailyBarReader(
cls.single_country_hdf5_equity_daily_bar_readers
)
class WithEquityMinuteBarData(WithAssetFinder, WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.equity_minute_bar_days.
After init_class_fixtures has been called:
- `cls.equity_minute_bar_days` has the range over which data has been
generated.
Attributes
----------
EQUITY_MINUTE_BAR_LOOKBACK_DAYS : int
The number of days of data to add before the first day.
This is used when a test needs to use history, in which case this
should be set to the largest history window that will be requested.
EQUITY_MINUTE_BAR_START_DATE : Timestamp
        The date at which to start creating data. This defaults to
        ``START_DATE``.
    EQUITY_MINUTE_BAR_END_DATE : Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
Methods
-------
make_equity_minute_bar_data() -> iterable[(int, pd.DataFrame)]
Classmethod producing an iterator of (sid, minute_data) pairs.
The default implementation invokes
zipline.testing.core.create_minute_bar_data.
See Also
--------
WithEquityDailyBarData
zipline.testing.create_minute_bar_data
"""
EQUITY_MINUTE_BAR_LOOKBACK_DAYS = 0
EQUITY_MINUTE_BAR_START_DATE = alias('START_DATE')
EQUITY_MINUTE_BAR_END_DATE = alias('END_DATE')
@classmethod
def make_equity_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Equity]
return create_minute_bar_data(
trading_calendar.minutes_for_sessions_in_range(
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
),
cls.asset_finder.equities_sids,
)
@classmethod
def init_class_fixtures(cls):
super(WithEquityMinuteBarData, cls).init_class_fixtures()
trading_calendar = cls.trading_calendars[Equity]
cls.equity_minute_bar_days = _trading_days_for_minute_bars(
trading_calendar,
            pd.Timestamp(cls.EQUITY_MINUTE_BAR_START_DATE),
            pd.Timestamp(cls.EQUITY_MINUTE_BAR_END_DATE),
            cls.EQUITY_MINUTE_BAR_LOOKBACK_DAYS,
        )
import nltk
import pandas as pd
import text2emotion as te
from nltk.corpus import stopwords
import altair as alt
import re
from nltk.tokenize import sent_tokenize
nltk.download("stopwords")
def counter(text):
"""
Generates a summary dataframe of the input
text which contains counts for characters,
words, and sentences.
Parameters:
-----------
text (str): the input text for sentiment analysis
Returns:
--------
data frame: a data frame that contains the summary
statistics for character, word, and sentence count.
    example:
    counter("I am very happy.")
    returns a data frame with char_count=16, word_count=4, sentence_count=1
"""
if not type(text) is str:
raise TypeError("Only strings are allowed for function input")
    num_char = len(text)
    num_word = len(re.findall(r"\w+", text))
    num_sentences = len(sent_tokenize(text))
return pd.DataFrame(
{
"char_count": [num_char],
"word_count": [num_word],
"sentence_count": [num_sentences],
}
)
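# Example (illustrative): the docstring example above, spelled out. The
# returned frame is a single row with the three count columns.
#
#     counter("I am very happy.")
#     #    char_count  word_count  sentence_count
#     # 0          16           4               1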
def sentiment_df(text, sentiment="all"):
"""
Generates a sentiment analysis summary dataframe
of the input text. The summary dataframe would include
the sentiment type, sentiment words, number of
sentiment words, and highest sentiment percentage.
Parameters:
-----------
text (str): the input text for sentiment analysis
sentiment (str, optional): the sentiment that the analysis focuses
on, could be happy, angry, or sad etc.
Defaults to "all".
Returns:
--------
data frame:
a data frame that contains the summary of sentiment analysis
"""
sen_list = ["all", "Happy", "Sad", "Surprise", "Fear", "Angry"]
if not type(text) is str:
raise TypeError("Only strings are allowed for function input")
elif not type(sentiment) is str:
raise TypeError("Only strings are allowed for sentiment input")
elif sentiment not in sen_list:
raise Exception(
"Input not in ['all', 'Happy', 'Sad', 'Surprise', 'Fear', 'Angry']"
)
tokenizer = nltk.RegexpTokenizer(r"\w+")
word_list = tokenizer.tokenize(text)
stop_words = set(stopwords.words("english"))
cleaned_list = []
for i in word_list:
if i not in stop_words:
cleaned_list.append(i)
count_dict = {}
for i in cleaned_list:
count_dict[i] = cleaned_list.count(i)
df = pd.DataFrame()
for i in set(cleaned_list):
        df_current = pd.DataFrame()
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
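    # Note (illustrative): create_index(closed='right') above builds the
    # ten-interval index (0, 1], (1, 2], ..., (9, 10], which most tests in
    # this class use as their baseline; create_index_with_nan replaces the
    # second interval with NaN endpoints.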
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right'),
IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]),
IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed)
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples(
[(0, 1), ('a', 'b'), (0, 1)], closed=closed)
assert not idx.is_unique
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
        i = IntervalIndex.from_tuples([(Timestamp('20130101'),
                                        Timestamp('20130102')),
                                       (Timestamp('20130102'),
                                        Timestamp('20130103'))],
                                      closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
        index = IntervalIndex.from_breaks(breaks, closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
        index = IntervalIndex.from_breaks(breaks, closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
    def test_slice_locs_decreasing_int64(self):
        self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
    def test_slice_locs_decreasing_float64(self):
        self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
        expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlaps completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index.union(other)
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.union(index), index)
tm.assert_index_equal(index.union(index[:1]), index)
def test_intersection(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
result = other.intersection(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.intersection(index), index)
def test_difference(self, closed):
index = self.create_index(closed=closed)
tm.assert_index_equal(index.difference(index[:1]), index[1:])
def test_symmetric_difference(self, closed):
idx = self.create_index(closed=closed)
result = idx[1:].symmetric_difference(idx[:-1])
expected = IntervalIndex([idx[0], idx[-1]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
def test_set_operation_errors(self, closed, op_name):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# test errors
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
with tm.assert_raises_regex(ValueError, msg):
set_op(Index([1, 2, 3]))
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
set_op(other)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
def test_datetime(self):
dates = date_range('2000', periods=3)
idx = IntervalIndex.from_breaks(dates)
tm.assert_index_equal(idx.left, dates[:2])
tm.assert_index_equal(idx.right, dates[-2:])
expected = date_range('2000-01-01T12:00', periods=2)
tm.assert_index_equal(idx.mid, expected)
        assert Timestamp('2000-01-01T12') not in idx
target = date_range('1999-12-31T12:00', periods=7, freq='12H')
actual = idx.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
        # Should be False for closed='both', otherwise True (GH16560)
if closed == 'both':
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
class TestIntervalRange(object):
def test_construction_from_numeric(self, closed, name):
# combinations of start/end/periods without freq
expected = IntervalIndex.from_breaks(
np.arange(0, 6), name=name, closed=closed)
result = interval_range(start=0, end=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=5, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with freq
expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)],
name=name, closed=closed)
result = interval_range(start=0, end=6, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=6, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)],
name=name, closed=closed)
result = interval_range(start=0, end=4, freq=1.5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timestamp(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
breaks = date_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2017-01-08')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with non-fixed freq
freq = 'M'
start, end = Timestamp('2017-01-01'), Timestamp('2017-12-31')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2018-01-15')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timedelta(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timedelta('1 day'), Timedelta('6 days')
breaks = timedelta_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timedelta('1 day'), Timedelta('7 days')
breaks = timedelta_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timedelta('7 days 1 hour')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_constructor_coverage(self):
# float value for periods
expected = pd.interval_range(start=0, periods=10)
result = pd.interval_range(start=0, periods=10.5)
tm.assert_index_equal(result, expected)
# equivalent timestamp-like start/end
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15')
expected = pd.interval_range(start=start, end=end)
result = pd.interval_range(start=start.to_pydatetime(),
end=end.to_pydatetime())
tm.assert_index_equal(result, expected)
result = pd.interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# equivalent freq with timestamp
equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1),
DateOffset(days=1)]
for freq in equiv_freq:
result = pd.interval_range(start=start, end=end, freq=freq)
tm.assert_index_equal(result, expected)
# equivalent timedelta-like start/end
start, end = | Timedelta(days=1) | pandas.Timedelta |
from selenium.webdriver import Chrome
import pandas as pd
import time as time
webdriver = "webdriver/chromedriver.exe"
driver = Chrome(webdriver)
url = "https://blog.deeplearning.ai/blog"
next_posts_btn_selector = 'next-posts-link'
driver.get(url)
load_more_btn = driver.find_element_by_class_name(next_posts_btn_selector)
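# Keep clicking the "load more" button until it is no longer displayed, pausing briefly so new posts can load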
while load_more_btn.is_displayed():
load_more_btn.click()
time.sleep(2)
posts = driver.find_elements_by_xpath('//h2[@class = "post-header"]/a[1]')
links = [post.get_attribute('href') for post in posts]
print('Number of posts: ', len(posts))
df = | pd.DataFrame(links, columns=['link']) | pandas.DataFrame |
import os
import sys
import logging
import pandas as pd
import numpy as np
from linker.plugins.base import AlgorithmProvider
from linker.core.union_find import UnionFind
from jellyfish import levenshtein_distance, jaro_winkler
logger = logging.getLogger(__name__)
class Levenshtein(AlgorithmProvider):
name = 'LEVENSHTEIN'
title = 'Levenshtein'
type = 'DTR'
args = ['max_edits']
def apply(self, s1, s2, max_edits=0):
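# Row-wise helper: returns 1 when the Levenshtein distance between the pair of values is within max_edits, else 0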
def levenshtein_alg(x, max_edits=0):
try:
d = levenshtein_distance(x[0], x[1])
return 1 if d <= max_edits else 0
except TypeError as err:
logger.error(
'Error in calculating Levenshtein edit distance: {}'
.format(err))
strings = | pd.concat([s1, s2], axis=1, ignore_index=True) | pandas.concat |
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
from sklearn.metrics import precision_recall_fscore_support
from statsmodels.stats.inter_rater import fleiss_kappa
__author__ = '<NAME>'
pd.set_option('max_colwidth', 999)
pd.set_option('display.max_rows', 999)
pd.set_option('display.max_columns', 999)
ALL_CATS = ('positive', 'negative', 'neutral', 'mixed')
TERNARY_CATS = ('positive', 'negative', 'neutral')
def load_dataset(*src_filenames, labels=None):
data = []
for filename in src_filenames:
with open(filename) as f:
for line in f:
d = json.loads(line)
if labels is None or d['gold_label'] in labels:
data.append(d)
return data
def get_label_distribution(*splits, dist_labels=False):
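# With dist_labels=True, count every individual annotator response; otherwise count one gold label per example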
if dist_labels:
all_labels = []
for split in splits:
for d in split:
dist = d['label_distribution']
all_labels += [label for label, ids in dist.items()
for _ in range(len(ids))]
series = pd.Series(all_labels)
else:
df = pd.concat((pd.DataFrame(split) for split in splits))
series = df.gold_label
series = series.fillna("No Majority")
dist = series.value_counts(dropna=False)
dist['Total'] = dist.sum()
return dist
def get_label_model_relationship(*splits, model_colname='model_0_label'):
all_splits = sum(splits, [])
df = pd.DataFrame(all_splits)
return df.groupby(['gold_label', model_colname]).apply(len)
def get_adversarial_rate(*splits, model_colname='model_0_label', labels=None):
dist = get_label_model_relationship(*splits, model_colname=model_colname)
dist = dist.reset_index().rename(columns={0: 'examples'})
total = dist.examples.sum()
if labels is not None:
dist = dist[dist.gold_label.isin(labels)]
adversarial = dist[dist.gold_label != dist[model_colname]]
return adversarial.examples.sum(), total
def get_label_rating_relationship(*splits):
all_splits = sum(splits, [])
df = pd.DataFrame(all_splits)
return df.groupby(['gold_label', 'review_rating']).apply(len)
def get_dist_of_majority_dists(split):
data = []
for d in split:
if d['gold_label']:
dist = sorted([(len(v), k) for k, v in d['label_distribution'].items()])
c = dist[-1][0]
data.append(c)
return pd.Series(data).value_counts()
def get_global_worker_dist(*splits):
all_workers = []
for split in splits:
for d in split:
for workers in d['label_distribution'].values():
all_workers += workers
series = pd.Series(all_workers)
return series.value_counts()
def plot_global_worker_dist(worker_dist, output_filename=None):
ax = worker_dist.apply(np.log).hist(bins=10, figsize=(8, 5))
ax.set_xlabel("Examples labeled (log scale)")
ax.set_ylabel("Workers")
if output_filename is not None:
plt.tight_layout()
plt.savefig(output_filename, dpi=200)
def sample_short_examples(split, model_colname, minlength=30, maxlength=50, random_state=1):
df = pd.DataFrame(split)
# No cases without majorities:
df = df[df.gold_label.isnull() == False]
# Length restrictions:
df = df[
(df.sentence.str.len() > minlength) &
(df.sentence.str.len() < maxlength)]
# One example per combination of gold and predicted:
samp = df.groupby(['gold_label', model_colname]).apply(
lambda x: x.sample(1, random_state=random_state))
# Abbreviated label distribution for display:
samp['responses'] = samp.label_distribution.apply(_format_dist)
# Final formatting:
samp = samp[['sentence', model_colname, 'responses']].reset_index(drop=True)
samp = samp.sort_values(model_colname)
return samp
def sample_examples(split, model_colname, n=10, random_state=1):
df = pd.DataFrame(split)
df = df[df.gold_label.isnull() == False]
samp = df.sample(n, random_state=random_state)
samp['responses'] = samp.label_distribution.apply(_format_dist)
samp = samp[['sentence', model_colname, 'responses']].reset_index(drop=True)
return samp
def _format_dist(d):
vals = sorted([k[: 3] for k, v in d.items() for _ in range(len(v))])
return ", ".join(vals)
def get_fleiss_kappa(split, cats=ALL_CATS):
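# Build the examples-by-categories response-count matrix that statsmodels' fleiss_kappa expects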
mat = np.zeros((len(split), len(cats)))
for i, d in enumerate(split):
for j, cat in enumerate(cats):
mat[i][j] += len(d['label_distribution'][cat])
return fleiss_kappa(mat)
def estimate_human_precision_recall_f1(split, cats=TERNARY_CATS, random_state=1):
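# Treat each (shuffled) annotator slot as a classifier, score it against the gold labels, and average the scores across slots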
random.seed(random_state)
y = []
all_responses = []
for i, d in enumerate(split):
gold_label = d['gold_label']
if gold_label is not None:
y.append(gold_label)
responses = [c for c, vals in d['label_distribution'].items()
for _ in range(len(vals))]
random.shuffle(responses)
all_responses.append(responses)
all_scores = []
for i in range(len(all_responses[0])):
ann = [x[i] for x in all_responses]
scores = precision_recall_fscore_support(y, ann, labels=cats)
scores = np.array(scores)
all_scores.append(scores)
mu = np.mean(all_scores, axis=0)
df = pd.DataFrame(
mu.T,
index=cats,
columns=['precision', 'recall', 'F1', 'support'])
df.loc['Macro avg'] = df.mean(axis=0)
return df
def get_worker_agreement(split):
data = []
for d in split:
gold_label = d['gold_label']
if gold_label is not None:
for c, workers in d['label_distribution'].items():
for w in workers:
data.append({
'worker': w,
'response': c,
'gold_label': gold_label})
df = | pd.DataFrame(data) | pandas.DataFrame |
#
# Build a graph describing the layout of each station based on data
# from the MTA's elevator and escalator equipment file. We also
# incorporate an override file, since some of the MTA descriptions
# too difficult for this simple program to understand. Writes to
# stdout.
#
import argparse
import pandas as pd
import re
import sys
from utils import split_elevator_description
def load_equipment(master_file, with_inactive=False, with_inaccessible=False, with_escalators=False, with_elevators=True):
equipment = pd.read_csv(master_file)
# filter by equipment
equipment_type = []
if with_escalators:
equipment_type.append('ES')
if with_elevators:
equipment_type.append('EL')
equipment = equipment[ equipment.equipment_type.isin(equipment_type) ]
# filter by active
if not with_inactive:
equipment = equipment[ equipment.is_active == 'Y' ]
# filter by accessibility
if not with_inaccessible:
equipment = equipment[ equipment.ada_compliant == 'Y' ]
# discard columns we don't need
equipment = equipment[["station_name", "equipment_id", "description", "connection_to_street"]]
return equipment
def load_platforms(platform_file):
platforms = pd.read_csv(platform_file)
# ensure the expected columns are present
platforms = platforms[["equipment_id", "line", "direction"]]
return platforms
def load_overrides(override_file):
columns = ["station_name", "equipment_id", "from", "to", "platform_id"]
if override_file is None:
return pd.DataFrame(columns=columns)
overrides = pd.read_csv(override_file)
# ensure the expected columns are present
overrides = overrides[columns]
return overrides
def merge_platforms(equipment, platforms):
# the MTA direction information is incomplete!
platform_ids = platforms[['equipment_id', 'line', 'direction']].set_index('equipment_id')
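# Combine each elevator's line/direction rows into a single 'line-direction/line-direction' platform identifier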
platform_ids = platform_ids.apply(lambda t : '-'.join(t), axis=1).groupby(level=0).unique()
platform_ids = platform_ids.apply(lambda t : '/'.join(t))
equipment = equipment.set_index('equipment_id')
equipment['platform_id'] = platform_ids
equipment.reset_index(inplace=True)
return equipment
def merge_overrides(equipment, overrides):
# discard any old data for elevators described in the override file
equipment = equipment[~equipment.equipment_id.isin(overrides.equipment_id.unique())]
# now append the overrides
equipment = equipment.append(overrides, sort=True)
return equipment
def identify_edges(equipment):
def elevator_route(desc):
def simplify(name):
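# Map a free-text level description onto a canonical node type (Street, Platform, Mezzanine, ...)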
if re.match(r'.*[Pp]latform.*', name):
return 'Platform'
if re.match(r'.*(St|Av|[Pp]laza|[Bb]lvd|[Pp]ark|[Ss]idewalk|Pl|Rd|[Ss]quare|[Ss]treet).*', name):
return 'Street'
if re.match(r'.*Upper Mezzanine.*', name):
return 'Upper Mezzanine'
if re.match(r'.*Lower Mezzanine.*', name):
return 'Lower Mezzanine'
if re.match(r'.*([Mm]ezzanine|[Bb]alcony|[Oo]verpass).*', name):
return 'Mezzanine'
if re.match(r'.*[Bb]alcony.*', name):
return 'Balcony'
if name in ['PA Bus Terminal', 'Oculus Main Level']:
return 'Street'
print(f'Failed to parse "{name}". Using "Unknown"', file=sys.stderr)
return 'Unknown'
levels = split_elevator_description(desc)
if len(levels) > 1:
return simplify(levels[0]), simplify(levels[1])
if re.match('^Mezzanine .*bound Platform$', desc):
return ('Mezzanine', 'Platform')
print(f'Failed to parse "{desc}". Using "Unknown-Unknown"', file=sys.stderr)
return ('Unknown', 'Unknown')
# some sanity tests
assert elevator_route('125 St & Lexington Ave to Mezzanine for service in both directions') == ('Street', 'Mezzanine')
assert elevator_route('Mezzanine to Platform for downtown A/C service') == ('Mezzanine', 'Platform')
assert elevator_route('Mezzanine to Platforms for service in both directions') == ('Mezzanine', 'Platform')
assert elevator_route('Mezzanine to uptown Platform') == ('Mezzanine', 'Platform')
assert elevator_route('161 St & River Ave (NE Corner) to Mezzanine to reach service in both directions') == ('Street', 'Mezzanine')
assert elevator_route('Street to # 6 Northbound platform') == ('Street', 'Platform')
assert elevator_route('Sidewalk entrance (east of the pedestrian skybridge) to Manhattan bound Platform') == ('Street', 'Platform')
# assert elevator_route('G and 7 Mezzanines to Flushing-bound 7 Platform') == ('Mezzanine', 'Platform')
from_col = equipment.description.apply(lambda d : elevator_route(d)[0])
# some elevators record the street part explicitly
from_col.loc[equipment['connection_to_street'] == 'Y'] = 'Street'
to_col = equipment.description.apply(lambda d : elevator_route(d)[1])
return pd.DataFrame({'from': from_col, 'to': to_col})
def canonical_names(equipment):
def make_canon(t):
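# Platform nodes include the platform id so different platforms at the same station stay distinct; other levels are keyed by level type and station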
label, station, platform_id = t
if label == 'Unknown':
return 'Unknown-' + station
if label == 'Platform':
return '-'.join([str(x) for x in [label, station, platform_id]])
return '-'.join([str(x) for x in [label, station]])
return pd.DataFrame({
'fqn_from' : equipment[['from', 'station_name', 'platform_id']].apply(make_canon, axis=1),
'fqn_to' : equipment[['to', 'station_name', 'platform_id']].apply(make_canon, axis=1),
'label_from': equipment[['from', 'station_name']].apply(lambda t : t[1] if t[0] == 'Street' else t[0], axis=1),
'label_to' : equipment['to']})
def main():
parser = argparse.ArgumentParser("Station graph builder")
parser.add_argument("--master-list", required=True)
parser.add_argument("--override-list", required=False)
parser.add_argument("--platform-list", required=True)
parser.add_argument("--output", required=False,
help="file to output to, defaults to stdout")
parser.add_argument("--inactive", dest="inactive", action="store_true", required=False, default=False,
help="include inactive elevators/escalators")
parser.add_argument("--no-inactive", dest="inactive", action="store_false", required=False,
help="don't include inactive elevators/escalators")
parser.add_argument("--inaccessible", dest="inaccessible", action="store_true", required=False, default=False,
help="don't require ADA compliance")
parser.add_argument("--no-inaccessible", dest="inaccessible", action="store_false", required=False,
help="require ADA compliance")
parser.add_argument("--escalators", dest="escalators", action="store_true", required=False, default=False,
help="include escalators as a connection between floors")
parser.add_argument("--no-escalators", dest="escalators", action="store_false", required=False,
help="don't include escalators as a connection between floors")
parser.add_argument("--elevators", dest="elevators", action="store_true", required=False, default=True,
help="include elevators as a connection between floors")
parser.add_argument("--no-elevators", dest="elevators", action="store_false", required=False,
help="don't include elevators as a connection between floors")
parser.add_argument("--verbose", dest="verbose", action="store_true", required=False, default=False)
opts = parser.parse_args()
log = (lambda hdr,df: print("==={}===\n{}".format(hdr, df.head()), file=sys.stderr)
) if opts.verbose else (lambda hdr,df: None)
equipment = load_equipment(opts.master_list,
with_inactive=opts.inactive, with_inaccessible=opts.inaccessible,
with_escalators=opts.escalators, with_elevators=opts.elevators)
log("Equipment", equipment)
platforms = load_platforms(opts.platform_list)
log("Platforms", platforms)
overrides = load_overrides(opts.override_list)
log("Overrides", overrides)
equipment = merge_platforms(equipment, platforms)
log("Merged 1", equipment)
from_to = identify_edges(equipment)
equipment = | pd.concat([equipment, from_to], axis=1, sort=False) | pandas.concat |
import math
import pandas as pd
import numpy as np
def clean_portfolio(portfolio):
""" Clean the portfolio dataset.
"""
portfolio_clean = portfolio.copy()
# Create dummy columns for the channels column
clean_channels = pd.get_dummies(portfolio_clean.channels.apply(pd.Series).stack(),
prefix="channel").sum(level=0)
portfolio_clean = pd.concat([portfolio_clean, clean_channels], axis=1, sort=False)
portfolio_clean.drop(columns='channels', inplace=True)
# Change the column name to be more descriptive
portfolio_clean.rename(columns={'id': 'offer_id'}, inplace=True)
return portfolio_clean
def clean_profile(profile):
""" Clean the profile dataset."""
profile_clean = profile.copy()
# Transform date from int to datetime
def date(x): return pd.to_datetime(str(x), format='%Y%m%d')
profile_clean.became_member_on = profile_clean.became_member_on.apply(date)
# Create a column that flags customers with valid data
profile_clean['valid'] = (profile_clean.age != 118).astype(int)
# Change the name of id column to customer_id
profile_clean.rename(columns={'id': 'customer_id'}, inplace=True)
# Create dummy columns for the gender column
dummy_gender = pd.get_dummies(profile_clean.gender, prefix="gender")
profile_clean = pd.concat([profile_clean, dummy_gender], axis=1, sort=False)
return profile_clean
def clean_transcript(transcript):
""" Clean the transcript dataset."""
transcript_clean = transcript.copy()
# Split event into several dummy columns
transcript_clean.event = transcript_clean.event.str.replace(' ', '_')
event_dummies = | pd.get_dummies(transcript_clean.event, prefix="event") | pandas.get_dummies |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 13:10:27 2020
@author: Oliver
"""
import os
import numpy as np
import scipy.io
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
from scipy.signal import savgol_filter, find_peaks
database = | pd.DataFrame(columns=['condition', 'name', 'ecg']) | pandas.DataFrame |
import pandas as pd
import numpy as np
from collections import defaultdict
from solarnet.preprocessing.masks import MaskMaker, IMAGE_SIZES
class TestMasks:
@staticmethod
def _make_polygon_vertices_pixel_coordinates(polygon_shapes):
# make the fake data
max_vertices = max(polygon_shapes.values())
test_data = defaultdict(list)
for polygon_idx, num_vertices in polygon_shapes.items():
test_data['polygon_id'].append(polygon_idx)
test_data['number_vertices'].append(num_vertices)
for vertex in range(1, max_vertices + 1):
vertex_value = vertex if vertex >= num_vertices else None
test_data[f'lon{vertex}'].append(vertex_value)
test_data[f'lat{vertex}'].append(vertex_value)
return | pd.DataFrame(data=test_data) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Virginia Case Study
"""
import os
import sys
import re
import csv
import json
import random
import math
import numpy as np
from functools import partial
import pandas as pd
import geopandas as gpd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from gerrychain import (
Election,
Graph,
MarkovChain,
Partition,
accept, #always_accept, acceptance functions
constraints,
updaters,
)
from gerrychain.metrics import efficiency_gap, mean_median, polsby_popper, wasted_votes
from gerrychain.proposals import recom, propose_random_flip
from gerrychain.updaters import cut_edges
from gerrychain.tree import recursive_tree_part, bipartition_tree_random
sys.path.insert(0, os.getenv("REDISTRICTING_HOME"))
import utility_functions as uf
plt.style.use('seaborn-whitegrid')
#--- IMPORT DATA
try:
os.chdir(os.path.join(os.getenv("REDISTRICTING_HOME", default=""),
"Virginia"))
except OSError:
os.mkdir(os.path.join(os.getenv("REDISTRICTING_HOME", default=""),
"Virginia"))
os.chdir(os.path.join(os.getenv("REDISTRICTING_HOME", default=""),
"Virginia"))
graph = Graph.from_json("Data/VA_Chain.json")
df = gpd.read_file("Data/VA_precincts.shp")
#--- CREATE SHORTCUTS
state_abbr="VA"
housen="CON"
num_districts=11
pop_col="TOTPOP"
num_elections= 3
#Make an output directory to dump files in
newdir = "./Outputs/"+state_abbr+housen+"_Precincts/"
print(newdir)
os.makedirs(os.path.dirname(newdir), exist_ok=True)
# Visualize districts for existing plans
uf.plot_district_map(df, df['CD_12'].to_dict(), "2012 Congressional District Map")
uf.plot_district_map(df, df['CD_16'].to_dict(), "2016 Congressional District Map")
#--- DATA CLEANING
graph = uf.convert_attributes_to_int(graph, ["G18DSEN", "G18RSEN", "G16DPRS", "G16RPRS"])
# calculate non-BVAP
graph = uf.add_other_population_attribute(graph)
#--- GENERIC UPDATERS
updater = {
"population": updaters.Tally("TOTPOP", alias="population"), #can only take in partitions
"cut_edges": cut_edges,
#"PP":polsby_popper
}
#--- ELECTION UPDATERS
#BVAP - Black Voting Age Population
#G18DSEN - 2018 Democratic senate candidate
#G18RSEN - 2018 Republican senate candidate
#G16DPRS: 2016 Democratic presidential candidate
#G16RPRS: 2016 Republican presidential candidate
election_names=[
"BVAP", #BVAP, nBVAP
"G16PRS", #G16DPRS, G16RPRS
"G18SEN" #G18DSEN, G18RSEN
]
election_columns=[
["BVAP", "nBVAP"], #First is BVAP, Second is NOT BVAP
["G16DPRS", "G16RPRS"], #First is Democrats, Second is Republicans
["G18DSEN", "G18RSEN"] #First is Democrats, Second is Republicans
]
elections = [
Election(
election_names[i], #Name of election
{"First": election_columns[i][0], "Second": election_columns[i][1]},
)#Take two columns of the election_columns, using first and second aligned to those column assignments
for i in range(num_elections)
]
election_updaters = {election.name: election for election in elections}
updater.update(election_updaters)
#--- STARTING PLAN (SEED PLAN)
totpop = df.TOTPOP.sum()
cddict = recursive_tree_part(graph, #graph object
range(num_districts), #How many districts
totpop/num_districts, #population target
"TOTPOP", #population column, variable name
.01, #epsilon value
1)
file_name = os.path.join(newdir, "initial_plan.png")
uf.plot_district_map(df, cddict, "Seed Plan: Recursive Partitioning Tree",
output_path=file_name)
# --- PARTITION
initial_partition = Partition(graph,
cddict, #initial plan (this is our recurisive_tree_part)
updater)
ideal_population = sum(initial_partition["population"].values()) / len(initial_partition)
with open(newdir+"init.json", 'w') as jf1:
json.dump(cddict, jf1)
#--- LOOK AT STATS OF STARTING PLAN
stats_df = uf.export_election_metrics_per_partition(initial_partition)
stats_df.percent.apply(pd.Series).plot()
plt.show()
# --- PROPOSAL
proposal = partial(#All the functions inside gerrychain want to take partition, but recom wants more functions than that
#Partial takes main functions and prefill all the objects until it becomes a partition
recom,
pop_col="TOTPOP",
pop_target=ideal_population,
epsilon=0.05,
node_repeats=1,
method=bipartition_tree_random
)
#--- CREATE CONSTRAINTS
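# Cap the number of cut edges at 1.5x the initial plan's count so proposed districts stay reasonably compact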
compactness_bound = constraints.UpperBound(
lambda p: len(p["cut_edges"]), 1.5 * len(initial_partition["cut_edges"])
)
#--- ACCEPTANCE FUNCTIONS
#--- MCMC CHAINS
recom_chain = MarkovChain( #recom automatically does contiguity
proposal=proposal,
constraints=[
constraints.within_percent_of_ideal_population(initial_partition, 0.05),
compactness_bound,
],
accept=accept.always_accept, #put acceptance function later?
initial_state=initial_partition,
total_steps=2000
)
flip_chain = MarkovChain(
proposal=propose_random_flip,
constraints=[
constraints.within_percent_of_ideal_population(initial_partition, 0.05),
constraints.single_flip_contiguous,
compactness_bound
],
accept=accept.always_accept,
initial_state=initial_partition,
total_steps=20000
)
# #--- RUN RECOMBINATION & FLIP BOUNDARY PROPOSALS & SAVE RESULTS
uf.export_all_metrics_per_chain(recom_chain,
output_path=os.path.join(newdir, 'recom_chain'),
buffer_length=200)
uf.export_all_metrics_per_chain(flip_chain,
output_path=os.path.join(newdir, 'flip_chain'),
buffer_length=2000
)
for file in os.listdir(os.path.join(newdir, 'recom_chain')):
m = re.search(r'assignment_(\d+).json', file)
if m:
assignment = json.load(open(os.path.join(newdir,
'recom_chain/' + file)))
assignment = {int(k): v for k, v in assignment.items()}
title = f'Recom Proposal: {m.groups()[0]} Steps'
uf.plot_district_map(df, assignment, title=title,
output_path=os.path.join(newdir, f'recom_chain/plot_{m.groups()[0]}.png')
)
for file in os.listdir(os.path.join(newdir, 'flip_chain')):
m = re.search(r'assignment_(\d+).json', file)
if m:
assignment = json.load(open(os.path.join(newdir,
'flip_chain/' + file)))
assignment = {int(k): v for k, v in assignment.items()}
title = f'Flip Proposal: {m.groups()[0]} Steps'
uf.plot_district_map(df, assignment, title=title,
output_path=os.path.join(newdir, f'flip_chain/plot_{m.groups()[0]}.png')
)
#--- BUILD VISUALIZATIONS
# sns.set_style('darkgrid')
sns.set_style("darkgrid", {"axes.facecolor": ".97"})
datadir = "./Outputs/VACON_Precincts/"
#Build partitions for 2012/2016 maps to calculate comparison metrics
partition_2012 = Partition(graph,
df["CD_12"],
updater)
partition_2016 = Partition(graph,
df["CD_16"],
updater)
#--- VISUALIZATION FUNCTIONS
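# Each helper overlays the ensemble's distribution of a metric with the values of the enacted 2012 and 2016 plans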
def comparison_hist(df_proposal_metric, title, election, gc_metric):
plt.hist(df_proposal_metric)
plt.title(title)
plt.vlines(x=sum(df_proposal_metric)/len(df_proposal_metric),
ymin=0,
ymax=(np.histogram(df_proposal_metric)[0]).max(),
colors="black",
linestyles="solid",
label="Ensemble Mean")
plt.vlines(x=gc_metric(partition_2012[election]),
ymin=0,
ymax=(np.histogram(df_proposal_metric)[0]).max(),
colors="red",
linestyles="dashed",
label="2012 Plan")
plt.vlines(x=gc_metric(partition_2016[election]),
ymin=0,
ymax=(np.histogram(df_proposal_metric)[0]).max(),
colors="orange",
linestyles="dashed",
label="2016 Plan")
plt.xlabel('Metric Value')
plt.ylabel('Frequency')
plt.legend(bbox_to_anchor=(.8, 1),
loc='upper left', borderaxespad=0.)
plt.show()
def comparison_plot(df_proposal_metric, title, election, gc_metric):
plt.plot(df_proposal_metric)
plt.title(title)
plt.hlines(y=sum(df_proposal_metric)/len(df_proposal_metric),
xmin=0,
xmax=len(df_proposal_metric),
colors="black",
linestyles="solid",
label="Ensemble Mean")
plt.hlines(y=gc_metric(partition_2012[election]),
xmin=0,
xmax=len(df_proposal_metric),
colors="red",
linestyles="dashed",
label="2012 Plan")
plt.hlines(y=gc_metric(partition_2016[election]),
xmin=0,
xmax=len(df_proposal_metric),
colors="orange",
linestyles="dashed",
label="2016 Plan")
plt.xlabel('Number of Steps')
plt.ylabel('Metric Value')
plt.legend(bbox_to_anchor=(.8, 1),
loc='upper left', borderaxespad=0.)
plt.show()
#--- RECOM PROPOSAL VISUALIZATION
recom_hmss = []
for file in os.listdir(os.path.join(newdir, 'recom_chain')):
m = re.search(r'wins_(\d+).csv', file)
if m:
recom_hmss.append(pd.read_csv(os.path.join(newdir, 'recom_chain/' + file), header=None))
df_recom_seats = pd.concat(recom_hmss)
df_recom_seats.columns = election_names
df_recom_seats = df_recom_seats.reset_index(drop=False)
recom_mms = []
for file in os.listdir(os.path.join(newdir, 'recom_chain')):
m = re.search(r'mean_median_(\d+).csv', file)
if m:
recom_mms.append(pd.read_csv(os.path.join(newdir, 'recom_chain/' + file), header=None))
df_recom_mms = | pd.concat(recom_mms) | pandas.concat |
import pytest
import numpy as np
import os
import pandas as pd
import minst.model as model
@pytest.fixture
def rwc_obs():
return dict(index='U1309f091', dataset='uiowa',
audio_file="RWC_I_05/172/172VCSPP.flac",
instrument='piano', source_index='U12345',
start_time=0.0, duration=2, note_number=45,
dynamic='pp', partition='test-0')
@pytest.fixture
def test_obs():
obs = [
dict(index="rwcabc123", dataset="rwc", audio_file="foo_00.aiff",
instrument="tuba", source_index="001", start_time=0.0,
duration=1.0, note_number=None, dynamic="pp", partition=None),
dict(index="rwcabc234", dataset="rwc", audio_file="foo_01.aiff",
instrument="tuba", source_index="001", start_time=0.0,
duration=1.0, note_number=None, dynamic="mf", partition=None),
dict(index="rwcabc534", dataset="rwc", audio_file="foo_01.aiff",
instrument="saxophone", source_index="002", start_time=0.0,
duration=1.0, note_number=None, dynamic="mf", partition=None),
dict(index="rwcabc675", dataset="rwc", audio_file="foo_01.aiff",
instrument="saxophone", source_index="002", start_time=0.0,
duration=1.0, note_number=None, dynamic="mf", partition=None),
dict(index="uiowaabc098", dataset="uiowa", audio_file="foo_01.mp3",
instrument="tuba", source_index="003", start_time=0.0,
duration=1.0, note_number=None, dynamic="pp", partition=None),
dict(index="uiowaabc099", dataset="uiowa", audio_file="foo_02.mp3",
instrument="saxophone", source_index="004", start_time=0.0,
duration=1.0, note_number=None, dynamic="pp", partition=None),
dict(index="phildef123", dataset="philharmonia", audio_file="124.aiff",
instrument="tuba", source_index="005", start_time=0.0,
duration=1.0, note_number=None, dynamic="pp", partition=None),
dict(index="phil456", dataset="philharmonia", audio_file="foo_02.aiff",
instrument="saxophone", source_index="006", start_time=0.0,
duration=1.0, note_number=None, dynamic="pp", partition=None)
]
return obs
@pytest.fixture
def test_bigobs(test_obs):
# Careful! All of these extra obs point to the original
# objects.
bigobs = test_obs * 10
# Fixing the index to be unique
for i, obs in enumerate(bigobs):
# This copy is to correct the above comment.
o = obs.copy()
o['index'] = obs['index'] + str(i)
bigobs[i] = o
return bigobs
def test_Observation___init__(rwc_obs):
obs = model.Observation(**rwc_obs)
assert obs
def test_Observation_to_builtin(rwc_obs):
obs = model.Observation(**rwc_obs)
assert obs.to_builtin() == rwc_obs
def test_Observation_from_series(test_obs):
index = [x.pop('index') for x in test_obs]
df = | pd.DataFrame.from_records(test_obs, index=index) | pandas.DataFrame.from_records |
import collections
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
isna,
)
import pandas._testing as tm
class TestCategoricalMissing:
def test_isna(self):
exp = np.array([False, False, True])
cat = Categorical(["a", "b", np.nan])
res = cat.isna()
tm.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = list(range(10))
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
tm.assert_numpy_array_equal(isna(cat), labels == -1)
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8))
c[1] = np.nan
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0], dtype=np.int8))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8))
def test_set_dtype_nans(self):
c = Categorical(["a", "b", np.nan])
result = c._set_dtype(CategoricalDtype(["a", "c"]))
tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1], dtype="int8"))
def test_set_item_nan(self):
cat = Categorical([1, 2, 3])
cat[1] = np.nan
exp = Categorical([1, np.nan, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(cat, exp)
@pytest.mark.parametrize(
"fillna_kwargs, msg",
[
(
{"value": 1, "method": "ffill"},
"Cannot specify both 'value' and 'method'.",
),
({}, "Must specify a fill 'value' or 'method'."),
({"method": "bad"}, "Invalid fill method. Expecting .* bad"),
(
{"value": Series([1, 2, 3, 4, "a"])},
"Cannot setitem on a Categorical with a new category",
),
],
)
def test_fillna_raises(self, fillna_kwargs, msg):
# https://github.com/pandas-dev/pandas/issues/19682
# https://github.com/pandas-dev/pandas/issues/13628
cat = Categorical([1, 2, 3, None, None])
with pytest.raises(ValueError, match=msg):
cat.fillna(**fillna_kwargs)
@pytest.mark.parametrize("named", [True, False])
def test_fillna_iterable_category(self, named):
# https://github.com/pandas-dev/pandas/issues/21097
if named:
Point = collections.namedtuple("Point", "x y")
else:
Point = lambda *args: args # tuple
cat = Categorical(np.array([Point(0, 0), Point(0, 1), None], dtype=object))
result = cat.fillna(Point(0, 0))
expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)])
tm.assert_categorical_equal(result, expected)
def test_fillna_array(self):
# accept Categorical or ndarray value if it holds appropriate values
cat = Categorical(["A", "B", "C", None, None])
other = cat.fillna("C")
result = cat.fillna(other)
tm.assert_categorical_equal(result, other)
assert isna(cat[-1])  # didn't modify the original in place
other = np.array(["A", "B", "C", "B", "A"])
result = cat.fillna(other)
expected = Categorical(["A", "B", "C", "B", "A"], dtype=cat.dtype)
tm.assert_categorical_equal(result, expected)
assert isna(cat[-1])  # didn't modify the original in place
@pytest.mark.parametrize(
"values, expected",
[
([1, 2, 3], np.array([False, False, False])),
([1, 2, np.nan], np.array([False, False, True])),
([1, 2, np.inf], np.array([False, False, True])),
([1, 2, pd.NA], np.array([False, False, True])),
],
)
def test_use_inf_as_na(self, values, expected):
# https://github.com/pandas-dev/pandas/issues/33594
with pd.option_context("mode.use_inf_as_na", True):
cat = Categorical(values)
result = cat.isna()
tm.assert_numpy_array_equal(result, expected)
result = Series(cat).isna()
expected = Series(expected)
tm.assert_series_equal(result, expected)
result = DataFrame(cat).isna()
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"values, expected",
[
([1, 2, 3], np.array([False, False, False])),
([1, 2, np.nan], np.array([False, False, True])),
([1, 2, np.inf], np.array([False, False, True])),
([1, 2, pd.NA], np.array([False, False, True])),
],
)
def test_use_inf_as_na_outside_context(self, values, expected):
# https://github.com/pandas-dev/pandas/issues/33594
# Using isna directly for Categorical will fail in general here
cat = Categorical(values)
with pd.option_context("mode.use_inf_as_na", True):
result = pd.isna(cat)
tm.assert_numpy_array_equal(result, expected)
result = pd.isna(Series(cat))
expected = Series(expected)
tm.assert_series_equal(result, expected)
result = pd.isna(DataFrame(cat))
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"a1, a2, categories",
[
(["a", "b", "c"], [np.nan, "a", "b"], ["a", "b", "c"]),
([1, 2, 3], [np.nan, 1, 2], [1, 2, 3]),
],
)
def test_compare_categorical_with_missing(self, a1, a2, categories):
# GH 28384
cat_type = | CategoricalDtype(categories) | pandas.core.dtypes.dtypes.CategoricalDtype |
#!/usr/bin/python
# coding=utf-8
# Convolution using mxnet ### x w
from __future__ import print_function
import mxnet as mx
import numpy as np
import pandas as pd
from mxnet import nd, autograd, gluon
from mxnet.gluon.nn import Dense, ELU, LeakyReLU, LayerNorm, Conv2D, MaxPool2D, Flatten, Activation
from mxnet.gluon import data as gdata, loss as gloss, nn, utils as gutils
from mxnet.image import Augmenter
import matplotlib.mlab as mlab
from scipy.signal import tukey
mx.random.seed(1) # Set seed for reproducable results
# system
import os, sys, time, datetime, copy
from loguru import logger
config = {
"handlers": [
{"sink": "MF4MXNet_{}.log".format(datetime.date.today()), "level":"DEBUG" ,"format": '<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level}</level> | <level>{message}</level>'},
# {"sink": "Solver_cnn.log",},
{"sink": sys.stdout, "format": '<green>{time:YYYY-MM-DD}</green> <cyan>{time:HH:mm:ss}</cyan> | <level>{level: <7}</level> | <level>{message}</level>',
"level": "INFO"},
],
# "extra": {"user": "someone"}
}
#### REF #### https://loguru.readthedocs.io/en/stable/api/logger.html
# DEBUG 10 # INFO 20 # WARNING 30 # ERROR 40 # CRITICAL 50
logger.configure(**config)
logger.debug('#'*40)
from pyinstrument import Profiler # https://github.com/joerick/pyinstrument
from tqdm import tnrange, tqdm_notebook, tqdm
########## RAY ################
# import ray
# # CPU_COUNT = 40 # cpu_count()
# CPU_COUNT = 2
# logger.info("#" * 30)
# logger.info("CPU_COUNT: {}", CPU_COUNT)
# logger.info("#" * 30)
# ray.init(num_cpus=CPU_COUNT, num_gpus = 0, include_webui=False, ignore_reinit_error=True)
########## RAY ################
def mkdir(path):
isExists=os.path.exists(path)
if not isExists:
os.makedirs(path)
logger.success(path + ' created successfully')
else:
logger.success(path + ' directory already exists')
def EquapEvent(fs, data):
# Window function
dwin = tukey(data.size, alpha=1./8)
sample = data.astype('float32') # (1,fs) ndarray cpu
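# Time-domain whitening kernel: real part of the inverse FFT of 1/sqrt(PSD) of the sample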
psd = np.real(np.fft.ifft(1/np.sqrt(power_vec(sample[0].asnumpy(), fs)))).reshape(1,-1) # (1,fs) np.array
sample_block = (sample* nd.array(dwin)).expand_dims(0).expand_dims(0) #(1,1,1,fs) ndarray cup
sample_psd_block = nd.concat(sample_block, nd.array(psd).expand_dims(0).expand_dims(0), dim=1)
return sample_psd_block # (1, 2, 1, 4096) ndarray cpu
def pred_O1Events(deltat, fs, T, C, frac):
onesecslice = [(65232, 69327) , (65178, 69273),
(66142, 70237), (66134, 70229),
(65902, 69997), (65928, 70023),
(65281, 69376), (65294, 69389)]
llLIGOevents = [file for file in os.listdir('Data_LIGO_Totural') if 'strain' in file]
llLIGOevents.sort()
aroundEvents = np.concatenate([np.load('./Data_LIGO_Totural/'+file).reshape(1,-1)[:,onesecslice[index][0]-int((deltat-0.5)*fs):onesecslice[index][1]+int((deltat-0.5)*fs)+1] \
for index, file in enumerate(llLIGOevents)])
logger.info('data_block: {} | {}', aroundEvents.shape, np.array(llLIGOevents))
aroundEvents = nd.array(aroundEvents).expand_dims(1)
logger.info('aroundEvents: {} [cpu ndarray]', aroundEvents.shape)
bias = 0#fs//2
# frac = 40
moving_slide = {}
spsd_block = {}
for index, filename in tqdm(enumerate(llLIGOevents), disable=True):
moving_slide[filename] = np.concatenate([ aroundEvents[index:index+1, 0, i*int(fs*(T/frac))+bias : i*int(fs*(T/frac))+T*fs+bias].asnumpy() for i in range(aroundEvents.shape[-1]) if i*int(fs*(T/frac))+T*fs+bias <=aroundEvents.shape[-1] ], axis=0)#[:160]#[:64 if T == 2 else 128]
spsd_block[filename] = np.concatenate([np.real(np.fft.ifft(1/np.sqrt(power_vec(i, fs)))).reshape(1,-1) for i in moving_slide[filename]])
# (64, fs*T)
logger.info('moving_slide: {} [np.array]', moving_slide[filename].shape)
logger.info('spsd_block: {} [np.array]', spsd_block[filename].shape)
time_range = [(i*int(fs*(T/frac))+bias + 20480//2)/fs for i in range(aroundEvents.shape[-1]) if i*int(fs*(T/frac))+T*fs+bias <=aroundEvents.shape[-1] ]
dwin = tukey(T*fs, alpha=1./8)
iterator_events, data_psd_events = {}, {}
for index, (filename_H1, filename_L1) in enumerate(zip(llLIGOevents[::2], llLIGOevents[1::2])):
data_block_nd = nd.concat(nd.array(moving_slide[filename_H1] * dwin).expand_dims(1),
nd.array(moving_slide[filename_L1] * dwin).expand_dims(1), dim=1) # (161, C, T*fs)
psd_block_nd = nd.concat(nd.array(spsd_block[filename_H1]).expand_dims(1),
nd.array(spsd_block[filename_L1]).expand_dims(1), dim=1) # (161, C, T*fs)
# (161, 2, 2, 1, 20480)
data_psd_events[filename_H1.split('_')[0]] = nd.concat(data_block_nd.expand_dims(1),
psd_block_nd.expand_dims(1), dim=1).expand_dims(3)
events_dataset = gluon.data.ArrayDataset(data_psd_events[filename_H1.split('_')[0]])
iterator_events[filename_H1.split('_')[0]] = gdata.DataLoader(events_dataset, 8, shuffle=False, last_batch = 'keep', num_workers=0)
logger.info('data_psd_events: {} | {}', data_psd_events['GW150914'].shape, data_psd_events.keys())
return iterator_events, time_range
# Compute the PSD
def power_vec(x, fs):
"""
Input 1-D np.array
"""
# fs = 4096
# NFFT = T*fs//8
# We have assumed it as 1/8.
NFFT = int((x.size/fs/8.0)*fs)
# with Blackman window function
psd_window = np.blackman(NFFT)
# and a 50% overlap:
NOVL = NFFT/2
# -- Calculate the PSD of the data. Also use an overlap, and window:
data_psd, freqs = mlab.psd(x, Fs = fs, NFFT = NFFT, window=psd_window, noverlap=NOVL)
datafreq = np.fft.fftfreq(x.shape[-1])*fs
# -- Interpolate to get the PSD values at the needed frequencies
return np.interp(np.abs(datafreq), freqs, data_psd)
# Compute the standard deviation
def nd_std(x, axis=-1):
""" Standard Deviation (SD)
Note: Do not try 'axis=0'
"""
return nd.sqrt(nd.square(nd.abs(x - x.mean(axis=axis).expand_dims(axis=axis) )).mean(axis=axis))
class RandomPeakAug(Augmenter):
"""Make RandomPeakAug.
Parameters
----------
percet : float [0,1]
p : the possibility the img be rotated
"""
__slots__ = ['fs', 'T', 'C', 'N', 'margin', 'ori_peak', 'shape_aug']
def __init__(self, margin, fs, C, ori_peak=None, T=1, rand_jitter = 1):
super(RandomPeakAug, self).__init__(margin=margin, ori_peak=ori_peak, fs=fs, T=T, C=C)
self.fs = fs
self.T = T # [s]
self.N = int(fs*T) # [n]
self.C = C
self.margin = int(margin * fs * T ) #[n]
self.ori_peak = int(ori_peak * fs * T) if ori_peak else None
self.shape_aug = mx.image.RandomCropAug(size=(fs, 1))  # used by the rand_jitter=0 branch in __call__
self.rand_jitter = rand_jitter
# print(C, fs, self.margin, self.ori_peak)
def __call__(self, src):
"""Augmenter body"""
assert src.shape[-2:] == (self.C, self.N) # (nsample, C, N)
if self.ori_peak == None:
self.ori_peak = int(src.argmax(axis=2)[0,0].asscalar()) # first+H1 as bench
logger.debug('self.ori_peak: {}', self.ori_peak)
# myrelu = lambda x: x if (x>0) and (x<=self.ori_peak*2) else None
# (nsample, C, 2*(N-margin))
# full = nd.concatenate([src, nd.zeros(shape=src.shape[:2]+(self.ori_peak*2-self.N,))], axis=2)[:,:,myrelu(self.ori_peak-(self.N-self.margin)):myrelu(self.ori_peak+(self.N-self.margin))]
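# Zero-pad the tail and slice so every sample has length 2*(N - margin) with the peak at the midpoint (checked by the assert below)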
full = nd.concat(src, nd.zeros(shape=src.shape[:2]+(self.ori_peak-self.margin,)) , dim=2)[:,:,self.ori_peak-(self.N-self.margin):]
assert (nd.sum( full[:,:1].argmax(-1) / full[:,:1].shape[-1] )/full[:,:1].shape[0]).asscalar() == 0.5
if self.margin == (self.T*self.fs)//2:
return full
if self.rand_jitter: # for every sample
"""
RP = RandomPeakAug(margin=0.1, fs = fs, C = 2, ori_peak=0.9, rand_jitter=0)
%timeit _ = RP(dataset_GW[pre])
# 505 ms ± 30.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
"""
randlist= [ (i , i+fs) for i in np.random.randint(low=1,high=(fs-2*self.margin), size= full.shape[0]) if i+fs <= full.shape[-1]]
assert len(randlist) == full.shape[0]
return nd.concatenate([ sample.expand_dims(axis=0)[:,:,i:j] for sample, (i, j) in zip(full, randlist) ], axis=0) # (nsample, C, N)
# full = nd.concatenate([self.shape_aug(sample.swapaxes(0,1).expand_dims(axis=0)) for sample in full ], axis=0) # (nsample, N, C)
# return full.swapaxes(1,2) # (nsample, C, N)
else:
"""
RP = RandomPeakAug(margin=0.1, fs = fs, C = 2, ori_peak=0.9, rand_jitter=1)
%timeit _ = RP(dataset_GW[pre])
# 808 µs ± 37.7 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
"""
full = full.swapaxes(0,2).expand_dims(axis=0) # (1, 2*(N-margin), C, nsample)
return self.shape_aug(full.reshape(1,0,-3)).reshape(1,0,self.C,-1).swapaxes(1,3)[0] # where swapaxes from (1, 2*(N-margin), C, nsample) to (nsample, C, N)
class MatchedFilteringLayer(gluon.HybridBlock):
def __init__(self, mod, fs,
template_H1,
template_L1,
differentiable = False):
super(MatchedFilteringLayer, self).__init__()
self.mod = int(mod)
self.fs = int(fs)
with self.name_scope():
# self.weights = self.params.get('weights',
# shape=(hidden_units, 0),
# allow_deferred_init=True)
self.template_H1 = self.params.get('template_H1',
shape=template_H1.shape,
init=mx.init.Constant(template_H1.asnumpy().tolist()), # Convert to regular list to make this object serializable
differentiable=differentiable)
self.template_L1 = self.params.get('template_L1',
shape=template_L1.shape,
init=mx.init.Constant(template_L1.asnumpy().tolist()), # Convert to regular list to make this object serializable
differentiable=differentiable)
self.num_filter_template = self.template_H1.shape[0]
self.kernel_size = self.template_H1.shape[-1]
## Global fs/ctx
def get_module(self, F, data, mod):
ctx = data.context
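# Zero-pad the last axis to a multiple of mod, fold it into blocks of length mod, sum the blocks, and reverse the result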
return F.concatenate([data, F.zeros(data.shape[:-1]+(mod - data.shape[-1]%mod, ), ctx=ctx)], axis=len(data.shape)-1).reshape(0,0,-1,mod).sum(axis=-2).expand_dims(2)[:,:,:,::-1]
# something wrong here for pad??
# data = F.reshape(F.pad(data, mode="constant", constant_value=0, pad_width=(0,0, 0,0, 0,0, 0,1)), shape=(0,0,-1,mod))
# return F.reverse(F.expand_dims(F.sum(data, axis=-2), 2), axis=3)
def hybrid_forward(self, F, data, template_H1, template_L1):
# data (nsmaple, 2, C, 1, T*fs) gpu nd.array
data_H1, data_L1 = F.split(data = data, axis=2, num_outputs=2)
data_H1 = data_H1[:,:,0] # (nsample, 2, 1, T*fs)
data_L1 = data_L1[:,:,0]
MF_H1 = self.onedetector_forward(F, data_H1, template_H1)
MF_L1 = self.onedetector_forward(F, data_L1, template_L1)
# (nsample, num_filter_template, 1, T*fs)
return nd.concat(MF_H1.expand_dims(0), MF_L1.expand_dims(0), dim=0)
def onedetector_forward(self, F, data, template):
# Note: Not working for hybrid blocks/mx.symbol!
# (8, 1, 1, T*fs), (8, 1, 1, T*fs) <= (8, 2, 1, T*fs)
data_block_nd, ts_block_nd = F.split(data = data, axis=1, num_outputs=2)
# assert F.shape_array(data).size_array().asscalar() == 4 # (8, 1, 1, T*fs)
# assert F.shape_array(self.weight).size_array().asscalar() == 4
batch_size = F.slice_axis(F.shape_array(ts_block_nd), axis=0, begin=0, end=1).asscalar() # 8
# Whiten data ===========================================================
data_whiten = F.concatenate( [F.Convolution(data=data_block_nd[i:i+1], # (8, 1, 1, T*fs)
weight=ts_block_nd[i:i+1], # (8, 1, 1, T*fs)
no_bias=True,
kernel=(1, self.mod),
stride=(1,1),
num_filter=1,
pad=(0,self.mod -1),) for i in range(batch_size) ],
axis=0)
data_whiten = self.get_module(F, data_whiten, self.mod) # (8, 1, 1, T*fs)
# Whiten template =======================================================
template_whiten = F.Convolution(data=template, # (8, 1, 1, T*fs)
weight=ts_block_nd, # (8, 1, 1, T*fs)
no_bias=True,
kernel=(1, self.mod),
stride=(1,1),
num_filter=batch_size,
pad=(0,self.mod -1),)
template_whiten = self.get_module(F, template_whiten, self.kernel_size)
# template_whiten (8, 8, 1, T*fs)
# == Calculate the matched filter output in the time domain: ============
optimal = F.concatenate([ F.Convolution(data=data_whiten[i:i+1], # (8, 8, 1, T*fs)
weight=template_whiten[:,i:i+1], # (8, 8, 1, T*fs)
no_bias=True,
kernel=(1, self.kernel_size),
stride=(1,1),
num_filter=self.num_filter_template,
pad=(0, self.kernel_size -1),) for i in range(batch_size)],
axis=0)
optimal = self.get_module(F, optimal, self.mod)
optimal_time = F.abs(optimal*2/self.fs)
# optimal_time (8, 8, 1, T*fs)
# == Normalize the matched filter output: ===============================
sigmasq = F.concatenate([ F.Convolution(data=template_whiten.swapaxes(0,1)[j:j+1:,i:i+1], # (8, 8, 1, T*fs)
weight=template_whiten.swapaxes(0,1)[j:j+1:,i:i+1], # (8, 8, 1, T*fs)
no_bias=True,
kernel=(1, self.kernel_size),
stride=(1,1),
num_filter=1,
pad=(0, self.kernel_size -1),) for j in range(batch_size) for i in range(self.num_filter_template) ],
axis=0)
sigmasq = self.get_module(F, sigmasq, self.kernel_size)[:,:,:,0].reshape(optimal_time.shape[:2])
sigma = F.sqrt(F.abs( sigmasq/self.fs )).expand_dims(2).expand_dims(2)
# sigma (8, 8, 1, 1)
return F.broadcast_div(optimal_time, sigma) # (8, 8, 1, T*fs) SNR_MF
class CutHybridLayer(gluon.HybridBlock):
def __init__(self, margin):
super(CutHybridLayer, self).__init__()
extra_range = 0.0
self.around_range = (1-margin*2)/2
# self.left = int(fs- np.around(self.around_range + extra_range, 2) * fs)
# self.right = int(fs+ np.around(self.around_range + extra_range, 2) * fs)+1
def hybrid_forward(self, F, x):
# (C, nsample, num_filter_template, 1, T*fs)
return F.max(x, axis=-1).swapaxes(1,0).swapaxes(3,2)
# if self.around_range == 0:
# return F.slice_axis(x, begin=0, end=1, axis=3).swapaxes(1,3)
# else:
# return F.slice_axis(F.Concat(x,x, dim=3), axis=-1, begin=self.left, end=self.right)
def preTemplateFloyd(fs, T, C, shift_size, wind_size, margin,debug = True):
temp_window = tukey(fs*wind_size, alpha=1./8)
dataset_GW = {}
keys = {}
pre = 'train'
data = np.load('/floyd/input/templates/data_T{}_fs4096_{}{}_{}.npy'.format(T,T*0.9,T*0.9, pre))[:,1] # drop GPS
dataset_GW[pre] = nd.array(data)[:,:C] # (1610,C,T*fs) cpu nd.ndarray
keys[pre] = np.load('/floyd/input/templates/data_T{}_fs4096_{}{}_keys_{}.npy'.format(T, T*0.9,T*0.9,pre))
logger.debug('Loading {} data: {}', pre, dataset_GW[pre].shape)
keys[pre] = | pd.Series(keys[pre][:,0]) | pandas.Series |
import codecs
import sys
import matplotlib.pyplot as plt
import pandas as pd
import re
import sklearn
print(sys.path)
# sys.path.append("C:/Program Files/Anaconda/envs/Coursework")
sys.path.append("C:/Program Files/Anaconda/envs/Coursework/Lib/site-packages")
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.util import bigrams
from string import punctuation
import numpy as np
from langdetect import detect, DetectorFactory
# from langdetect import detect_langs
from textblob import TextBlob
import emoji
from sklearn import svm
from sklearn.metrics import mean_squared_error
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
DetectorFactory.seed = 0
# nltk.download()
# nltk.download("stopwords") # downloading stopwords
# converts a tab delimited txt file into csv file.
# @param txt_file = string name of tab delimited txt file to convert into csv
# @param csv_file = string name of new csv file
def createTrainingCsv(textFile="mediaeval-2015-trainingset.txt", csvFile="training_set.csv"):
with codecs.open(textFile, "r", encoding="utf8") as txtFileToCsv:
with codecs.open(csvFile, 'w', encoding="utf8") as newCsvFile:
input_txt = txtFileToCsv.readlines()
for line in input_txt:
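# Replace the tab delimiter with the literal token "x0x" before writing each record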
record = line.replace("\t", "x0x")
newCsvFile.write(record)
print("Finished converting ", textFile, " into a csv")
# Shows composition of labels in training data set
def showLabelComposition(trainingSet):
labelComposition = trainingSet["label"].value_counts()
totalLabels = trainingSet["label"].count()
groundTruthTypes = ["Fake", "Real", "Humor"]
print(labelComposition)
for counter, label in enumerate(groundTruthTypes):
print(label, " ", str((labelComposition[counter] / totalLabels) * 100), "%")
plt.figure()
plt.pie(labelComposition, labels=groundTruthTypes)
plt.title("The label composition of Training dataset")
plt.savefig(fname="Data Visualisation/Label composition.png")
plt.show()
def showLanguageComposition(trainingSet):
languageComposition = trainingSet["language"].value_counts()
print(languageComposition)
totalLanguageTypes = trainingSet["language"].count()
# languageTypes = trainingSet["language"].unique().sort
languageTypes = trainingSet["language"].value_counts().index
# print(languageTypes)
for counter, label in enumerate(languageTypes):
print(label, " ", str((languageComposition[counter] / totalLanguageTypes) * 100), "%")
plt.figure()
plt.pie(languageComposition,
labels=languageTypes
)
plt.title("The language composition of Training dataset")
plt.savefig(fname="Data Visualisation/Language composition.png")
plt.show()
def showPolarityComposition(trainingSet):
polarityTypes = trainingSet["polarity"].value_counts().index # labels
realRecords = trainingSet[trainingSet["label"] == "real"]
fakeRecords = trainingSet[trainingSet["label"] == "fake"]
humourRecords = trainingSet[trainingSet["label"] == "humor"]
# print(realRecords.iloc[:,6:9].head(10))
# print(fakeRecords.iloc[:,6:9].head(10))
realRecordsPolarities = realRecords["polarity"].value_counts()
fakeRecordsPolarities = fakeRecords["polarity"].value_counts()
humourRecordsPolarites = humourRecords["polarity"].value_counts()
print(realRecordsPolarities)
print(fakeRecordsPolarities)
polarityComposition = trainingSet["polarity"].value_counts()
# print(polarityComposition)
# totalPolarityTypes = trainingSet["polarity"].count()
# for counter, label in enumerate(polarityTypes):
# print(label, " ", str((polarityComposition[counter] / totalPolarityTypes) * 100), "%")
width = 0.25
x = np.arange(len(polarityTypes))
# new_x = x * 2
fig, ax = plt.subplots()
rects1 = ax.bar(x - width, realRecordsPolarities, width, label='Real')
rects2 = ax.bar(x, fakeRecordsPolarities, width, label='Fake')
rects3 = ax.bar(x + width, humourRecordsPolarites, width, label='Humor')
ax.set_ylabel("Occurence")
ax.set_title("Polarity breakdown of dataset")
ax.set_xticks(x)
ax.set_xticklabels(polarityTypes)
ax.legend()
# autolabel(rects1)
# autolabel(rects2)
fig.tight_layout()
plt.savefig(fname="Data Visualisation/Polarity breakdown.png")
plt.show()
def showSubjectivityComposition(trainingSet):
polarityTypes = trainingSet["subjectivity"].value_counts().index # labels
realRecords = trainingSet[trainingSet["label"] == "real"]
fakeRecords = trainingSet[trainingSet["label"] == "fake"]
humourRecords = trainingSet[trainingSet["label"] == "humor"]
# print(realRecords.iloc[:,6:9].head(10))
# print(fakeRecords.iloc[:,6:9].head(10))
realRecordsPolarities = realRecords["subjectivity"].value_counts()
fakeRecordsPolarities = fakeRecords["subjectivity"].value_counts()
humourRecordsPolarites = humourRecords["subjectivity"].value_counts()
print(realRecordsPolarities)
print(fakeRecordsPolarities)
width = 0.25
x = np.arange(len(polarityTypes))
# new_x = x * 2
fig, ax = plt.subplots()
rects1 = ax.bar(x - width, realRecordsPolarities, width, label='Real')
rects2 = ax.bar(x, fakeRecordsPolarities, width, label='Fake')
rects3 = ax.bar(x + width, humourRecordsPolarites, width, label='Humor')
ax.set_ylabel("Occurence")
ax.set_title("Subjectivity breakdown of dataset")
ax.set_xticks(x)
ax.set_xticklabels(polarityTypes)
ax.legend()
# autolabel(rects1)
# autolabel(rects2)
fig.tight_layout()
plt.savefig(fname="Data Visualisation/Subjectivity breakdown.png")
plt.show()
def showEmojiComposition(trainingSet):
# emojiTypes = trainingSet["number of emojis"].value_counts().index # labels
realRecords = trainingSet[trainingSet["label"] == "real"]
fakeRecords = trainingSet[trainingSet["label"] == "fake"]
humourRecords = trainingSet[trainingSet["label"] == "humor"]
fakeRecords = pd.concat([fakeRecords, humourRecords])
# print(realRecords.iloc[:,6:9].head(10))
# print(fakeRecords.iloc[:,6:9].head(10))
realRecords = realRecords["number of emojis"].mean()
fakeRecords = fakeRecords["number of emojis"].mean()
# realRecords = realRecords["number of emojis"].sum()
# fakeRecords = fakeRecords["number of emojis"].sum()
print(realRecords)
print(fakeRecords)
emojiComposition = trainingSet["number of emojis"].value_counts()
# print(emojiComposition)
# totalemojiTypes = trainingSet["emoji"].count()
# for counter, label in enumerate(emojiTypes):
# print(label, " ", str((emojiComposition[counter] / totalemojiTypes) * 100), "%")
plt.figure()
labels = ["Real", "Fake"]
y_pos = np.arange(len(labels))
heightData = [realRecords, fakeRecords]
plt.bar(y_pos, heightData, align='center', alpha=0.5)
plt.xticks(y_pos, labels)
plt.ylabel('Usage')
plt.title('Mean Emoji usages in Fake and Real posts in the english training dataset')
plt.savefig(fname="Data Visualisation/Mean Emoji usages in Fake and Real posts in the english training dataset.png")
plt.show()
def showMentionComposition(trainingSet):
# emojiTypes = trainingSet["number of emojis"].value_counts().index # labels
realRecords = trainingSet[trainingSet["label"] == "real"]
fakeRecords = trainingSet[trainingSet["label"] == "fake"]
humourRecords = trainingSet[trainingSet["label"] == "humor"]
fakeRecords = pd.concat([fakeRecords, humourRecords])
# print(realRecords.iloc[:,6:9].head(10))
# print(fakeRecords.iloc[:,6:9].head(10))
realRecords = realRecords["number of mentions"].mean()
fakeRecords = fakeRecords["number of mentions"].mean()
# realRecords = realRecords["number of mentions"].sum()
# fakeRecords = fakeRecords["number of mentions"].sum()
print(realRecords)
print(fakeRecords)
plt.figure()
labels = ["Real", "Fake"]
y_pos = np.arange(len(labels))
heightData = [realRecords, fakeRecords]
plt.bar(y_pos, heightData, align='center', alpha=0.5)
plt.xticks(y_pos, labels)
plt.ylabel('Usage')
    plt.title('Mean @mention usage in Fake and Real posts in the English training dataset')
plt.savefig(
fname="Data Visualisation/Mean @mention usages in Fake and Real posts in the english training dataset.png")
plt.show()
def showURLComposition(trainingSet):
# emojiTypes = trainingSet["number of emojis"].value_counts().index # labels
realRecords = trainingSet[trainingSet["label"] == "real"]
fakeRecords = trainingSet[trainingSet["label"] == "fake"]
humourRecords = trainingSet[trainingSet["label"] == "humor"]
fakeRecords = pd.concat([fakeRecords, humourRecords])
# print(realRecords.iloc[:,6:9].head(10))
# print(fakeRecords.iloc[:,6:9].head(10))
# realRecords = realRecords["number of URLS"].mean()
# fakeRecords = fakeRecords["number of URLS"].mean()
realRecords = realRecords["number of URLS"].sum()
fakeRecords = fakeRecords["number of URLS"].sum()
print(realRecords)
print(fakeRecords)
plt.figure()
labels = ["Real", "Fake"]
y_pos = np.arange(len(labels))
heightData = [realRecords, fakeRecords]
plt.bar(y_pos, heightData, align='center', alpha=0.5)
plt.xticks(y_pos, labels)
plt.ylabel('Usage')
    plt.title('Total URL usage in Fake and Real posts in the English training dataset')
plt.savefig(fname="Data Visualisation/Total URL usages in Fake and Real posts in the english training dataset.png")
plt.show()
def showHashtagsComposition(trainingSet):
# emojiTypes = trainingSet["number of emojis"].value_counts().index # labels
realRecords = trainingSet[trainingSet["label"] == "real"]
fakeRecords = trainingSet[trainingSet["label"] == "fake"]
humourRecords = trainingSet[trainingSet["label"] == "humor"]
fakeRecords = pd.concat([fakeRecords, humourRecords])
# print(realRecords.iloc[:,6:9].head(10))
# print(fakeRecords.iloc[:,6:9].head(10))
# realRecords = realRecords["number of URLS"].mean()
# fakeRecords = fakeRecords["number of URLS"].mean()
realRecords = realRecords["number of hashtags"].sum()
fakeRecords = fakeRecords["number of hashtags"].sum()
print(realRecords)
print(fakeRecords)
plt.figure()
labels = ["Real", "Fake"]
y_pos = np.arange(len(labels))
heightData = [realRecords, fakeRecords]
plt.bar(y_pos, heightData, align='center', alpha=0.5)
plt.xticks(y_pos, labels)
plt.ylabel('Usage')
    plt.title('Total Hashtag usage in Fake and Real posts in the English training dataset')
plt.savefig(
fname="Data Visualisation/Total Hashtag usages in Fake and Real posts in the english training dataset.png")
plt.show()
def showTweetComposition(trainingSet):
# polarityTypes = trainingSet["polarity"].value_counts().index # labels
labelTypes = ["no. exclamations", "no. questions", "no. ellipsis", "no. locations","no. disaster words","no. emojis", "no. URLS", "no. Hashtags",
"no. mentions"]
print(labelTypes)
realRecords = trainingSet[trainingSet["label"] == "real"]
fakeRecords = trainingSet[trainingSet["label"] == "fake"]
humourRecords = trainingSet[trainingSet["label"] == "humor"]
# fakeRecords = pd.concat([fakeRecords,humourRecords])
# print(realRecords.iloc[:,6:9].head(10))
# print(fakeRecords.iloc[:,6:9].head(10))
#realRecordsNumberOfCharacters = realRecords["character length"].mean()
#fakeRecordsNumberOfCharacters = fakeRecords["character length"].mean()
#humourRecordsNumberOfCharacters = humourRecords["character length"].mean()
    realRecordsNumberOfExclamations = realRecords["number of exclamations"].mean()
    fakeRecordsNumberOfExclamations = fakeRecords["number of exclamations"].mean()
    humourRecordsNumberOfExclamations = humourRecords["number of exclamations"].mean()
    realRecordsNumberOfQuestions = realRecords["number of questions"].mean()
    fakeRecordsNumberOfQuestions = fakeRecords["number of questions"].mean()
    humourRecordsNumberOfQuestions = humourRecords["number of questions"].mean()
    realRecordsNumberOfEllipsis = realRecords["number of ellipsis"].mean()
    fakeRecordsNumberOfEllipsis = fakeRecords["number of ellipsis"].mean()
    humourRecordsNumberOfEllipsis = humourRecords["number of ellipsis"].mean()
    #realRecordsNumberOfWords = realRecords["word length"].mean()
    #fakeRecordsNumberOfWords = fakeRecords["word length"].mean()
    #humourRecordsNumberOfWords = humourRecords["word length"].mean()
    realRecordsNumberOfLocations = realRecords["number of locations"].mean()
    fakeRecordsNumberOfLocations = fakeRecords["number of locations"].mean()
    humourRecordsNumberOfLocations = humourRecords["number of locations"].mean()
    realRecordsNumberOfDisasterWords = realRecords["number of disaster words"].mean()
    fakeRecordsNumberOfDisasterWords = fakeRecords["number of disaster words"].mean()
    humourRecordsNumberOfDisasterWords = humourRecords["number of disaster words"].mean()
    realRecordsNumberOfEmojis = realRecords["number of emojis"].mean()
    fakeRecordsNumberOfEmojis = fakeRecords["number of emojis"].mean()
    humourRecordsNumberOfEmojis = humourRecords["number of emojis"].mean()
    realRecordsNumberOfURLS = realRecords["number of URLS"].mean()
    fakeRecordsNumberOfURLS = fakeRecords["number of URLS"].mean()
    humourRecordsNumberOfURLS = humourRecords["number of URLS"].mean()
    realRecordsNumberOfHashtags = realRecords["number of Hashtags"].mean()
    fakeRecordsNumberOfHashtags = fakeRecords["number of Hashtags"].mean()
    humourRecordsNumberOfHashtags = humourRecords["number of Hashtags"].mean()
    realRecordsNumberOfMentions = realRecords["number of mentions"].mean()
    fakeRecordsNumberOfMentions = fakeRecords["number of mentions"].mean()
    humourRecordsNumberOfMentions = humourRecords["number of mentions"].mean()
realData = [realRecordsNumberOfExclamations, realRecordsNumberOfQuestions, realRecordsNumberOfEllipsis,realRecordsNumberOfLocations,realRecordsNumberOfDisasterWords,
realRecordsNumberOfEmojis, realRecordsNumberOfURLS, realRecordsNumberOfHashtags,
realRecordsNumberOfMentions]
fakeData = [fakeRecordsNumberOfExclamations, fakeRecordsNumberOfQuestions, fakeRecordsNumberOfEllipsis,fakeRecordsNumberOfLocations,fakeRecordsNumberOfDisasterWords,
fakeRecordsNumberOfEmojis, fakeRecordsNumberOfURLS, fakeRecordsNumberOfHashtags,
fakeRecordsNumberOfMentions]
humourData = [humourRecordsNumberOfExclamations, humourRecordsNumberOfQuestions, humourRecordsNumberOfEllipsis,humourRecordsNumberOfLocations,humourRecordsNumberOfDisasterWords,
humourRecordsNumberOfEmojis, humourRecordsNumberOfURLS, humourRecordsNumberOfHashtags,
humourRecordsNumberOfMentions]
# polarityComposition = trainingSet["polarity"].value_counts()
# print(polarityComposition)
# totalPolarityTypes = trainingSet["polarity"].count()
# for counter, label in enumerate(polarityTypes):
# print(label, " ", str((polarityComposition[counter] / totalPolarityTypes) * 100), "%")
width = 0.25
x = np.arange(len(labelTypes))
# new_x = x * 2
fig, ax = plt.subplots()
rects1 = ax.bar(x - width, realData, width, label='Real')
rects2 = ax.bar(x, fakeData, width, label='Fake')
rects3 = ax.bar(x + width, humourData, width, label='Humor')
ax.set_ylabel("Mean Occurences")
ax.set_title("Tweet breakdown")
ax.set_xticks(x)
ax.set_xticklabels(labelTypes)
ax.legend()
# autolabel(rects1)
# autolabel(rects2)
fig.tight_layout()
plt.savefig(fname="Data Visualisation/Tweet breakdown of English training set.png")
plt.show()
def showPOSTagsComposition(trainingSet):
# polarityTypes = trainingSet["polarity"].value_counts().index # labels
labelTypes = ["verb count","noun count","adjective count","adverb count","pronoun count"]
print(labelTypes)
realRecords = trainingSet[trainingSet["label"] == "real"]
fakeRecords = trainingSet[trainingSet["label"] == "fake"]
humourRecords = trainingSet[trainingSet["label"] == "humor"]
# fakeRecords = pd.concat([fakeRecords,humourRecords])
# print(realRecords.iloc[:,6:9].head(10))
# print(fakeRecords.iloc[:,6:9].head(10))
#realRecordsNumberOfCharacters = realRecords["character length"].mean()
#fakeRecordsNumberOfCharacters = fakeRecords["character length"].mean()
#humourRecordsNumberOfCharacters = humourRecords["character length"].mean()
realRecordsVerbCount = realRecords["verb count"].mean()
fakeRecordsVerbCount = fakeRecords["verb count"].mean()
humourRecordsVerbCount = fakeRecords["verb count"].mean()
realRecordsNounCount = realRecords["noun count"].mean()
fakeRecordsNounCount = fakeRecords["noun count"].mean()
humourRecordsNounCount = fakeRecords["noun count"].mean()
realRecordsAdjectiveCount = realRecords["adjective count"].mean()
fakeRecordsAdjectiveCount = fakeRecords["adjective count"].mean()
humourRecordsAdjectiveCount = fakeRecords["adjective count"].mean()
#realRecordsNumberOfWords = realRecords["word length"].mean()
#fakeRecordsNumberOfWords = fakeRecords["word length"].mean()
#humourRecordsNumberOfWords = fakeRecords["word length"].mean()
realRecordsAdverbCount = realRecords["adverb count"].mean()
fakeRecordsAdverbCount = fakeRecords["adverb count"].mean()
humourRecordsAdverbCount = fakeRecords["adverb count"].mean()
realRecordsPronounCount = realRecords["pronoun count"].mean()
fakeRecordsPronounCount = fakeRecords["pronoun count"].mean()
humourRecordsPronounCount = fakeRecords["pronoun count"].mean()
realData = [realRecordsVerbCount,realRecordsNounCount,realRecordsAdjectiveCount,realRecordsAdverbCount,realRecordsPronounCount]
fakeData = [fakeRecordsVerbCount,fakeRecordsNounCount,fakeRecordsAdjectiveCount,fakeRecordsAdverbCount,fakeRecordsPronounCount]
humourData = [humourRecordsVerbCount,humourRecordsNounCount,humourRecordsAdjectiveCount,humourRecordsAdverbCount,humourRecordsPronounCount]
# polarityComposition = trainingSet["polarity"].value_counts()
# print(polarityComposition)
# totalPolarityTypes = trainingSet["polarity"].count()
# for counter, label in enumerate(polarityTypes):
# print(label, " ", str((polarityComposition[counter] / totalPolarityTypes) * 100), "%")
width = 0.25
x = np.arange(len(labelTypes))
# new_x = x * 2
fig, ax = plt.subplots()
rects1 = ax.bar(x - width, realData, width, label='Real')
rects2 = ax.bar(x, fakeData, width, label='Fake')
rects3 = ax.bar(x + width, humourData, width, label='Humor')
ax.set_ylabel("Mean Occurences")
ax.set_title("Mean occurences of POS tags in tweet content")
ax.set_xticks(x)
ax.set_xticklabels(labelTypes)
ax.legend()
# autolabel(rects1)
# autolabel(rects2)
fig.tight_layout()
plt.savefig(fname="Data Visualisation/POS tag breakdown.png")
plt.show()
def showNumberOfWordsUsage(trainingSet):
realRecords = trainingSet[trainingSet["label"] == "real"]
fakeRecords = trainingSet[trainingSet["label"] == "fake"]
humourRecords = trainingSet[trainingSet["label"] == "humor"]
# fakeRecords = pd.concat([fakeRecords,humourRecords])
# print(realRecords.iloc[:,6:9].head(10))
# print(fakeRecords.iloc[:,6:9].head(10))
realRecordsNumberOfWords = realRecords["word count"].mean()
fakeRecordsNumberOfWords = fakeRecords["word count"].mean()
humourRecordsNumberOfWords = humourRecords["word count"].mean()
realData = realRecordsNumberOfWords
fakeData = fakeRecordsNumberOfWords
humourData = humourRecordsNumberOfWords
plt.figure()
labelTypes = ["Real", "Fake", "Humour"]
y_pos = np.arange(len(labelTypes))
data = [realData, fakeData, humourData]
plt.bar(y_pos, data, align='center', alpha=0.5)
plt.xticks(y_pos, labelTypes)
plt.ylabel('Mean word count')
plt.title('Mean word count in the training dataset')
plt.savefig(fname="Data Visualisation/Mean words in training set.png")
plt.show()
def showNumberOfCharactersUsage(trainingSet):
realRecords = trainingSet[trainingSet["label"] == "real"]
fakeRecords = trainingSet[trainingSet["label"] == "fake"]
humourRecords = trainingSet[trainingSet["label"] == "humor"]
# fakeRecords = pd.concat([fakeRecords,humourRecords])
# print(realRecords.iloc[:,6:9].head(10))
# print(fakeRecords.iloc[:,6:9].head(10))
realRecordsNumberOfWords = realRecords["character count"].mean()
fakeRecordsNumberOfWords = fakeRecords["character count"].mean()
humourRecordsNumberOfWords = humourRecords["character count"].mean()
realData = realRecordsNumberOfWords
fakeData = fakeRecordsNumberOfWords
humourData = humourRecordsNumberOfWords
plt.figure()
labelTypes = ["Real", "Fake", "Humour"]
y_pos = np.arange(len(labelTypes))
data = [realData, fakeData, humourData]
plt.bar(y_pos, data, align='center', alpha=0.5)
plt.xticks(y_pos, labelTypes)
plt.ylabel('Mean character count')
plt.title('Mean character count in the training dataset')
plt.savefig(fname="Data Visualisation/Mean character count training set.png")
plt.show()
def detectTweetTextLanguage(row):
try:
lang = detect(row.iloc[1])
languageDetected.append(lang)
except:
lang = "error"
print("This row throws an error " + str(row.iloc[0]))
# 262974742716370944 Man sandy Foreal?? ⚡⚡⚡☔☔⚡🌊🌊☁🚣⛵💡🔌🚬🚬🚬🔫🔫🔒🔒🔐🔑🔒🚪🚪🚪🔨🔨🔨🏊🏊🏊🏊🎣🎣🎣😱😰😖😫😩😤💨💨💨💨💦💦💦💧💦💥💥💥👽💩🙌🙌🙌🙌🙌🏃🏃🏃🏃🏃👫👭💏👪👪👬👭💑🙇🌕🌕🌕🌎 http://t.co/vEWVXy10 183424929 sandyA_fake_29 vtintit Mon Oct 29 17:50:39 +0000 2012 fake
languageDetected.append(lang)
def detectPolarityOfTweet(row):
try:
blob = TextBlob(row.iloc[1])
polarityOfBlob = blob.polarity
polarityScores.append(polarityOfBlob)
if (polarityOfBlob == 0):
polarityOfBlob = "neutral"
elif (polarityOfBlob > 0):
polarityOfBlob = "positive"
elif (polarityOfBlob < 0):
polarityOfBlob = "negative"
else:
polarityOfBlob = "error"
polarityTweet.append(polarityOfBlob)
except:
blob = "error"
print("This row throws an error " + str(row.iloc[0]))
polarityTweet.append(blob)
def detectSubjectivityOfTweet(row):
try:
blob = TextBlob(row.iloc[1])
subjectivityOfBlob = blob.subjectivity
subjectivityScores.append(subjectivityOfBlob)
if (subjectivityOfBlob == 0.5):
subjectivityOfBlob = "neutral"
elif (subjectivityOfBlob < 0.5):
subjectivityOfBlob = "objective"
elif (subjectivityOfBlob > 0.5):
subjectivityOfBlob = "subjective"
else:
subjectivityOfBlob = "error"
subjectivityTweet.append(subjectivityOfBlob)
except:
blob = "error"
print("This row throws an error " + str(row.iloc[0]))
subjectivityTweet.append(blob)
# TO DO:
# PRE-PROCESSING DATASET:
# remove dupes - DONE
# scikit has count vectorizer
# need to decide between n-grams, bag of words, tf-idf, POS-tagging
# MAKING FEATURES:
# language detection - DONE
# has URL?
# number of URLS
# retweet?
# ML ALGO:
#
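# A minimal, self-contained sketch of the tf-idf option mentioned in the TO DO list above.
# It is illustrative only: the corpus argument, ngram_range and maxFeatures default are
# assumptions for the example, not the settings used elsewhere in this script, and the
# function is never called automatically.
def tfidfSketch(corpus, maxFeatures=1000):
    from sklearn.feature_extraction.text import TfidfVectorizer
    # Unigrams and bigrams, English stop words removed, vocabulary capped at maxFeatures.
    vectorizer = TfidfVectorizer(ngram_range=(1, 2), stop_words="english", max_features=maxFeatures)
    tfidfMatrix = vectorizer.fit_transform(corpus)  # sparse (n_documents x n_features) matrix
    return vectorizer, tfidfMatrix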
def featureGeneration(trainingSet):
for index, row in trainingSet.iterrows():
tweetTokens = word_tokenize(row[1])
detectTweetTextLanguage(row)
detectPolarityOfTweet(row)
detectSubjectivityOfTweet(row)
detectTweetFeatures(row)
trainingSet["language"] = languageDetected
trainingSet["polarity"] = polarityTweet
trainingSet["subjectivity"] = subjectivityTweet
trainingSet["polarity score"] = polarityScores
trainingSet["subjectivity score"] = subjectivityScores
trainingSet["character count"] = trainingSet.iloc[:, 1].apply(lambda x: len(x)) # taken
trainingSet['punctuation count'] = trainingSet.iloc[:, 1].apply(lambda x: len("".join(_ for _ in x if _ in punctuation))) # taken
trainingSet["number of exclamations"] = trainingSet.iloc[:, 1].apply(lambda x: extractExclamations(x))
trainingSet["number of questions"] = trainingSet.iloc[:, 1].apply(lambda x: extractQuestions(x))
trainingSet["number of ellipsis"] = trainingSet.iloc[:, 1].apply(lambda x: extractEllipsis(x))
trainingSet["word count"] = trainingSet.iloc[:, 1].apply(lambda x: len(x.split())) # taken
trainingSet['noun count'] = trainingSet.iloc[:, 1].apply(lambda x: checkPosTag(x, 'noun'))
trainingSet['verb count'] = trainingSet.iloc[:, 1].apply(lambda x: checkPosTag(x, 'verb'))
trainingSet['adjective count'] = trainingSet.iloc[:, 1].apply(lambda x: checkPosTag(x, 'adjective'))
trainingSet['adverb count'] = trainingSet.iloc[:, 1].apply(lambda x: checkPosTag(x, 'adverb'))
trainingSet['pronoun count'] = trainingSet.iloc[:, 1].apply(lambda x: checkPosTag(x, 'pronoun'))
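    # "number of locations", "number of disaster words" and "target" are filled from the
    # module-level lists that detectTweetFeatures() appends to for each row in the loop above,
    # so their order matches the rows of trainingSet.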
trainingSet["number of locations"] = numberOfLocations
trainingSet["number of disaster words"] = numberOfDisasterWords
trainingSet["number of emojis"] = trainingSet.iloc[:, 1].apply(lambda x: extractEmojis(x))
trainingSet["number of URLS"] = trainingSet.iloc[:, 1].apply(lambda x: extractUrlCount(x))
trainingSet["number of Hashtags"] = trainingSet.iloc[:, 1].apply(lambda x: extractHashtags(x))
trainingSet["number of mentions"] = trainingSet.iloc[:, 1].apply(lambda x: extractMentions(x))
trainingSet['word density'] = trainingSet['character count'] / (trainingSet['word count'] + 1) # taken
trainingSet["target"] = target
def featureGeneration2(trainingSet):
for index, row in trainingSet.iterrows():
if row[6] == "real":
target.append(1)
else:
target.append(0)
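# Note: featureGeneration2() above encodes real posts as 1, while detectTweetFeatures() below
# encodes real posts as 0; both append to the shared module-level `target` list, so only one of
# them should populate it in a given run.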
def detectTweetFeatures(row):
tweet = row[1]
locations = 0
disasterWords = 0
tweetTokens = word_tokenize(tweet)
if row[6] == "real":
target.append(0)
else:
target.append(1)
for w in tweetTokens:
if w not in stopWordsPunctuation:
tokenizedTweets.append(w)
if ((cityCorpora == w) | (countryCorpora == w) | (iso3Corpora == w.upper())).any():
                locations += 1
if (naturalDisasterWordsCopora == w).any():
                disasterWords += 1
numberOfLocations.append(locations)
numberOfDisasterWords.append(disasterWords)
def extractEmojis(tweet):
emojis = emoji.demojize(tweet)
emojis = re.findall(emojiRegex, emojis)
#emojisTweet = [emoji.emojize(x) for x in emojis]
#cnt = len(emojisTweet)
cnt = len(emojis)
return cnt
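# Illustration: emoji.demojize() rewrites each emoji as a ':name:' token, so a tweet containing
# two emojis produces two ':...:' matches above and extractEmojis() returns 2.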
def extractEllipsis(tweet):
ellipsisR = re.findall(ellipsisRegex, tweet)
cnt = (len(ellipsisR))
return cnt
def extractQuestions(tweet):
questions = re.findall(questionRegex, tweet)
cnt = (len(questions))
return cnt
def extractExclamations(tweet):
exclamations = re.findall(exclamationRegex, tweet)
cnt = (len(exclamations))
return cnt
def extractMentions(tweet):
mentions = re.findall(mentionRegex, tweet)
cnt = (len(mentions))
return cnt
def extractHashtags(tweet):
hashtags = re.findall(hashtagRegex, tweet)
cnt = (len(hashtags))
return cnt
def extractUrlCount(tweet):
URLs = re.findall(twitterLinkRegex, tweet)
cnt = (len(URLs))
return cnt
def checkPosTag(tweetTokens, tag):
cnt = 0
try:
        # Callers pass the raw tweet string, so tokenise it before POS tagging.
        POSTaggedTokens = nltk.pos_tag(word_tokenize(tweetTokens))
for tuple in POSTaggedTokens:
POStag = tuple[1]
if POStag in pos_tags[tag]:
cnt += 1
except:
pass
return cnt
def printFirst7Records():
with codecs.open("training_set.csv", "r", encoding="utf8") as txtFileToCsv:
        c = 0
for line in txtFileToCsv.readlines():
record = line.split(",")
print(record)
print(len(record))
if c == 8:
break
c += 1
createTrainingCsv()
trainingSet = pd.read_csv("training_set.csv", encoding="utf8", delimiter="x0x")
#trainingSet = pd.read_csv("testset.csv", encoding="utf8", delimiter="x0x")
#createTrainingCsv("mediaeval-2015-testset.txt","testset.csv")
#testSet = pd.read_csv("testset.csv", encoding="utf8", delimiter="x0x")
pos_tags = {
"noun" : ["NN","NNS","NNP","NNPS"],
"pronoun" : ["PRP","PRP$","WP","WP$"],
"verb" : ["VB","VBD","VBG","VBN","VBP","VBZ"],
"adjective" : ["JJ","JJR","JJS"],
"adverb" : ["RB","RBR","RBS","WRB"]
}
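# pos_tags maps coarse word classes to the Penn Treebank tags returned by nltk.pos_tag;
# checkPosTag() above counts the tokens of a tweet whose tag falls in one of these lists,
# e.g. (illustrative) checkPosTag("Floods hit the city", "noun") would count "Floods" and
# "city" if NLTK tags them as NNS/NN.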
locationCorpora = pd.read_csv("Cities database/worldcities.csv", encoding="utf8")
cityCorpora = locationCorpora["city"]
countryCorpora = locationCorpora["country"].drop_duplicates()
iso3Corpora = locationCorpora["iso3"].drop_duplicates()
#borrowed and slightly edited.
naturalDisasterWordsCopora = pd.Series(["tsunami", "disaster", "volcano", "tornado", "avalanche", "earthquake", "blizzard", "drought", "fire", "tremor", "storm","magma","twister", "windstorm", "heat wave", "cyclone", "fire", "flood","hailstorm", "lava", "lightning", "high-pressure", "hail", "hurricane", "seismic", "erosion", "whirlpool", "whirlwind","thunderstorm", "barometer", "gale", "blackout", "gust", "force", "low-pressure", "volt", "snowstorm", "rainstorm", "storm", "nimbus", "violent", "sandstorm", "casualty", "fatal", "fatality", "cumulonimbus", "death", "lost", "destruction", "tension", "cataclysm", "damage", "uproot", "underground", "destroy", "arsonist", "arson", "rescue", "permafrost", "disaster", "fault", "scientist", "shelter"])
# englishTrainingSet = pd.read_csv("englishTrainingSet.csv",encoding="utf8")
print("This is the size of the data ", trainingSet.shape)
# print("Size of englishTrainingSet.csv", englishTrainingSet.shape)
# print(trainingSet["username"].value_counts()) #some users have multiple posts in the same dataset
# subTrainingSet = trainingSet[0:11]
stopWords = set(stopwords.words("english"))
stopWordsPunctuation = set(stopwords.words("english") + list(punctuation))
twitterLinkRegex = "http:.*"
# twitterLinkRegex = "http: \*/\*/t.co/* | http://t.co/*"
hashtagRegex = "#([0-9]*[a-zA-Z]*)+"
mentionRegex = "@([0-9]*[a-zA-Z]*)+"
exclamationRegex = "!"
questionRegex = "\?"
ellipsisRegex = "\.{3}"
emojiRegex = r"(:[^:]*:)"
# think about emoticon regex
# need to extract named entities
# locations
# respected news agents
MAXFEATURES = 1000
tokenizedTweets = []
languageDetected = []
polarityTweet = []
subjectivityTweet = []
polarityScores = []
subjectivityScores = []
numberOfLocations = []
numberOfDisasterWords = []
target = []
#featureGeneration(trainingSet)
#featureGeneration(testSet)
#corpus = trainingSet.iloc[:,1]
#corpusCSV = pd.Series(corpus)
#corpusCSV.to_csv("corpusCSV.csv",encoding="utf8")
#corpus = pd.read_csv("corpusCSV.csv",encoding="utf8")
#vectorizer = CountVectorizer(stop_words="english")
#X = vectorizer.fit_transform(corpus) # 27468 words
#print(vectorizer.get_feature_names()) #token_pattern=r'\b\w+\b' -> whitespace word whitespace
#bigramVectorizer = CountVectorizer(ngram_range=(1, 2), token_pattern=r'\b\w+\b', min_df=1, stop_words="english" ,max_features=MAXFEATURES)
#X = bigramVectorizer.fit_transform(corpus) # 85822 words
#print(bigramVectorizer.get_feature_names())
#print(corpus)
#print(X.toarray())
#print(len(X.toarray()[0]))
#print(X.shape)
#print(len(corpus))
#transformer = TfidfTransformer()
#tfidf = transformer.fit_transform(X)
#print(tfidf.shape)
#print(tfidf.toarray())
#print(transformer.idf_)
#tfidfVectorizer = TfidfVectorizer(ngram_range=(1, 2), token_pattern=r'\b\w+\b', min_df=1, stop_words="english" ,max_features=MAXFEATURES,)
#Y = tfidfVectorizer.fit_transform(corpus)
#print(Y.shape)
#print(Y.toarray())
#print(tfidfVectorizer.idf_)
#print("n_samples: %d, n_features: %d" % Y.shape)
#trainingSet["Vectors"] = Y.toarray()
# trainingSet.to_csv(path_or_buf="D:/Work/Uni work/Comp3222 - MLT/CW/comp3222-mediaeval/testing1.csv",encoding="utf8")
#trainingSet.to_csv("trainingSetAllFeatures.csv",encoding="utf8")
trainingSet = pd.read_csv("trainingSetAllFeatures.csv",encoding="utf8")
#showLabelComposition(trainingSet)
showPOSTagsComposition(trainingSet)
showPolarityComposition(trainingSet)
showSubjectivityComposition(trainingSet)
#showNumberOfCharactersUsage(trainingSet)
#showNumberOfWordsUsage(trainingSet)
#testSet["language"] = languageDetected
#testSet["polarity"] = polarityTweet
#testSet["subjectivity"] = subjectivityTweet
#testSet["polarity score"] = polarityScores
#testSet["subjectivity score"] = subjectivityScores
#testSet["character length"] = numberOfCharacters
#testSet["number of exclamations"] = numberOfExclamations
#testSet["number of questions"] = numberOfQuestions
#testSet["number of ellipsis"] = numberOfEllipsis
#testSet["word length"] = numberOfWords
#testSet["number of locations"] = numberOfLocations
#testSet["number of disaster words"] = numberOfDisasterWords
#testSet["number of emojis"] = numberOfEmojis
#testSet["number of URLS"] = numberOfUrls
#testSet["number of Hashtags"] = numberOfHashtags
#testSet["number of mentions"] = numberOfMentions
#testSet["target"] = target
#trainingSet.to_csv(path_or_buf="D:/Work/Uni work/Comp3222 - MLT/CW/comp3222-mediaeval/tfidfAttempt.csv",encoding="utf8")
#testSet.to_csv(path_or_buf="D:/Work/Uni work/Comp3222 - MLT/CW/comp3222-mediaeval/allLanguagesDatasetTEST.csv",encoding="utf8")
#trainingSet = pd.read_csv("testing2.csv",encoding="utf8")
# showLanguageComposition(trainingSet) #11142
# en 76.93157494994131 % english
# es 9.024373403300421 % spanish
# tl 2.2440102188773046 % tagalog
# fr 1.5397362424911967 % french
# id 1.2221224884347166 % indonesian
#languageFilter = trainingSet["language"] == "en"
#languageFilter = testSet["language"] == "en"
#englishTrainingSet = trainingSet[languageFilter].reset_index(drop=True)
#englishTrainingSet = englishTrainingSet.drop_duplicates(ignore_index=False) # 11141
#englishTestSet = testSet[languageFilter].reset_index(drop=True)
#englishTestSet = englishTestSet.drop_duplicates(ignore_index=False) # 11141
#trainingSet.to_csv(path_or_buf="D:/Work/Uni work/Comp3222 - MLT/CW/comp3222-mediaeval/testing2.csv", encoding="utf8")
# print("the shape of english records " ,str(englishTrainingSet.shape)) #10956
# print("The shape of original records " ,str(trainingSet.shape)) #10955
# englishTrainingSet = englishTrainingSet.drop_duplicates(ignore_index=True)
# print("shape of english records with dupes removed ",str(t))
# duplicatedSeries = englishTrainingSet.duplicated(keep=False)
# print(duplicatedSeries.loc[lambda x : x == True])
# print("does english training set have same size ", str(englishTrainingSet.shape))
# showLanguageComposition(englishTrainingSet)
# showLabelComposition(englishTrainingSet)
# showPolarityComposition(englishTrainingSet)
# showEmojiComposition(englishTrainingSet)
# showMentionComposition(englishTrainingSet)
# showURLComposition(englishTrainingSet)
# showHashtagsComposition(englishTrainingSet)
# showTweetComposition(trainingSet)
# showNumberOfWordsUsage(englishTrainingSet)
# showNumberOfCharactersUsage(englishTrainingSet)
# englishTrainingSet
# print(englishTrainingSet["number of emojis"].value_counts())
# index = englishTrainingSet["number of emojis"] == 16
# print(englishTrainingSet[index])
#englishTrainingSet.to_csv(path_or_buf="D:/Work/Uni work/Comp3222 - MLT/CW/comp3222-mediaeval/tfidfAttemptEnglish.csv",encoding="utf8")
#englishTestSet.to_csv(path_or_buf="D:/Work/Uni work/Comp3222 - MLT/CW/comp3222-mediaeval/englishTrainingSet.csv",encoding="utf8")
#englishTrainingSet = pd.read_csv(path_or_buf="D:/Work/Uni work/Comp3222 - MLT/CW/comp3222-mediaeval/englishTrainingSet.csv",encoding="utf8")
#print(englishTrainingSet["label"].value_counts())
#realRecords = englishTrainingSet[englishTrainingSet["label"] == "real"]
#fakeRecords = englishTrainingSet[englishTrainingSet["label"] == "fake"]
#humourRecords = englishTrainingSet[englishTrainingSet["label"] == "humor"]
realRecords = trainingSet[trainingSet["label"] == "real"]
fakeRecords = trainingSet[trainingSet["label"] == "fake"]
humourRecords = trainingSet[trainingSet["label"] == "humor"]
INDICESREAL = 4000
INDICESFAKE = 3000
INDICESHUMOUR = 1000
realRecordsData = realRecords.iloc[:INDICESREAL, 10:23]
fakeRecordsData = fakeRecords.iloc[:INDICESFAKE, 10:23]
humourRecordsData = humourRecords.iloc[:INDICESHUMOUR, 10:23]
realTargets = realRecords.iloc[:INDICESREAL, 23]
fakeTargets = fakeRecords.iloc[:INDICESFAKE, 23]
humourTargets = humourRecords.iloc[:INDICESHUMOUR, 23]
subTrainingSet = pd.concat([realRecordsData, fakeRecordsData, humourRecordsData])
subTargetSet = | pd.concat([realTargets, fakeTargets, humourTargets]) | pandas.concat |
# Generated by nuclio.export.NuclioExporter
import mlrun
from mlrun.platforms.iguazio import mount_v3io, mount_v3iod
from mlrun.datastore import DataItem
from mlrun.execution import MLClientCtx
import os
from subprocess import run
import pandas as pd
import numpy as np
from pyspark.sql.types import LongType
from pyspark.sql import SparkSession
import sys
import base64 as b64
import warnings
warnings.filterwarnings("ignore")
from itertools import product
import matplotlib
import numpy as np
import json
import pandas as pd
from matplotlib import pyplot as plt
from pkg_resources import resource_filename
import six
from pyspark.sql import DataFrame as SparkDataFrame
from pyspark.sql.functions import (abs as df_abs, col, count, countDistinct,
max as df_max, mean, min as df_min,
sum as df_sum, when
)
from pyspark.sql.functions import variance, stddev, kurtosis, skewness
def describe(df, bins, corr_reject, config, **kwargs):
if not isinstance(df, SparkDataFrame):
raise TypeError("df must be of type pyspark.sql.DataFrame")
table_stats = {"n": df.count()}
if table_stats["n"] == 0:
raise ValueError("df cannot be empty")
try:
matplotlib.style.use("default")
except:
pass
def pretty_name(x):
x *= 100
if x == int(x):
return '%.0f%%' % x
else:
return '%.1f%%' % x
def corr_matrix(df, columns=None):
if columns is None:
columns = df.columns
combinations = list(product(columns,columns))
def separate(l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
grouped = list(separate(combinations,len(columns)))
df_cleaned = df.select(*columns).na.drop(how="any")
for i in grouped:
for j in enumerate(i):
i[j[0]] = i[j[0]] + (df_cleaned.corr(str(j[1][0]), str(j[1][1])),)
df_pandas = pd.DataFrame(grouped).applymap(lambda x: x[2])
df_pandas.columns = columns
df_pandas.index = columns
return df_pandas
def create_hist_data(df, column, minim, maxim, bins=10):
def create_all_conditions(current_col, column, left_edges, count=1):
"""
Recursive function that exploits the
ability to call the Spark SQL Column method
.when() in a recursive way.
"""
left_edges = left_edges[:]
if len(left_edges) == 0:
return current_col
if len(left_edges) == 1:
next_col = current_col.when(col(column) >= float(left_edges[0]), count)
left_edges.pop(0)
return create_all_conditions(next_col, column, left_edges[:], count+1)
next_col = current_col.when((float(left_edges[0]) <= col(column))
& (col(column) < float(left_edges[1])), count)
left_edges.pop(0)
return create_all_conditions(next_col, column, left_edges[:], count+1)
num_range = maxim - minim
bin_width = num_range / float(bins)
left_edges = [minim]
for _bin in range(bins):
left_edges = left_edges + [left_edges[-1] + bin_width]
left_edges.pop()
expression_col = when((float(left_edges[0]) <= col(column))
& (col(column) < float(left_edges[1])), 0)
left_edges_copy = left_edges[:]
left_edges_copy.pop(0)
bin_data = (df.select(col(column))
.na.drop()
.select(col(column),
create_all_conditions(expression_col,
column,
left_edges_copy
).alias("bin_id")
)
.groupBy("bin_id").count()
).toPandas()
bin_data.index = bin_data["bin_id"]
new_index = list(range(bins))
bin_data = bin_data.reindex(new_index)
bin_data["bin_id"] = bin_data.index
bin_data = bin_data.fillna(0)
bin_data["left_edge"] = left_edges
bin_data["width"] = bin_width
return bin_data
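    # create_hist_data() returns one row per bin with columns bin_id, count, left_edge and
    # width, which is enough to rebuild the histogram outside of Spark.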
def describe_integer_1d(df, column, current_result, nrows):
stats_df = df.select(column).na.drop().agg(mean(col(column)).alias("mean"),
df_min(col(column)).alias("min"),
df_max(col(column)).alias("max"),
variance(col(column)).alias("variance"),
kurtosis(col(column)).alias("kurtosis"),
stddev(col(column)).alias("std"),
skewness(col(column)).alias("skewness"),
df_sum(col(column)).alias("sum")
).toPandas()
for x in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
stats_df[pretty_name(x)] = (df.select(column)
.na.drop()
.selectExpr("percentile(`{col}`,CAST({n} AS DOUBLE))"
.format(col=column, n=x)).toPandas().iloc[:,0]
)
stats = stats_df.iloc[0].copy()
stats.name = column
stats["range"] = stats["max"] - stats["min"]
stats["iqr"] = stats[pretty_name(0.75)] - stats[pretty_name(0.25)]
stats["cv"] = stats["std"] / float(stats["mean"])
stats["mad"] = (df.select(column)
.na.drop()
.select(df_abs(col(column)-stats["mean"]).alias("delta"))
.agg(df_sum(col("delta"))).toPandas().iloc[0,0] / float(current_result["count"]))
stats["type"] = "NUM"
stats['n_zeros'] = df.select(column).where(col(column)==0.0).count()
stats['p_zeros'] = stats['n_zeros'] / float(nrows)
hist_data = create_hist_data(df, column, stats["min"], stats["max"], bins)
return stats
def describe_float_1d(df, column, current_result, nrows):
stats_df = df.select(column).na.drop().agg(mean(col(column)).alias("mean"),
df_min(col(column)).alias("min"),
df_max(col(column)).alias("max"),
variance(col(column)).alias("variance"),
kurtosis(col(column)).alias("kurtosis"),
stddev(col(column)).alias("std"),
skewness(col(column)).alias("skewness"),
df_sum(col(column)).alias("sum")
).toPandas()
for x in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
stats_df[pretty_name(x)] = (df.select(column)
.na.drop()
.selectExpr("percentile_approx(`{col}`,CAST({n} AS DOUBLE))"
.format(col=column, n=x)).toPandas().iloc[:,0]
)
stats = stats_df.iloc[0].copy()
stats.name = column
stats["range"] = stats["max"] - stats["min"]
stats["iqr"] = stats[pretty_name(0.75)] - stats[pretty_name(0.25)]
stats["cv"] = stats["std"] / float(stats["mean"])
stats["mad"] = (df.select(column)
.na.drop()
.select(df_abs(col(column)-stats["mean"]).alias("delta"))
.agg(df_sum(col("delta"))).toPandas().iloc[0,0] / float(current_result["count"]))
stats["type"] = "NUM"
stats['n_zeros'] = df.select(column).where(col(column)==0.0).count()
stats['p_zeros'] = stats['n_zeros'] / float(nrows)
hist_data = create_hist_data(df, column, stats["min"], stats["max"], bins)
return stats
def describe_date_1d(df, column):
stats_df = df.select(column).na.drop().agg(df_min(col(column)).alias("min"),
df_max(col(column)).alias("max")
).toPandas()
stats = stats_df.iloc[0].copy()
stats.name = column
if isinstance(stats["max"], pd.Timestamp):
stats = stats.astype(object)
stats["max"] = str(stats["max"].to_pydatetime())
stats["min"] = str(stats["min"].to_pydatetime())
else:
stats["range"] = stats["max"] - stats["min"]
stats["type"] = "DATE"
return stats
def guess_json_type(string_value):
try:
obj = json.loads(string_value)
except:
return None
return type(obj)
def describe_categorical_1d(df, column):
value_counts = (df.select(column).na.drop()
.groupBy(column)
.agg(count(col(column)))
.orderBy("count({c})".format(c=column),ascending=False)
).cache()
stats = (value_counts
.limit(1)
.withColumnRenamed(column, "top")
.withColumnRenamed("count({c})".format(c=column), "freq")
).toPandas().iloc[0]
top_50 = value_counts.limit(50).toPandas().sort_values("count({c})".format(c=column),
ascending=False)
top_50_categories = top_50[column].values.tolist()
others_count = pd.Series([df.select(column).na.drop()
.where(~(col(column).isin(*top_50_categories)))
.count()
], index=["***Other Values***"])
others_distinct_count = pd.Series([value_counts
.where(~(col(column).isin(*top_50_categories)))
.count()
], index=["***Other Values Distinct Count***"])
top = top_50.set_index(column)["count({c})".format(c=column)]
top = top.append(others_count)
top = top.append(others_distinct_count)
stats["value_counts"] = top
stats["type"] = "CAT"
value_counts.unpersist()
unparsed_valid_jsons = df.select(column).na.drop().rdd.map(
lambda x: guess_json_type(x[column])).filter(
lambda x: x).distinct().collect()
stats["unparsed_json_types"] = unparsed_valid_jsons
return stats
def describe_constant_1d(df, column):
stats = pd.Series(['CONST'], index=['type'], name=column)
stats["value_counts"] = (df.select(column)
.na.drop()
.limit(1)).toPandas().iloc[:,0].value_counts()
return stats
def describe_unique_1d(df, column):
stats = | pd.Series(['UNIQUE'], index=['type'], name=column) | pandas.Series |
import numpy as np
import pandas as pd
import inspect, os.path
import matplotlib.pyplot as plt
import seaborn as sns
import re
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
filename = inspect.getframeinfo(inspect.currentframe()).filename
path = os.path.dirname(os.path.abspath(filename))
# print(os.listdir(path+"/input"))
train_df=pd.read_csv(path+"/input/train.csv")
test_df=pd.read_csv(path+"/input/test.csv")
gender_submission_df= | pd.read_csv(path+"/input/gender_submission.csv") | pandas.read_csv |
import pandas as pd
import re, json
import argparse
'''
preprocessing for mimic discharge summary note
1. load NOTEEVENTS.csv
2. get discharge sumamry notes
a) NOTEVENTS.CATEGORY = 'Discharge Summary'
b) NOTEVENTS.DESCRIPTION = 'Report'
c) eliminate a short-note
3. preprocess discharge sumamry notes
a) clean text
b) split sections by headers
4. save csv file
a) PK: NOTEVENTS.ROW_ID
b) TEXT: string(doubled-list)
'''
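# Minimal sketch of the "split sections by headers" step (3.b above). The header pattern used
# here (a stand-alone capitalised line ending with ':', e.g. 'DISCHARGE DIAGNOSIS:') is an
# assumption for illustration and is not necessarily the exact rule used in this project.
def split_sections_sketch(note_text):
    header_re = re.compile(r'^([A-Z][A-Za-z ()/]+):\s*$', re.MULTILINE)
    sections, last_header, last_end = [], 'PREAMBLE', 0
    for m in header_re.finditer(note_text):
        sections.append([last_header, note_text[last_end:m.start()].strip()])
        last_header, last_end = m.group(1), m.end()
    sections.append([last_header, note_text[last_end:].strip()])
    return sections  # doubled list: [[header, body], ...], matching 4.b above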
def config():
parser = argparse.ArgumentParser()
parser.add_argument('--load_file_path', type=str, default='file/NOTEEVENTS.csv')
parser.add_argument('--save_file_path', type=str, default='sections_discharge_summary.csv')
opt = parser.parse_args()
return opt
def load_noteevents(file_path):
df = pd.read_csv(file_path)
# dataframe dtype config
df.CHARTDATE = | pd.to_datetime(df.CHARTDATE, format='%Y-%m-%d', errors='raise') | pandas.to_datetime |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pickle
from glob import glob
import os
from time import sleep
import subprocess
def get_all_file_paths(root_dir):
to_return = []
current_level_dfs = glob(f"{root_dir}/*Df.csv")
if len(current_level_dfs) > 0:
to_return += [df_path for df_path in current_level_dfs]
else:
for subdir in os.listdir(root_dir):
full_dir = f"{root_dir}/{subdir}"
if os.path.isdir(full_dir):
to_return += get_all_file_paths(full_dir)
return to_return
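# Usage illustration (directory name is hypothetical): get_all_file_paths("TSPPDNoEffectResampleFast")
# returns every "*Df.csv" found at the first level of each branch that contains any, and only
# recurses into sub-directories when the current level has none.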
if __name__ == "__main__":
os.chdir("../simulation_scripts")
os.system("./RunEffectSizeSimsSameArmsTSPPD.sh")
os.chdir("../simulation_analysis_scripts")
# wait 10 minutes for all the simulations to finish
sleep(600)
save_dir = 'TSPPDNoEffectResampleFast'
# get chi square cutoffs for each combination of n and c
num_sims = 500
arm_prob = 0.5
means = {}
cutoffs = {}
for n in (32, 88, 197, 785):
for c in (0.025, 0.1, 0.2, 0.3, 0.05):
try:
file_path = glob(save_dir + f"/num_sims={num_sims}armProb={arm_prob}/N={n}c={c}/*.csv")[0]
except IndexError:
print(save_dir + f"/num_sims={num_sims}armProb={arm_prob}/N={n}c={c}/*.csv", "not found")
continue
df_sims = pd.read_csv(file_path)[:num_sims]
plt.hist(df_sims['stat'])
plt.title(f"Chi-Square Statistic: n={n}, c={c}")
plt.xlabel("Chi-Square Statistic")
plt.ylabel("# Sims")
plt.savefig(f'../simulation_analysis_saves/chi_square_cutoff/chi_square_histogram_{n}.png')
plt.show()
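            # The cutoff below is the empirical 95th percentile of the simulated null chi-square
            # statistics, i.e. the critical value giving a 5% false positive rate under these
            # resampled no-effect simulations.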
cutoff = df_sims['stat'].sort_values().reset_index()['stat'][int(0.95 * num_sims)]
print(f"cutoff: {cutoff}")
cutoffs[f'n={n}_c={c}'] = cutoff
print(f"chi square mean: {df_sims['stat'].mean()}")
means[f'n={n}_c={c}'] = df_sims['stat'].mean()
# delete the old simulations
os.system(f"rm -rf {save_dir}")
# re-run the simulations both with a difference between arms and no difference
test_scripts = ["RunEffectSizeSimsSameArmsTSPPD.sh", "RunEffectSizeSimsTSPPD.sh"]
os.chdir("../simulation_scripts")
for test_script in test_scripts:
os.system(f"./{test_script}")
os.chdir("../simulation_analysis_scripts")
# wait 10 minutes for the new simulations to finish
sleep(600)
# compute false positive rate
save_dir = '../simulation_saves/TSPPDNoEffectResampleFast'
num_sims = 500
arm_prob = 0.5
df_fp = pd.DataFrame()
for n in (32, 88, 197, 785):
for c in (0.025, 0.1, 0.2, 0.3, 0.05):
try:
file_path = glob(save_dir + f"/num_sims={num_sims}armProb={arm_prob}/N={n}c={c}/*.csv")[0]
except IndexError:
print(save_dir + f"/num_sims={num_sims}armProb={arm_prob}/N={n}c={c}/*.csv", "not found")
continue
df_sims = pd.read_csv(file_path)[:num_sims]
if f'n={n}_c={c}' not in cutoffs:
continue
cutoff = cutoffs[f'n={n}_c={c}']
df_positives = df_sims[df_sims['stat'] > cutoff]
percent_positive = len(df_positives)/num_sims
print(f"# above chi-square_cutoff: {len(df_positives)}")
print(f"% of sims positive: {len(df_positives)/num_sims}")
df_fp = df_fp.append({'effect_size': 0, 'n': n, 'c': c, 'percent_positive': percent_positive}, ignore_index=True)
# compute true positive rate
save_dir = '../simulation_saves/TSPPDIsEffectResampleFast'
num_sims = 500
arm_prob = 0.5
df_power = pd.DataFrame()
for es in (0.1, 0.2, 0.3, 0.5):
for c in (0.025, 0.1, 0.2, 0.3, 0.05):
try:
file_path = glob(save_dir + f"/num_sims={num_sims}armProb={arm_prob}/es={es}c={c}/*.csv")[0]
except IndexError:
print(save_dir + f"/num_sims={num_sims}armProb={arm_prob}/N={n}c={c}/*.csv", "not found")
continue
df_sims = | pd.read_csv(file_path) | pandas.read_csv |
"""
The double-7s-ave-portfolio stategy.
This is double-7s strategy applied to a portfolio.
The simple double 7's strategy was revealed in the book
'Short Term Strategies that Work: A Quantified Guide to Trading Stocks
and ETFs', by <NAME> and <NAME>. It's a mean reversion
strategy looking to buy dips and sell on strength and was initially
designed for ETFs.
This module allows us to examine this strategy and try different
number of periods. Each period is represented using a different
symbol, for example SPY_7 for 7 day high/low. This allows us to
compare the periods as if we were comparing different stocks in a
portfolio.
"""
import datetime
import matplotlib.pyplot as plt
import pandas as pd
from talib.abstract import *
import pinkfish as pf
default_options = {
'use_adj' : False,
'use_cache' : True,
'margin' : 1.0,
'periods' : [7],
'sma' : 200,
'use_regime_filter' : True,
}
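# Illustrative sketch (not part of pinkfish) of the double-7s signal on a plain pandas series of
# closing prices: buy when today's close is the lowest close of the last `period` days, sell when
# it is the highest. The column names below are assumptions for the example only.
def double_sevens_signals(close, period=7):
    period_high = close.rolling(period, min_periods=period).max()
    period_low = close.rolling(period, min_periods=period).min()
    signals = pd.DataFrame({'close': close})
    signals['buy'] = close == period_low    # close at an X-day low -> mean-reversion entry
    signals['sell'] = close == period_high  # close at an X-day high -> exit on strength
    return signals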
class Strategy:
def __init__(self, symbol, capital, start, end, options=default_options):
self.symbol = symbol
self.capital = capital
self.start = start
self.end = end
self.options = options.copy()
self.symbols = None
self.ts = None
self.rlog = None
self.tlog = None
self.dbal = None
self.stats = None
def _algo(self):
pf.TradeLog.cash = self.capital
pf.TradeLog.margin = self.options['margin']
# Loop though timeseries.
for i, row in enumerate(self.ts.itertuples()):
date = row.Index.to_pydatetime()
end_flag = pf.is_last_row(self.ts, i)
# Get the prices for this row, put in dict p.
p = self.portfolio.get_prices(row, fields=['close'])
# Get row values
regime = row.regime
sma = row.sma
# Loop though each symbol in portfolio.
for symbol in self.portfolio.symbols:
period = str(symbol.split('_')[1])
period_high_field = 'period_high' + str(symbol.split('_')[1])
period_low_field = 'period_low' + str(symbol.split('_')[1])
period_high = getattr(row, period_high_field)
period_low = getattr(row, period_low_field)
# Use variables to make code cleaner.
close = p[symbol]['close']
# Sell Logic
# First we check if an existing position in symbol should be sold
# - sell if price closes at X day high
# - sell if end of data by adjusted the percent to zero
if symbol in self.portfolio.positions:
if close == period_high or end_flag:
self.portfolio.adjust_percent(date, close, 0, symbol, row)
# Buy Logic
# First we check to see if there is an existing position, if so do nothing
                # - Buy if ((regime > 0 or close > sma) or not use_regime_filter) and price closes at X day low
                else:
                    if (((regime > 0 or close > sma) or not self.options['use_regime_filter'])
                            and close == period_low):
# Use equal weight.
weight = 1 / len(self.portfolio.symbols)
self.portfolio.adjust_percent(date, close, weight, symbol, row)
# record daily balance
self.portfolio.record_daily_balance(date, row)
def run(self):
# Build the list of symbols.
periods = self.options['periods']
self.symbols = []
for period in periods:
symbol = self.symbol + '_' + str(period)
self.symbols.append(symbol)
self.portfolio = pf.Portfolio()
self.ts = self.portfolio.fetch_timeseries(self.symbols, self.start, self.end,
fields=['close'], use_cache=self.options['use_cache'],
use_adj=self.options['use_adj'])
# Fetch symbol time series
ts = pf.fetch_timeseries(self.symbol, use_cache=self.options['use_cache'])
ts = pf.select_tradeperiod(ts, self.start, self.end, use_adj=self.options['use_adj'])
# Add technical indicator: 200 sma regime filter.
self.ts['regime'] = pf.CROSSOVER(ts, timeperiod_fast=1, timeperiod_slow=200)
# Add technical indicator: X day sma.
self.ts['sma'] = SMA(ts, timeperiod=self.options['sma'])
# Add technical indicator: X day high, and X day low.
for period in periods:
self.ts['period_high'+str(period)] = | pd.Series(ts.close) | pandas.Series |
#----------------------------------------------------------------------------------------------
####################
# IMPORT LIBRARIES #
####################
import streamlit as st
import pandas as pd
import numpy as np
import plotly as dd
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.font_manager
import plotly.graph_objects as go
import functions as fc
import modelling as ml
import os
import altair as alt
import altair
import itertools
import statsmodels.api as sm
from scipy import stats
import sys
from streamlit import caching
import SessionState
import platform
import base64
from io import BytesIO
from pygam import LinearGAM, LogisticGAM, s
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
from factor_analyzer import FactorAnalyzer
from factor_analyzer.factor_analyzer import calculate_bartlett_sphericity
from factor_analyzer.factor_analyzer import calculate_kmo
#----------------------------------------------------------------------------------------------
def app():
# Clear cache
caching.clear_cache()
# Hide traceback in error messages (comment out for de-bugging)
#sys.tracebacklimit = 0
# Show altair tooltip when full screen
st.markdown('<style>#vg-tooltip-element{z-index: 1000051}</style>',unsafe_allow_html=True)
#Session state
session_state = SessionState.get(id = 0)
# Analysis type
analysis_type = st.selectbox("What kind of analysis would you like to conduct?", ["Regression", "Multi-class classification", "Data decomposition"], key = session_state.id)
st.header("**Multivariate data**")
if analysis_type == "Regression":
st.markdown("Get your data ready for powerfull methods: Artificial Neural Networks, Boosted Regression Trees, Random Forest, Generalized Additive Models, Multiple Linear Regression, and Logistic Regression! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
if analysis_type == "Multi-class classification":
st.markdown("Get your data ready for powerfull multi-class classification methods! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
if analysis_type == "Data decomposition":
st.markdown("Decompose your data with Principal Component Analysis or Factor Analysis! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA IMPORT
# File upload section
df_dec = st.sidebar.radio("Get data", ["Use example dataset", "Upload data"])
uploaded_data=None
if df_dec == "Upload data":
#st.subheader("Upload your data")
#uploaded_data = st.sidebar.file_uploader("Make sure that dot (.) is a decimal separator!", type=["csv", "txt"])
separator_expander=st.sidebar.beta_expander('Upload settings')
with separator_expander:
a4,a5=st.beta_columns(2)
with a4:
dec_sep=a4.selectbox("Decimal sep.",['.',','], key = session_state.id)
with a5:
col_sep=a5.selectbox("Column sep.",[';', ',' , '|', '\s+', '\t','other'], key = session_state.id)
if col_sep=='other':
col_sep=st.text_input('Specify your column separator', key = session_state.id)
a4,a5=st.beta_columns(2)
with a4:
thousands_sep=a4.selectbox("Thousands x sep.",[None,'.', ' ','\s+', 'other'], key = session_state.id)
if thousands_sep=='other':
thousands_sep=st.text_input('Specify your thousands separator', key = session_state.id)
with a5:
encoding_val=a5.selectbox("Encoding",[None,'utf_8','utf_8_sig','utf_16_le','cp1140','cp1250','cp1251','cp1252','cp1253','cp1254','other'], key = session_state.id)
if encoding_val=='other':
encoding_val=st.text_input('Specify your encoding', key = session_state.id)
# Error handling for separator selection:
if dec_sep==col_sep:
st.sidebar.error("Decimal and column separators cannot be identical!")
elif dec_sep==thousands_sep:
st.sidebar.error("Decimal and thousands separators cannot be identical!")
elif col_sep==thousands_sep:
st.sidebar.error("Column and thousands separators cannot be identical!")
uploaded_data = st.sidebar.file_uploader("Default separators: decimal '.' | column ';'", type=["csv", "txt"])
if uploaded_data is not None:
df = pd.read_csv(uploaded_data, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
df_name=os.path.splitext(uploaded_data.name)[0]
st.sidebar.success('Loading data... done!')
elif uploaded_data is None:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
df = pd.read_csv("default data/WHR_2021.csv", sep = ";|,|\t",engine='python')
df_name="WHR_2021"
if analysis_type == "Multi-class classification":
df = pd.read_csv("default data/iris.csv", sep = ";|,|\t",engine='python')
df_name="iris"
else:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
df = pd.read_csv("default data/WHR_2021.csv", sep = ";|,|\t",engine='python')
df_name="WHR_2021"
if analysis_type == "Multi-class classification":
df = pd.read_csv("default data/iris.csv", sep = ";|,|\t",engine='python')
df_name="iris"
st.sidebar.markdown("")
#Basic data info
n_rows = df.shape[0]
n_cols = df.shape[1]
#++++++++++++++++++++++++++++++++++++++++++++
# SETTINGS
settings_expander=st.sidebar.beta_expander('Settings')
with settings_expander:
st.caption("**Precision**")
user_precision=st.number_input('Number of digits after the decimal point',min_value=0,max_value=10,step=1,value=4)
st.caption("**Help**")
sett_hints = st.checkbox('Show learning hints', value=False)
st.caption("**Appearance**")
sett_wide_mode = st.checkbox('Wide mode', value=False)
sett_theme = st.selectbox('Theme', ["Light", "Dark"])
#sett_info = st.checkbox('Show methods info', value=False)
#sett_prec = st.number_input('Set the number of diggits for the output', min_value=0, max_value=8, value=2)
st.sidebar.markdown("")
# Check if wide mode
if sett_wide_mode:
fc.wide_mode_func()
# Check theme
if sett_theme == "Dark":
fc.theme_func_dark()
if sett_theme == "Light":
fc.theme_func_light()
fc.theme_func_dl_button()
#++++++++++++++++++++++++++++++++++++++++++++
# RESET INPUT
reset_clicked = st.sidebar.button("Reset all your input")
if reset_clicked:
session_state.id = session_state.id + 1
st.sidebar.markdown("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA PREPROCESSING & VISUALIZATION
# Check if enough data is available
if n_rows > 0 and n_cols > 0:
st.empty()
else:
st.error("ERROR: Not enough data!")
return
data_exploration_container = st.beta_container()
with data_exploration_container:
st.header("**Data screening and processing**")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA SUMMARY
# Main panel for data summary (pre)
#----------------------------------
dev_expander_dsPre = st.beta_expander("Explore raw data info and stats ", expanded = False)
with dev_expander_dsPre:
# Default data description:
if uploaded_data == None:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
if st.checkbox("Show data description", value = False, key = session_state.id):
st.markdown("**Data source:**")
st.markdown("The data come from the Gallup World Poll surveys from 2018 to 2020. For more details see the [World Happiness Report 2021] (https://worldhappiness.report/).")
st.markdown("**Citation:**")
st.markdown("Helliwell, <NAME>., <NAME>, <NAME>, and <NAME>, eds. 2021. World Happiness Report 2021. New York: Sustainable Development Solutions Network.")
st.markdown("**Variables in the dataset:**")
col1,col2=st.beta_columns(2)
col1.write("Country")
col2.write("country name")
col1,col2=st.beta_columns(2)
col1.write("Year ")
col2.write("year ranging from 2005 to 2020")
col1,col2=st.beta_columns(2)
col1.write("Ladder")
col2.write("happiness score or subjective well-being with the best possible life being a 10, and the worst possible life being a 0")
col1,col2=st.beta_columns(2)
col1.write("Log GDP per capita")
col2.write("in purchasing power parity at constant 2017 international dollar prices")
col1,col2=st.beta_columns(2)
col1.write("Social support")
col2.write("the national average of the binary responses (either 0 or 1) to the question regarding relatives or friends to count on")
col1,col2=st.beta_columns(2)
col1.write("Healthy life expectancy at birth")
col2.write("based on the data extracted from the World Health Organization’s Global Health Observatory data repository")
col1,col2=st.beta_columns(2)
col1.write("Freedom to make life choices")
col2.write("national average of responses to the corresponding question")
col1,col2=st.beta_columns(2)
col1.write("Generosity")
col2.write("residual of regressing national average of response to the question regarding money donations in the past month on GDP per capita")
col1,col2=st.beta_columns(2)
col1.write("Perceptions of corruption")
col2.write("the national average of the survey responses to the corresponding question")
col1,col2=st.beta_columns(2)
col1.write("Positive affect")
col2.write("the average of three positive affect measures (happiness, laugh and enjoyment)")
col1,col2=st.beta_columns(2)
col1.write("Negative affect (worry, sadness and anger)")
col2.write("the average of three negative affect measures (worry, sadness and anger)")
st.markdown("")
if analysis_type == "Multi-class classification":
if st.checkbox("Show data description", value = False, key = session_state.id):
st.markdown("**Data source:**")
st.markdown("The data come from Fisher's Iris data set. See [here] (https://archive.ics.uci.edu/ml/datasets/iris) for more information.")
st.markdown("**Citation:**")
st.markdown("<NAME>. (1936). The use of multiple measurements in taxonomic problems. Annals of Eugenics, 7(2): 179–188. doi: [10.1111/j.1469-1809.1936.tb02137.x] (https://doi.org/10.1111%2Fj.1469-1809.1936.tb02137.x)")
st.markdown("**Variables in the dataset:**")
col1,col2=st.beta_columns(2)
col1.write("class_category")
col2.write("Numerical category for 'class': Iris Setosa (0), Iris Versicolour (1), and Iris Virginica (2)")
col1,col2=st.beta_columns(2)
col1.write("class")
col2.write("Iris Setosa, Iris Versicolour, and Iris Virginica")
col1,col2=st.beta_columns(2)
col1.write("sepal length")
col2.write("sepal length in cm")
col1,col2=st.beta_columns(2)
col1.write("sepal width")
col2.write("sepal width in cm")
col1,col2=st.beta_columns(2)
col1.write("petal length")
col2.write("petal length in cm")
col1,col2=st.beta_columns(2)
col1.write("petal width")
col2.write("petal width in cm")
st.markdown("")
# Show raw data & data info
df_summary = fc.data_summary(df)
if st.checkbox("Show raw data ", value = False, key = session_state.id):
st.write(df)
st.write("Data shape: ", n_rows, " rows and ", n_cols, " columns")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl=st.checkbox("Show duplicates and NAs info ", value = False, key = session_state.id)
if check_nasAnddupl:
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0])
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(np.where(df.isnull())[0])))))
# Show variable info
if st.checkbox('Show variable info ', value = False, key = session_state.id):
st.write(df_summary["Variable types"])
# Show summary statistics (raw data)
if st.checkbox('Show summary statistics (raw data) ', value = False, key = session_state.id):
st.write(df_summary["ALL"].style.set_precision(user_precision))
# Download link for summary statistics
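                # The link is built by writing the summary tables to an in-memory Excel file,
                # base64-encoding the bytes and embedding them in a data: URI anchor tag.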
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_summary["Variable types"].to_excel(excel_file, sheet_name="variable_info")
df_summary["ALL"].to_excel(excel_file, sheet_name="summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Summary statistics__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
#++++++++++++++++++++++
# DATA PROCESSING
# Settings for data processing
#-------------------------------------
#st.subheader("Data processing")
dev_expander_dm_sb = st.beta_expander("Specify data processing preferences", expanded = False)
with dev_expander_dm_sb:
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
n_rows_wNAs_pre_processing = "No"
if n_rows_wNAs > 0:
n_rows_wNAs_pre_processing = "Yes"
a1, a2, a3 = st.beta_columns(3)
else: a1, a3 = st.beta_columns(2)
sb_DM_dImp_num = None
sb_DM_dImp_other = None
sb_DM_delRows=None
sb_DM_keepRows=None
with a1:
#--------------------------------------------------------------------------------------
# DATA CLEANING
st.markdown("**Data cleaning**")
# Delete rows
delRows =st.selectbox('Delete rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = session_state.id)
if delRows!='-':
if delRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
if (row_1 + 1) < row_2 :
sb_DM_delRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif delRows=='equal':
sb_DM_delRows = st.multiselect("to...", df.index, key = session_state.id)
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = session_state.id)
if delRows=='greater':
sb_DM_delRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.warning("WARNING: No row is deleted!")
elif delRows=='greater or equal':
sb_DM_delRows=df.index[df.index >= row_1]
if row_1 == 0:
st.error("ERROR: All rows are deleted!")
return
elif delRows=='smaller':
sb_DM_delRows=df.index[df.index < row_1]
if row_1 == 0:
st.warning("WARNING: No row is deleted!")
elif delRows=='smaller or equal':
sb_DM_delRows=df.index[df.index <= row_1]
if row_1 == len(df)-1:
st.error("ERROR: All rows are deleted!")
return
if sb_DM_delRows is not None:
df = df.loc[~df.index.isin(sb_DM_delRows)]
no_delRows=n_rows-df.shape[0]
# Keep rows
keepRows =st.selectbox('Keep rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = session_state.id)
if keepRows!='-':
if keepRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
if (row_1 + 1) < row_2 :
sb_DM_keepRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif keepRows=='equal':
sb_DM_keepRows = st.multiselect("to...", df.index, key = session_state.id)
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = session_state.id)
if keepRows=='greater':
sb_DM_keepRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.error("ERROR: No row is kept!")
return
elif keepRows=='greater or equal':
sb_DM_keepRows=df.index[df.index >= row_1]
if row_1 == 0:
st.warning("WARNING: All rows are kept!")
elif keepRows=='smaller':
sb_DM_keepRows=df.index[df.index < row_1]
if row_1 == 0:
st.error("ERROR: No row is kept!")
return
elif keepRows=='smaller or equal':
sb_DM_keepRows=df.index[df.index <= row_1]
if sb_DM_keepRows is not None:
df = df.loc[df.index.isin(sb_DM_keepRows)]
no_keptRows=df.shape[0]
# Delete columns
sb_DM_delCols = st.multiselect("Select columns to delete ", df.columns, key = session_state.id)
df = df.loc[:,~df.columns.isin(sb_DM_delCols)]
# Keep columns
sb_DM_keepCols = st.multiselect("Select columns to keep", df.columns, key = session_state.id)
if len(sb_DM_keepCols) > 0:
df = df.loc[:,df.columns.isin(sb_DM_keepCols)]
# Delete duplicates if any exist
if df[df.duplicated()].shape[0] > 0:
sb_DM_delDup = st.selectbox("Delete duplicate rows ", ["No", "Yes"], key = session_state.id)
if sb_DM_delDup == "Yes":
n_rows_dup = df[df.duplicated()].shape[0]
df = df.drop_duplicates()
elif df[df.duplicated()].shape[0] == 0:
sb_DM_delDup = "No"
# Delete rows with NA if any exist
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
if n_rows_wNAs > 0:
sb_DM_delRows_wNA = st.selectbox("Delete rows with NAs ", ["No", "Yes"], key = session_state.id)
if sb_DM_delRows_wNA == "Yes":
df = df.dropna()
elif n_rows_wNAs == 0:
sb_DM_delRows_wNA = "No"
# Filter data
st.markdown("**Data filtering**")
filter_var = st.selectbox('Filter your data by a variable...', list('-')+ list(df.columns), key = session_state.id)
if filter_var !='-':
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if df[filter_var].dtypes=="float64":
filter_format="%.8f"
else:
filter_format=None
user_filter=st.selectbox('Select values that are ...', options=['greater','greater or equal','smaller','smaller or equal', 'equal','between'], key = session_state.id)
if user_filter=='between':
filter_1=st.number_input('Lower limit is', format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
filter_2=st.number_input('Upper limit is', format=filter_format, value=df[filter_var].max(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
#reclassify values:
if filter_1 < filter_2 :
df = df[(df[filter_var] > filter_1) & (df[filter_var] < filter_2)]
if len(df) == 0:
st.error("ERROR: No data available for the selected limits!")
return
elif filter_1 >= filter_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif user_filter=='equal':
filter_1=st.multiselect('to... ', options=df[filter_var].values, key = session_state.id)
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
else:
filter_1=st.number_input('than... ',format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
#reclassify values:
if user_filter=='greater':
df = df[df[filter_var] > filter_1]
elif user_filter=='greater or equal':
df = df[df[filter_var] >= filter_1]
elif user_filter=='smaller':
df= df[df[filter_var]< filter_1]
elif user_filter=='smaller or equal':
df = df[df[filter_var] <= filter_1]
if len(df) == 0:
st.error("ERROR: No data available for the selected value!")
return
elif len(df) == n_rows:
st.warning("WARNING: Data are not filtered for this value!")
else:
filter_1=st.multiselect('Filter your data by a value...', (df[filter_var]).unique(), key = session_state.id)
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
if n_rows_wNAs_pre_processing == "Yes":
with a2:
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
# Select data imputation method (only if rows with NA not deleted)
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.markdown("**Data imputation**")
sb_DM_dImp_choice = st.selectbox("Replace entries with NA ", ["No", "Yes"], key = session_state.id)
if sb_DM_dImp_choice == "Yes":
# Numeric variables
sb_DM_dImp_num = st.selectbox("Imputation method for numeric variables ", ["Mean", "Median", "Random value"], key = session_state.id)
# Other variables
sb_DM_dImp_other = st.selectbox("Imputation method for other variables ", ["Mode", "Random value"], key = session_state.id)
df = fc.data_impute(df, sb_DM_dImp_num, sb_DM_dImp_other)
else:
st.markdown("**Data imputation**")
st.write("")
st.info("No NAs in data set!")
with a3:
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
st.markdown("**Data transformation**")
# Select columns for different transformation types
transform_options = df.select_dtypes([np.number]).columns
numCat_options = df.columns
sb_DM_dTrans_log = st.multiselect("Select columns to transform with log ", transform_options, key = session_state.id)
if sb_DM_dTrans_log is not None:
df = fc.var_transform_log(df, sb_DM_dTrans_log)
sb_DM_dTrans_sqrt = st.multiselect("Select columns to transform with sqrt ", transform_options, key = session_state.id)
if sb_DM_dTrans_sqrt is not None:
df = fc.var_transform_sqrt(df, sb_DM_dTrans_sqrt)
sb_DM_dTrans_square = st.multiselect("Select columns for squaring ", transform_options, key = session_state.id)
if sb_DM_dTrans_square is not None:
df = fc.var_transform_square(df, sb_DM_dTrans_square)
sb_DM_dTrans_cent = st.multiselect("Select columns for centering ", transform_options, key = session_state.id)
if sb_DM_dTrans_cent is not None:
df = fc.var_transform_cent(df, sb_DM_dTrans_cent)
sb_DM_dTrans_stand = st.multiselect("Select columns for standardization ", transform_options, key = session_state.id)
if sb_DM_dTrans_stand is not None:
df = fc.var_transform_stand(df, sb_DM_dTrans_stand)
sb_DM_dTrans_norm = st.multiselect("Select columns for normalization ", transform_options, key = session_state.id)
if sb_DM_dTrans_norm is not None:
df = fc.var_transform_norm(df, sb_DM_dTrans_norm)
sb_DM_dTrans_numCat = st.multiselect("Select columns for numeric categorization ", numCat_options, key = session_state.id)
if sb_DM_dTrans_numCat:
if not df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist():
sb_DM_dTrans_numCat_sel = st.multiselect("Select variables for manual categorization ", sb_DM_dTrans_numCat, key = session_state.id)
if sb_DM_dTrans_numCat_sel:
for var in sb_DM_dTrans_numCat_sel:
if df[var].unique().size > 5:
st.error("ERROR: Selected variable has too many categories (>5): " + str(var))
return
else:
manual_cats = pd.DataFrame(index = range(0, df[var].unique().size), columns=["Value", "Cat"])
text = "Category for "
# Save manually selected categories
for i in range(0, df[var].unique().size):
text1 = text + str(var) + ": " + str(sorted(df[var].unique())[i])
man_cat = st.number_input(text1, value = 0, min_value=0, key = session_state.id)
manual_cats.loc[i]["Value"] = sorted(df[var].unique())[i]
manual_cats.loc[i]["Cat"] = man_cat
new_var_name = "numCat_" + var
new_var = pd.DataFrame(index = df.index, columns = [new_var_name])
for c in df[var].index:
if pd.isnull(df[var][c]) == True:
new_var.loc[c, new_var_name] = np.nan
elif pd.isnull(df[var][c]) == False:
new_var.loc[c, new_var_name] = int(manual_cats[manual_cats["Value"] == df[var][c]]["Cat"])
df[new_var_name] = new_var.astype('int64')
# Exclude columns with manual categorization from standard categorization
numCat_wo_manCat = [var for var in sb_DM_dTrans_numCat if var not in sb_DM_dTrans_numCat_sel]
df = fc.var_transform_numCat(df, numCat_wo_manCat)
else:
df = fc.var_transform_numCat(df, sb_DM_dTrans_numCat)
else:
col_with_na = df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist()
st.error("ERROR: Please select columns without NAs: " + ', '.join(map(str,col_with_na)))
return
else:
sb_DM_dTrans_numCat = None
sb_DM_dTrans_mult = st.number_input("Number of variable multiplications ", value = 0, min_value=0, key = session_state.id)
if sb_DM_dTrans_mult != 0:
multiplication_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_mult), columns=["Var1", "Var2"])
text = "Multiplication pair"
for i in range(0, sb_DM_dTrans_mult):
text1 = text + " " + str(i+1)
text2 = text + " " + str(i+1) + " "
mult_var1 = st.selectbox(text1, transform_options, key = session_state.id)
mult_var2 = st.selectbox(text2, transform_options, key = session_state.id)
multiplication_pairs.loc[i]["Var1"] = mult_var1
multiplication_pairs.loc[i]["Var2"] = mult_var2
fc.var_transform_mult(df, mult_var1, mult_var2)
sb_DM_dTrans_div = st.number_input("Number of variable divisions ", value = 0, min_value=0, key = session_state.id)
if sb_DM_dTrans_div != 0:
division_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_div), columns=["Var1", "Var2"])
text = "Division pair"
for i in range(0, sb_DM_dTrans_div):
text1 = text + " " + str(i+1) + " (numerator)"
text2 = text + " " + str(i+1) + " (denominator)"
div_var1 = st.selectbox(text1, transform_options, key = session_state.id)
div_var2 = st.selectbox(text2, transform_options, key = session_state.id)
division_pairs.loc[i]["Var1"] = div_var1
division_pairs.loc[i]["Var2"] = div_var2
fc.var_transform_div(df, div_var1, div_var2)
data_transform=st.checkbox("Transform data in Excel?", value=False)
if data_transform==True:
st.info("Press the button to open your data in Excel. Don't forget to save your result as a csv or a txt file!")
# Download link
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="data",index=False)
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Data_transformation__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Transform your data in Excel</a>
""",
unsafe_allow_html=True)
st.write("")
#--------------------------------------------------------------------------------------
# PROCESSING SUMMARY
if st.checkbox('Show a summary of my data processing preferences ', value = False, key = session_state.id):
st.markdown("Summary of data changes:")
#--------------------------------------------------------------------------------------
# DATA CLEANING
# Rows
if sb_DM_delRows is not None and delRows!='-' :
if no_delRows > 1:
st.write("-", no_delRows, " rows were deleted!")
elif no_delRows == 1:
st.write("-",no_delRows, " row was deleted!")
elif no_delRows == 0:
st.write("- No row was deleted!")
else:
st.write("- No row was deleted!")
if sb_DM_keepRows is not None and keepRows!='-' :
if no_keptRows > 1:
st.write("-", no_keptRows, " rows are kept!")
elif no_keptRows == 1:
st.write("-",no_keptRows, " row is kept!")
elif no_keptRows == 0:
st.write("- All rows are kept!")
else:
st.write("- All rows are kept!")
# Columns
if len(sb_DM_delCols) > 1:
st.write("-", len(sb_DM_delCols), " columns were deleted:", ', '.join(sb_DM_delCols))
elif len(sb_DM_delCols) == 1:
st.write("-",len(sb_DM_delCols), " column was deleted:", str(sb_DM_delCols[0]))
elif len(sb_DM_delCols) == 0:
st.write("- No column was deleted!")
if len(sb_DM_keepCols) > 1:
st.write("-", len(sb_DM_keepCols), " columns are kept:", ', '.join(sb_DM_keepCols))
elif len(sb_DM_keepCols) == 1:
st.write("-",len(sb_DM_keepCols), " column is kept:", str(sb_DM_keepCols[0]))
elif len(sb_DM_keepCols) == 0:
st.write("- All columns are kept!")
# Duplicates
if sb_DM_delDup == "Yes":
if n_rows_dup > 1:
st.write("-", n_rows_dup, " duplicate rows were deleted!")
elif n_rows_dup == 1:
st.write("-", n_rows_dup, "duplicate row was deleted!")
else:
st.write("- No duplicate row was deleted!")
# NAs
if sb_DM_delRows_wNA == "Yes":
if n_rows_wNAs > 1:
st.write("-", n_rows_wNAs, "rows with NAs were deleted!")
elif n_rows_wNAs == 1:
st.write("-", n_rows - n_rows_wNAs, "row with NAs was deleted!")
else:
st.write("- No row with NAs was deleted!")
# Filter
if filter_var != "-":
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if isinstance(filter_1, list):
if len(filter_1) == 0:
st.write("-", " Data was not filtered!")
elif len(filter_1) > 0:
st.write("-", " Data filtered by:", str(filter_var))
elif filter_1 is not None:
st.write("-", " Data filtered by:", str(filter_var))
else:
st.write("-", " Data was not filtered!")
elif len(filter_1)>0:
st.write("-", " Data filtered by:", str(filter_var))
elif len(filter_1) == 0:
st.write("-", " Data was not filtered!")
else:
st.write("-", " Data was not filtered!")
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.write("- Data imputation method for numeric variables:", sb_DM_dImp_num)
st.write("- Data imputation method for other variable types:", sb_DM_dImp_other)
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
# log
if len(sb_DM_dTrans_log) > 1:
st.write("-", len(sb_DM_dTrans_log), " columns were log-transformed:", ', '.join(sb_DM_dTrans_log))
elif len(sb_DM_dTrans_log) == 1:
st.write("-",len(sb_DM_dTrans_log), " column was log-transformed:", sb_DM_dTrans_log[0])
elif len(sb_DM_dTrans_log) == 0:
st.write("- No column was log-transformed!")
# sqrt
if len(sb_DM_dTrans_sqrt) > 1:
st.write("-", len(sb_DM_dTrans_sqrt), " columns were sqrt-transformed:", ', '.join(sb_DM_dTrans_sqrt))
elif len(sb_DM_dTrans_sqrt) == 1:
st.write("-",len(sb_DM_dTrans_sqrt), " column was sqrt-transformed:", sb_DM_dTrans_sqrt[0])
elif len(sb_DM_dTrans_sqrt) == 0:
st.write("- No column was sqrt-transformed!")
# square
if len(sb_DM_dTrans_square) > 1:
st.write("-", len(sb_DM_dTrans_square), " columns were squared:", ', '.join(sb_DM_dTrans_square))
elif len(sb_DM_dTrans_square) == 1:
st.write("-",len(sb_DM_dTrans_square), " column was squared:", sb_DM_dTrans_square[0])
elif len(sb_DM_dTrans_square) == 0:
st.write("- No column was squared!")
# centering
if len(sb_DM_dTrans_cent) > 1:
st.write("-", len(sb_DM_dTrans_cent), " columns were centered:", ', '.join(sb_DM_dTrans_cent))
elif len(sb_DM_dTrans_cent) == 1:
st.write("-",len(sb_DM_dTrans_cent), " column was centered:", sb_DM_dTrans_cent[0])
elif len(sb_DM_dTrans_cent) == 0:
st.write("- No column was centered!")
# standardize
if len(sb_DM_dTrans_stand) > 1:
st.write("-", len(sb_DM_dTrans_stand), " columns were standardized:", ', '.join(sb_DM_dTrans_stand))
elif len(sb_DM_dTrans_stand) == 1:
st.write("-",len(sb_DM_dTrans_stand), " column was standardized:", sb_DM_dTrans_stand[0])
elif len(sb_DM_dTrans_stand) == 0:
st.write("- No column was standardized!")
# normalize
if len(sb_DM_dTrans_norm) > 1:
st.write("-", len(sb_DM_dTrans_norm), " columns were normalized:", ', '.join(sb_DM_dTrans_norm))
elif len(sb_DM_dTrans_norm) == 1:
st.write("-",len(sb_DM_dTrans_norm), " column was normalized:", sb_DM_dTrans_norm[0])
elif len(sb_DM_dTrans_norm) == 0:
st.write("- No column was normalized!")
# numeric category
if sb_DM_dTrans_numCat is not None:
if len(sb_DM_dTrans_numCat) > 1:
st.write("-", len(sb_DM_dTrans_numCat), " columns were transformed to numeric categories:", ', '.join(sb_DM_dTrans_numCat))
elif len(sb_DM_dTrans_numCat) == 1:
st.write("-",len(sb_DM_dTrans_numCat), " column was transformed to numeric categories:", sb_DM_dTrans_numCat[0])
elif sb_DM_dTrans_numCat is None:
st.write("- No column was transformed to numeric categories!")
# multiplication
if sb_DM_dTrans_mult != 0:
st.write("-", "Number of variable multiplications: ", sb_DM_dTrans_mult)
elif sb_DM_dTrans_mult == 0:
st.write("- No variables were multiplied!")
# division
if sb_DM_dTrans_div != 0:
st.write("-", "Number of variable divisions: ", sb_DM_dTrans_div)
elif sb_DM_dTrans_div == 0:
st.write("- No variables were divided!")
st.write("")
st.write("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# UPDATED DATA SUMMARY
# Show only if changes were made
if any(v for v in [sb_DM_delCols, sb_DM_dImp_num, sb_DM_dImp_other, sb_DM_dTrans_log, sb_DM_dTrans_sqrt, sb_DM_dTrans_square, sb_DM_dTrans_cent, sb_DM_dTrans_stand, sb_DM_dTrans_norm, sb_DM_dTrans_numCat ] if v is not None) or sb_DM_delDup == "Yes" or sb_DM_delRows_wNA == "Yes" or sb_DM_dTrans_mult != 0 or sb_DM_dTrans_div != 0 or filter_var != "-" or delRows!='-' or keepRows!='-' or len(sb_DM_keepCols) > 0:
dev_expander_dsPost = st.beta_expander("Explore cleaned and transformed data info and stats ", expanded = False)
with dev_expander_dsPost:
if df.shape[1] > 0 and df.shape[0] > 0:
# Show cleaned and transformed data & data info
df_summary_post = fc.data_summary(df)
if st.checkbox("Show cleaned and transformed data ", value = False, key = session_state.id):
n_rows_post = df.shape[0]
n_cols_post = df.shape[1]
st.dataframe(df)
st.write("Data shape: ", n_rows_post, "rows and ", n_cols_post, "columns")
# Download transformed data:
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="Clean. and transf. data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "CleanedTransfData__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned and transformed data</a>
""",
unsafe_allow_html=True)
st.write("")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl2 = st.checkbox("Show duplicates and NAs info (processed) ", value = False, key = session_state.id)
if check_nasAnddupl2:
index_c = []
for c in df.columns:
for r in df.index:
if pd.isnull(df[c][r]):
index_c.append(r)
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", len(pd.unique(sorted(index_c))))
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(sorted(index_c))))))
# Show cleaned and transformed variable info
if st.checkbox("Show cleaned and transformed variable info ", value = False, key = session_state.id):
st.write(df_summary_post["Variable types"])
# Show summary statistics (cleaned and transformed data)
if st.checkbox('Show summary statistics (cleaned and transformed data) ', value = False, key = session_state.id):
st.write(df_summary_post["ALL"].style.set_precision(user_precision))
# Download link
output = BytesIO()
excel_file = | pd.ExcelWriter(output, engine="xlsxwriter") | pandas.ExcelWriter |
# -*- coding: utf-8 -*-
"""
This module contains all the methods required to request the data from
a particular object, obtain it from the ESA NEOCC portal and parse it
to show it properly. The information of the object is shown in the
ESA NEOCC in different tabs that correspond to the different classes
within this module.
* Project: NEOCC portal Python interface
* Property: European Space Agency (ESA)
* Developed by: Elecnor Deimos
* Author: <NAME>
* Issue: 2.1.0
* Date: 01-03-2021
* Purpose: Module which requests and parses list data from ESA NEOCC
* Module: tabs.py
* History:
======== =========== =====================================================
Version Date Change History
======== =========== =====================================================
1.0 26-02-2021 Initial version
    1.1      26-03-2021  Physical properties and summary functionalities added
1.2 17-05-2021 Adding *help* property for dataframes.\n
Parsing of diameter property in *summary* and
*physical_properties* has been modified to add
robustness.\n
In *physical_properties* the parsing of properties
has been modified to include cases with more
information.\n
Adding timeout of 90 seconds.
1.3 16-06-2021 URLs and timeout from configuration file for
astroquery implementation.\n
Change time format to datetime ISO format.\n
Change to correct types in attributes (e.g.,
matrices, etc.)\n
Change ephemerides skyfooter to fix bug.\n
Change *get_matrix* from *orbit_properties* for
objects with 2 non-gravitational parameters.
1.3.1 29-06-2021 No changes
1.4.0 29-10-2021 Tab physical_properties has been recoded to parse the
information through a request in the portal instead
of parsing the html.\n
Get URL function now contains the file extension for
physical properties.\n
                         Parsing of ephemerides has been changed to adapt to
                         the new format.\n
Orb_type attribute added in tab *orbit_properties*.\n
Bug fix in tab *observations*.\n
Adding redundancy for tab *summary* parsing.
2.0.0 21-01-2022 Prepare module for Astroquery integration
2.1.0 01-03-2022 Remove *parse* dependency
======== =========== =====================================================
© Copyright [European Space Agency][2022]
All rights reserved
"""
import io
import logging
import time
import re
from datetime import datetime, timedelta
import pandas as pd
import requests
from bs4 import BeautifulSoup
from astroquery.esa.neocc import conf
# Import URLs and TIMEOUT
API_URL = conf.API_URL
EPHEM_URL = conf.EPHEM_URL
SUMMARY_URL = conf.SUMMARY_URL
TIMEOUT = conf.TIMEOUT
VERIFICATION = conf.SSL_CERT_VERIFICATION
def get_object_url(name, tab, **kwargs):
"""Get url from requested object and tab name.
Parameters
----------
name : str
Name of the requested object.
tab : str
Name of the request tab. Valid names are: *summary,
orbit_properties, physical_properties, observations,
ephemerides, close_approaches and impacts*.
**kwargs : str
orbit_properties and ephemerides tabs required additional
arguments to work:
* *orbit_properties*: the required additional arguments are:
* *orbital_elements* : str (keplerian or equinoctial)
* *orbit_epoch* : str (present or middle)
* *ephemerides*: the required additional arguments are:
* *observatory* : str (observatory code, e.g. '500', 'J04', etc.)
* *start* : str (start date in YYYY-MM-DD HH:MM)
* *stop* : str (end date in YYYY-MM-DD HH:MM)
* *step* : str (time step, e.g. '2', '15', etc.)
* *step_unit* : str (e.g. 'days', 'minutes', etc.)
Returns
-------
url : string
Final url from which data is requested.
Raises
------
KeyError
If the requested tab is not in the dictionary.
ValueError
If the elements requested are not valid.
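    Examples
    --------
    Illustrative calls only; the object name below is just an example::
        >>> get_object_url('433 Eros', 'impacts')
        '433%20Eros.risk'
        >>> get_object_url('433 Eros', 'orbit_properties',
        ...                orbital_elements='keplerian',
        ...                orbit_epoch='present')
        '433%20Eros.ke1'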
"""
# Define the parameters of each list
tab_dict = {"impacts": '.risk',
"close_approaches": '.clolin',
"physical_properties" : '.phypro',
"observations": '.rwo',
"orbit_properties": ['.ke0', '.ke1', '.eq0', '.eq1']}
# Raise error is input is not in dictionary
if tab not in tab_dict:
        raise KeyError('Valid list names are impacts, close_approaches,'
                       ' physical_properties, observations and'
                       ' orbit_properties')
# Check if orbital_elements is an input
if 'orbital_elements' in kwargs:
# Check if the elements are Keplerian or Equinoctial
if kwargs['orbital_elements'] == "keplerian":
            # Check if the epoch is the present day or the middle of the observation arc
if kwargs['orbit_epoch'] == "present":
url = str(name).replace(' ', '%20') + tab_dict[tab][1]
elif kwargs['orbit_epoch'] == "middle":
url = str(name).replace(' ', '%20') + tab_dict[tab][0]
elif kwargs['orbital_elements'] == "equinoctial":
if kwargs['orbit_epoch'] == "present":
url = str(name).replace(' ', '%20') + tab_dict[tab][3]
elif kwargs['orbit_epoch'] == "middle":
url = str(name).replace(' ', '%20') + tab_dict[tab][2]
else:
            raise ValueError('The introduced file type does not exist. '
                             'Check that the orbit elements (keplerian '
                             'or equinoctial) and the orbit epoch '
                             '(present or middle) are valid.')
else:
url = str(name).replace(' ', '%20') + tab_dict[tab]
return url
def get_object_data(url):
"""Get object in byte format from requested url.
Parameters
----------
url : str
URL of the requested data.
Returns
-------
data_obj : object
Object in byte format.
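    Examples
    --------
    Illustrative only, since a real HTTP request is performed (the URL
    below is just an example)::
        >>> raw = get_object_data('433%20Eros.risk')  # doctest: +SKIP
        >>> isinstance(raw, bytes)  # doctest: +SKIP
        True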
"""
# Get data from URL
data_obj = requests.get(API_URL + url, timeout=TIMEOUT,
verify=VERIFICATION).content
    # Return the raw content to be parsed by the caller
return data_obj
def get_indexes(dfobj, value):
"""Get a list with location index of a value or string in the
DataFrame requested.
Parameters
----------
dfobj : pandas.DataFrame
Data frame where the value will be searched.
value : str, int, float
String, integer or float to be searched.
Returns
-------
listofpos : list
List which contains the location of the value in the Data
frame. The first elements will correspond to the index and
the second element to the columns
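    Examples
    --------
    A minimal, self-contained illustration with a hypothetical data
    frame::
        >>> df = pd.DataFrame({'A': [1, 'x'], 'B': ['x', 3]})
        >>> [(int(row), col) for row, col in get_indexes(df, 'x')]
        [(1, 'A'), (0, 'B')]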
"""
# Empty list
listofpos = []
# isin() method will return a dataframe with boolean values,
# True at the positions where element exists
result = dfobj.isin([value])
# any() method will return a boolean series
seriesobj = result.any()
# Get list of column names where element exists
columnnames = list(seriesobj[seriesobj].index)
# Iterate over the list of columns and extract the row index
# where element exists
for col in columnnames:
rows = list(result[col][result[col]].index)
for row in rows:
listofpos.append((row, col))
return listofpos
class Impacts:
"""This class contains information of object possible impacts.
Attributes
    ----------
impacts : pandas.DataFrame
Data frame where are listed all the possible impactors.
arc_start : str
Starting date for optical observations.
arc_end : str
End date for optical observations.
observation_accepted : int
Total number of observations subtracting rejected
observations.
observation_rejected : int
Number of observations rejected.
computation : str
Date of computation (in format YYYYMMDD MJD TimeSys)
info : str
Information from the footer of the requested file.
additional_note : str
Additional information. Some objects (e.g. 99942 Apophis)
have an additional note after the main footer.
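    Examples
    --------
    A minimal sketch of how this class is filled by the module
    (network access is required, hence the skip directives; the
    object name is only an example)::
        >>> raw = get_object_data(get_object_url('99942', 'impacts'))  # doctest: +SKIP
        >>> impacts_tab = Impacts()  # doctest: +SKIP
        >>> impacts_tab._impacts_parser(raw)  # doctest: +SKIP
        >>> impacts_tab.impacts.head()  # doctest: +SKIP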
"""
def __init__(self):
"""Initialization of class attributes
"""
self.impacts = []
self.arc_start = []
self.arc_end = []
self.observation_accepted = []
self.observation_rejected = []
self.computation = []
self.info = []
self.additional_note = []
@staticmethod
def _get_footer(data_obj):
"""Get footer information for impacts content.
Parameters
----------
data_obj : object
Object in byte format.
Returns
-------
obs : list
Number of observations (total and rejected).
arc : list
Start and end dates.
comp : str
Computation date.
info : str
Additional information.
add_note : str
Addition note.
"""
# Decode data using UTF-8 and store in new space of memory
df_txt_d = io.StringIO(data_obj.decode('utf-8'))
# Read data as txt
df_txt = pd.read_fwf(df_txt_d, header=None)
        # Check whether there is an additional note
index = get_indexes(df_txt, '<p> </p>')
# Assign the index for obtaining the rest of attributes and
# additional note value
if not index:
j = 0
add_note = 'There is no additional note for this object'
else:
j = 6
index = index[0][0]
add_note = df_txt.iloc[index+1, 0] + '\n' +\
df_txt.iloc[index+2, 0] + '\n' +\
df_txt.iloc[index+3, 0] + '\n' +\
df_txt.iloc[index+4, 0] + '\n' +\
df_txt.iloc[index+5, 0]
# Remove unnecessary words
add_note = add_note.replace('<p>','').replace('</p>','').\
replace('<span style="color: #0000CD;"><strong>','').\
replace('</strong></span>','').replace('<sup>','^').\
replace('</sup>','')
# Drop NaN values if necessary
df_txt = df_txt.dropna(how='all')
# Template for observations data:
# Based on {total} optical observations (of which {rejected}
# are rejected as outliers)
obs_total = df_txt.iloc[-7-j][0].split('on ')[1].\
split('optical')[0].strip()
obs_rejected = df_txt.iloc[-7-j][0].split('which ')[1].\
split('are')[0].strip()
obs = [obs_total, obs_rejected]
# Template for date of observations: from {start} to {end}.
arc_start = df_txt.iloc[-6-j][0].split('from ')[1].\
split('to ')[0].strip()
arc_end = df_txt.iloc[-6-j][0].split('to ')[1].\
split('.')[0] + '.' + df_txt.iloc[-6-j][0].\
split('to ')[1].split('.')[1]
arc = [arc_start, arc_end]
# Computation date
comp = df_txt.iloc[-1-j][0].split('=')[2].strip()
# Get information text
info = df_txt.iloc[-5-j][0] + '\n\n' + df_txt.iloc[-4-j][0] +\
'\n' + df_txt.iloc[-3-j][0] + '\n\n' + df_txt.iloc[-2-j][0]
return obs, arc, comp, info, add_note
def _impacts_parser(self, data_obj):
"""Parse and arrange the possible impacts data
Parameters
----------
data_obj : object
Object in byte format.
Raises
------
ValueError
If there is not risk file available for requested
object
"""
        # Check whether there is an additional note
df_check_d = io.StringIO(data_obj.decode('utf-8'))
# Read as txt file
df_check = pd.read_fwf(df_check_d, engine='python')
index = get_indexes(df_check, '<p> </p>')
        # Assign the skipfooter depending on whether there is an additional note
if not index:
footer_num = 12
else:
footer_num = 21
# Decode data using UTF-8 and store in memory
df_impacts_d = io.StringIO(data_obj.decode('utf-8'))
# Read data as csv
df_impacts = pd.read_csv(df_impacts_d, skiprows=[0, 2, 3, 4],
skipfooter=footer_num,
delim_whitespace=True, engine='python')
        # Check if there is information for the object
if len(df_impacts.index) == 0:
logging.warning('Required risk file is not '
'available for this object')
raise ValueError('Required risk file is not '
'available for this object')
        # The previous skipfooter allows strange cases to show a proper
# impacts table. For the rest of the cases an additional row
# must be dropped
if df_impacts.iloc[-1,0] == 'Based':
# Drop last row
df_impacts = df_impacts.iloc[:-1]
# Reassign numeric types to columns
df_impacts['MJD'] = pd.to_numeric(df_impacts['MJD'])
df_impacts['sigimp'] = pd.to_numeric(df_impacts['sigimp'])
df_impacts['dist'] = pd.to_numeric(df_impacts['dist'])
df_impacts['width'] = pd.to_numeric(df_impacts['width'])
df_impacts['p_RE'] = pd.to_numeric(df_impacts['p_RE'])
df_impacts['exp.'] = pd.to_numeric(df_impacts['exp.'])
df_impacts['en.'] = pd.to_numeric(df_impacts['en.'])
df_impacts['PS'] = pd.to_numeric(df_impacts['PS'])
# Convert Date column to datetime format
        # Create auxiliary columns
df_impacts[['date1','date2']] = df_impacts['date']\
.str.split(".",expand=True)
# Convert each auxiliary column to datetime format and add
df_impacts['date'] = pd.to_datetime(df_impacts['date1'],
format='%Y/%m/%d') +\
(df_impacts['date2'].astype(float)/1e3)\
.map(timedelta)
# Remove auxiliary columns
df_impacts = df_impacts.drop(['date1','date2'], axis=1)
# Add number of decimals
df_impacts['dist'].map(lambda x: f"{x:.2f}")
df_impacts['width'].map(lambda x: f"{x:.3f}")
# Rename new column and drop duplicate columns
col_dict = {'exp.': 'Exp. Energy in MT',
'en.': 'PS',
'PS': 'TS'}
df_impacts = df_impacts.drop(columns=['+/-',
'TS']).rename(
columns=col_dict)
# Adding help to impacts Dataframe
df_impacts.help = ('Data frame with possible impacts '
'information:\n'
'-Date: date for the potential impact in '
'datetime format\n'
'-MJD: Modified Julian Day for the '
'potential impact\n'
'-sigma: approximate location along the '
'Line Of Variation (LOV) in sigma space\n'
'-sigimp: The lateral distance in '
'sigma-space from the LOV to the Earth '
'surface. A zero implies that the LOV '
                           'passes through the Earth\n'
'-dist: Minimum Distance in Earth radii. '
'The lateral distance from the LOV to the '
'center of the Earth\n'
'-width: one-sigma semi-width of the '
'Target Plane confidence region in Earth '
'radii\n'
'-stretch: Stretching factor. '
'It indicates how much the '
'confidence region at the epoch has been '
'stretched by the time of the approach. This is '
'a close cousin of the Lyapounov exponent. '
'Units are in Earth radii divided by sigma '
'(RE/sig)\n'
'-p_RE: probability of Earth Impact (IP)\n'
'-Exp. Energy in MT: Expected energy. It is '
'the product of the impact energy and the '
'impact probability\n'
'-PS: Palermo Scale\n'
'-TS: Torino Scale')
# Assign Data structure to attribute
self.impacts = df_impacts
# Get info from footer
footer = self._get_footer(data_obj)
# Assign parsed data to attributes
# Change format to datetime and show in isoformat()
arc_start = footer[1][0].split('.')
arc_start = datetime.strptime(arc_start[0], '%Y/%m/%d') +\
timedelta(float(arc_start[1])/1e3)
self.arc_start = arc_start.isoformat()
# Change format to datetime and show in isoformat()
arc_end = footer[1][1].split('.')
arc_end = datetime.strptime(arc_end[0], '%Y/%m/%d') +\
timedelta(float(arc_end[1])/1e3)
self.arc_end = arc_end.isoformat()
self.observation_accepted = int(footer[0][0]) - \
int(footer[0][1])
self.observation_rejected = int(footer[0][1])
self.computation = footer[2]
self.additional_note = footer[4]
# Assign info text from pandas
self.info = footer[3]
class CloseApproaches:
"""This class contains information of object close approaches.
"""
@staticmethod
def clo_appr_parser(data_obj):
"""Parse and arrange the close approaches data.
Parameters
----------
data_obj : object
Object in byte format.
Returns
-------
df_close_appr : pandas.DataFrame
Data frame with the close approaches information.
Raises
------
ValueError
If file is empty.
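        Examples
        --------
        Illustrative only (a network request is needed; the object
        name is just an example)::
            >>> url = get_object_url('99942', 'close_approaches')
            >>> df_ca = CloseApproaches.clo_appr_parser(get_object_data(url))  # doctest: +SKIP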
"""
# Decode data using UTF-8 and store in memory
df_impacts_d = io.StringIO(data_obj.decode('utf-8'))
# Check if the decoded data is empty before reading
if not df_impacts_d.getvalue():
df_close_appr = pd.DataFrame()
else:
# Read data as csv
df_close_appr = pd.read_csv(df_impacts_d,
delim_whitespace=True)
# Convert Date column to datetime format
            # Create auxiliary columns
df_close_appr[['date1','date2']] =\
df_close_appr['CALENDAR-TIME'].str.split(".",
expand=True)
# Convert each auxiliary column to datetime format and add
df_close_appr['CALENDAR-TIME'] =\
pd.to_datetime(df_close_appr['date1'],
format='%Y/%m/%d') +\
(df_close_appr['date2'].\
astype(float)/1e5).map(timedelta)
# Remove auxiliary columns
df_close_appr = df_close_appr.drop(['date1','date2'], axis=1)
# Create help attribute
df_close_appr.help = ('Close approaches data frame contains:\n'
                              '-BODY: planet or massive asteroid '
                              'involved in the close approach\n'
'-CALENDAR-TIME: date of the close '
'approach in datetime format\n'
                              '-MJD-TIME: Modified Julian Date of the '
'approach\n'
'-TIME-UNCERT.: time uncertainty in '
'MJD2000\n'
'-NOM.-DISTANCE: Nominal distance at '
'the close approach in au\n'
'-MIN.-POSS.-DIST.: Minimum possible '
'distance at the close approach in au\n'
'-DIST.-UNCERT.: distance uncertainty in '
'au\n'
'-STRETCH: It indicates how much the '
'confidence region at the epoch has '
'been stretched by the time of the '
'approach. This is a close cousin of '
'the Lyapounov exponent. Units in au\n'
'-WIDTH: width of the stretching\n'
                              '-PROBABILITY: Close approach '
'probability. A value of 1 indicates a '
'certain close approach')
return df_close_appr
class PhysicalProperties:
"""
This class contains information of asteroid physical properties
Attributes
    ----------
physical_properties : DataFrame
Data structure containing property, value, units and source
from the complete set of physical properties
sources : DataFrame
Data structure containing source number, name and
additional information
Raises
------
ValueError
If the name of the object is not found
"""
def __init__(self):
"""
Initialization of class attributes
"""
# Physical properties
self.physical_properties = []
# Sources
self.sources = []
@staticmethod
def _get_prop_sources(data_obj_d, rows):
"""
Obtain the sources parsed
Parameters
----------
data_obj_d : object
Object in byte format decoded.
rows : int
Index where references start
Returns
-------
sources : Data structure
Data structure containing all property sources
"""
        # Read as csv with '],' as delimiter, since splitting on ','
        # alone would break the column parsing
        sources = pd.read_csv(data_obj_d, header=None, skiprows=rows+2,
                              engine='python', delimiter='],')
# Add the erased bracket
sources[0] = sources[0]+']'
        # Split the second column at the first comma
sources[[1, 2]] = sources[1].str.split(r',', 1, expand=True)
        # Insert line breaks before numbered references for better reading
sources[2] = sources[2].str.replace(r"\s(?=\d\.\))", r"\n",
regex=True)
        # Name columns as in the portal
sources.columns = ['No.', 'Name', 'Additional']
return sources
def _phys_prop_parser(self, data_obj):
"""
Parse and arrange the physical properties data
Parameters
----------
data_obj : object
Object in byte format.
Raises
------
ValueError
If the name of the object is not encountered
"""
        # Decode the data and check that the object exists
df_check_d = io.StringIO(data_obj.decode('utf-8'))
if not df_check_d.getvalue():
raise ValueError('Object not found or misspelt')
# Read as txt file
df_check = pd.read_fwf(df_check_d, header=None,
engine='python')
# Get index where references start and count rows
ref_index = get_indexes(df_check, "REFERENCES")
n_rows = ref_index[0][0]
        # Set reference point at the beginning of the file
df_check_d.seek(0)
df_check = pd.read_fwf(df_check_d, header=None,
engine='python', nrows=n_rows)
        # Handle the exception for the Taxonomy (all) property
        # Split the complete DF to obtain the columns
df_check = df_check[0].str.split(',', expand=True)
# Initialize index
index = 0
if len(df_check.columns) > 4:
# rest = len(df_check.columns) - 4
# Iterate over each element in last col to find
# rows with additional elements separated by commas
for element in df_check.iloc[:, -1]:
if isinstance(element, str):
for i in range(2, len(df_check.columns)-2):
df_check.iloc[index, 1] =\
df_check.iloc[index, 1] + ',' +\
df_check.iloc[index, i]
df_check.iloc[index,2] = df_check.iloc[index,
len(df_check.columns)-2]
df_check.iloc[index,3] = df_check.iloc[index,
len(df_check.columns)-1]
index += 1
# Only get the main columns
df_check = df_check.iloc[:, 0:4]
        # Set reference point at the beginning of the file
df_check_d.seek(0)
# Read as csv for parsing
ast_prop = pd.read_csv(df_check_d, header=None, skiprows=0,
nrows=n_rows, delimiter=',',
on_bad_lines='skip')
# Create if condition for the exception since Taxonomy (all)
# will be skipped. The DF is updated with the values of the
# redundant one
if not 'Taxonomy (all)' in ast_prop.values:
ast_prop.update(df_check)
elif not 'Quality' in ast_prop.values:
ast_prop.update(df_check)
# Rename columns
ast_prop.columns = ['Property', 'Value(s)', 'Units',
'Reference(s)']
# Group identical properties into one. The values from the
        # other columns will be grouped using commas (as a list)
phys_prop = ast_prop.groupby(['Property'], as_index=False,
sort=False)[['Value(s)', 'Units',
'Reference(s)']]\
.agg(','.join)
# Split using commas to create arrays
phys_prop['Value(s)'] = phys_prop['Value(s)']\
.apply(lambda x: x.split(',')
if isinstance(x, str) and ',' in x else x)
phys_prop['Units'] = phys_prop['Units']\
.apply(lambda x: x.split(',')
if isinstance(x, str) and ',' in x else x)
phys_prop['Reference(s)'] = phys_prop['Reference(s)']\
.apply(lambda x: x.split(',')
if isinstance(x, str) and ',' in x else x)
# Values need to be converted to numeric type when possible
phys_prop['Value(s)'] = phys_prop['Value(s)']\
.apply(lambda x: pd.to_numeric(x,
errors='ignore', downcast='float'))
# Properties
self.physical_properties = phys_prop
# Sources
        # Set reference point at the beginning of the file
df_check_d.seek(0)
self.sources = self._get_prop_sources(df_check_d, n_rows)
class AsteroidObservations:
"""This class contains information of asteroid observations.
Attributes
----------
version : float
File version.
errmod : str
Error model for the data.
rmsast : float
Root Mean Square for asteroid observations.
rmsmag : float
Root Mean Square for magnitude.
optical_observations : pandas.DataFrame
Data frame which contains optical observations (without roving
observer and satellite observation).
radar_observations : pandas.DataFrame
Data structure which contains radar observations.
roving_observations : pandas.DataFrame
Data structure which contains "roving observer" observations.
sat_observations : pandas.DataFrame
Data structure which contains satellite observations.
"""
def __init__(self):
"""Initialization of class attributes
"""
self.version = []
self.errmod = []
self.rmsast = []
self.rmsmag = []
self.optical_observations = []
self.radar_observations = []
self.roving_observations = []
self.sat_observations = []
@staticmethod
def _get_head_obs(df_d):
"""Get and parse header of asteroid observations file.
Parameters
----------
df_d : object
StringIO object with data.
Returns
-------
ver : int
File version.
err : str
Error model.
ast : float
Root Mean Square for asteroid observations.
mag : float
Root Mean Square for magnitude.
"""
df_head = pd.read_csv(df_d, nrows=4, header=None)
# Template for version: version = {ver}
ver = df_head.iloc[0][0].split('=')[1].strip()
ver = float(ver)
# Template for errmod: errmod = '{err}'
err = df_head.iloc[1][0].split("'")[1].strip()
# Template for RMSast: RMSast = {ast}
ast = df_head.iloc[2][0].split('=')[1].strip()
ast = float(ast)
# Template for RMSast: RMSmag = {mag}
mag = df_head.iloc[3][0]
if mag == 'END_OF_HEADER':
mag = 'No data for RMSmag'
else:
mag = float(df_head.iloc[3][0].split('=')[1].strip())
return ver, err, ast, mag
@staticmethod
def _get_opt_info(data_obj, diff, head, cols_sep):
"""Get optical information from asteroid observation file.
Parameters
----------
        data_obj : object
            Object in byte format.
        diff : int
            Optical observations data frame length.
        head : list
            Header rows to be skipped.
        cols_sep : list
            Fixed-width column specifications used to parse the
            observations.
Returns
-------
df_optical_obs : pandas.DataFrame
Parsed data frame for optical observations.
df_roving_obs : pandas.DataFrame
Parsed data frame for "roving observer" observations.
df_sat_obs : pandas.DataFrame
Parsed data frame for satellite observations.
"""
        # Decode data to check for v and s optical observations
        df_check = io.StringIO(data_obj.decode('utf-8'))
        # Read the observations using the fixed-width column layout
df_obs = pd.read_fwf(df_check, colspecs=cols_sep, skiprows=head,
engine='python', skipfooter=diff)
# Check if there are "Roving Observer" observations
df_index = df_obs.iloc[:,1:4]
v_indexes = get_indexes(df_index, 'v')
# Check if there are "Roving Observer" observations
s_index = get_indexes(df_index, 's')
        # Keep only the indexes found in the 'T' column
s_indexes = []
for indexes in s_index:
if 'T' in indexes:
s_indexes = s_indexes + [indexes]
        # Initialize the lists which contain the row indexes
v_rows = []
s_rows = []
# Roving Observations
if not v_indexes:
df_roving_obs = 'There are no "Roving Observer" '\
'observations for this object'
else:
# Save a list with the row indexes that needs to be saved
for v_index in v_indexes:
# Add to the list in order to consider header lines
v_rows = v_rows + [v_index[0]+len(head)+1]
# Decode data for final roving observations
df_v = io.StringIO(data_obj.decode('utf-8'))
# Define colspecs fwf
cols_v = [(0,10), (11,12), (12,14), (15,16), (17,21),
(22,24), (25,34), (34,44), (45,55), (56,64),
(65,68)]
            # Use pandas to read these rows
df_roving_obs = pd.read_fwf(df_v, delim_whitespace=True,
skiprows=lambda x: x not in v_rows,
engine='python', header=None,
colspecs=cols_v)
# Rename columns as in file
df_roving_obs.columns = ['Design.', 'K', 'T', 'N', 'Date',
'MM', 'DD.dddddd', 'E longitude',
'Latitude', 'Altitude', 'Obs Code']
# Create and Convert Date column to datetime format
# Create date column in YYYY-MM format
df_roving_obs['Date'] = df_roving_obs['Date'].astype(str) +\
'/' + df_roving_obs['MM'].astype(str)
            # Convert to datetime and add timedelta for days subtracting
# the day added in the datetime conversion for YYYY-MM
df_roving_obs['Date'] = pd.to_datetime(df_roving_obs['Date'],
format='%Y/%m') +\
df_roving_obs['DD.dddddd']\
.map(timedelta)-timedelta(days=1)
# Remove columns for months and days
df_roving_obs = df_roving_obs.drop(['MM','DD.dddddd'], axis=1)
# Satellite Observations
if not s_indexes:
df_sat_obs = 'There are no Satellite observations for '\
'this object'
else:
# Save a list with the row indexes that needs to be saved
for s_index in s_indexes:
                # Add the header offset (len(head)+1) to the index in
                # order to consider header lines
s_rows = s_rows + [s_index[0]+len(head)+1]
# Decode data for final satellite observations
df_s = io.StringIO(data_obj.decode('utf-8'))
# Define colspecs fwf
cols_s = [(0,10), (11,12), (12,15), (15,16), (17,21),
(22,24), (25,34), (34,35), (40,59), (64,83),
(88,107), (108,111)]
            # Use pandas to read these rows
df_sat_obs = pd.read_fwf(df_s, delim_whitespace=True,
skiprows=lambda x: x not in s_rows,
engine='python', header=None,
colspecs=cols_s)
# Rename columns as in file
df_sat_obs.columns = ['Design.', 'K', 'T', 'N', 'Date',
'MM', 'DD.dddddd',
'Parallax info.', 'X', 'Y',
'Z', 'Obs Code']
# Create and Convert Date column to datetime format
# Create date column in YYYY-MM format
df_sat_obs['Date'] = df_sat_obs['Date'].astype(str) +\
'/' + df_sat_obs['MM'].astype(str)
            # Convert to datetime and add timedelta for days subtracting
# the day added in the datetime conversion for YYYY-MM
df_sat_obs['Date'] = pd.to_datetime(df_sat_obs['Date'],
format='%Y/%m') +\
df_sat_obs['DD.dddddd']\
.map(timedelta)-timedelta(days=1)
# Remove columns for months and days
df_sat_obs = df_sat_obs.drop(['MM','DD.dddddd'], axis=1)
            # For satellite observations column "T" contains
            # whitespace. Strip it
df_sat_obs['T'] = df_sat_obs['T'].str.strip()
# Rest of optical observations
df_opt = io.StringIO(data_obj.decode('utf-8'))
# Read data using pandas as text, skiping header and footer
# and v_rows and s_rows if any
df_optical_obs = pd.read_fwf(df_opt, skipfooter=diff,
colspecs=cols_sep,
engine='python',
skiprows=head + v_rows + s_rows)
# Replace NaN values for blank values
df_optical_obs = df_optical_obs.fillna('')
# Rename Columns as in file
df_optical_obs.columns = ['Design.', 'K', 'T', 'N', 'Date',
'MM', 'DD.ddd',
'Date Accuracy', 'RA HH',
'RA MM', 'RA SS.sss', 'RA Accuracy',
'RA RMS', 'RA F', 'RA Bias',
'RA Resid', 'DEC sDD', 'DEC MM',
'DEC SS.ss', 'DEC Accuracy',
'DEC RMS', 'DEC F', 'DEC Bias',
'DEC Resid', 'MAG Val', 'MAG B',
'MAG RMS', 'MAG Resid', 'Ast Cat',
'Obs Code', 'Chi', 'A', 'M']
# Create and Convert Date column to datetime format
# Create date column in YYYY-MM format
df_optical_obs['Date'] = df_optical_obs['Date'].astype(str) +\
'/' + df_optical_obs['MM'].astype(str)
        # Convert to datetime and add timedelta for days subtracting
# the day added in the datetime conversion for YYYY-MM
df_optical_obs['Date'] = pd.to_datetime(df_optical_obs['Date'],
format='%Y/%m') +\
df_optical_obs['DD.ddd']\
.map(timedelta)-timedelta(days=1)
# Remove columns for months and days
df_optical_obs = df_optical_obs.drop(['MM','DD.ddd'], axis=1)
# Create help attribute for dataframe
df_optical_obs.help = ('This dataframe shows the information of '
'optical observations. The fields are:\n'
'-Designation: number or the provisional '
'designation of the asteroid.\n'
'-K, Type: observation type and technology'
' provided by the MPC. Note that for '
                                'satellite (s) and roving (v) observations '
'there are 2 additional dataframes which '
'contain the information given by the MPC.\n'
'-Date: date in UTC iso format.\n'
'-Right Ascension: The data provided include'
' the observation, the a priori accuracy (as'
' supplied by the MPC), the a priori RMS '
'used for weighing, a flag indicating a '
'forced weight, the bias, and the residuals '
'in arcsec.\n'
'-Declination: same format as Right '
'Ascension.\n'
'-Apparent magnitude: The columns contain '
'the apparent magnitude as reported, the a '
'priori RMS used for weighing, and the '
'residual, all in magnitudes.\n'
'-Quality: observatory code is extracted from'
' the MPC published observation, the value of'
' chi from the chi**2 test (characterization '
'of the relative quality of the observation).'
' The "Used A" column is "Yes" if the '
'observation is used in our orbit, and "No" '
'if it has been discarded. The same for the '
'photometry in the "Used M" column.')
return df_optical_obs, df_roving_obs, df_sat_obs
@staticmethod
def _get_rad_info(df_d, index):
"""Get radar information from asteroid observations file
Parameters
----------
df_d : object
stringIO object with data decoded.
index : int
Position at which radar information starts.
Returns
-------
df_rad : pandas.DataFrame
Parsed data frame for radar observations.
"""
# Read decoded DataFrame and skip rows
df_rad = pd.read_fwf(df_d, engine='python', sep=' ',
skiprows=index[0][0]+8)
# Drop NaN columns and rename
df_rad = df_rad.drop(['F', 'S'], axis=1)
# Create Datetime column
df_rad['YYYY'] = df_rad['YYYY'].apply(str) + '-' + \
df_rad['MM'].apply(str) + '-' + df_rad['DD'].apply(str) + \
'-' + df_rad['hh:mm:ss']
df_rad['YYYY'] = pd.to_datetime(df_rad['YYYY'])
# Dropping old name columns
df_rad = df_rad.drop(['MM', 'DD', 'hh:mm:ss'], axis=1)
# Check variable column data
if 'rms' in df_rad.columns:
# Rename columns
cols_dict = {'! Design': 'Design',
'Unnamed: 11': 'F',
'Unnamed: 17': 'S',
'YYYY': 'Datetime'}
df_rad.rename(columns=cols_dict, inplace=True)
else:
# Rename columns
cols_dict = {'! Design': 'Design',
'Unnamed: 10': 'F',
'Unnamed: 16': 'S',
'YYYY': 'Datetime'}
df_rad.rename(columns=cols_dict, inplace=True)
        # Splitting badly joined columns
split1 = df_rad["Accuracy rms"].str.split(" ", n=1,
expand=True)
df_rad["Accuracy"] = split1[0]
df_rad["rms"] = split1[1]
# Dropping old Name columns
df_rad.drop(columns=["Accuracy rms"], inplace=True)
        # Splitting badly joined columns
split2 = df_rad["TRX RCX"].str.split(" ", n=1, expand=True)
        # Make a separate TRX column from the split data
df_rad["TRX"] = split2[0]
        # Make a separate RCX column from the split data
df_rad["RCX"] = split2[1]
# Dropping old Name columns
df_rad.drop(columns=["TRX RCX"], inplace=True)
# Reorder columns
df_rad = df_rad[['Design', 'K', 'T', 'Datetime', 'Measure',
'Accuracy', 'rms', 'F', 'Bias', 'Resid',
'TRX', 'RCX', 'Chi', 'S']]
df_rad.help = ('This dataframe contains the information for '
'radar observations:\n'
'-Designation: number or the provisional '
'designation of the asteroid.\n'
                       '-K, Type: observation type and technology '
'provided by the MPC. A "c" indicates the '
'radar observation is referenced to the '
'asteroid center of mass, and an "s" indicates '
'the measurement is referenced to the radar '
'bounce point.\n'
'-Datetime: date in UTC format.\n'
'-Radar range or range rate: refers to columns '
                       'measure (km or km/day), accuracy (precision of '
'the measurement), rms, F, bias and Resid.\n'
'-Quality: transmit (TRX) and receive (RCX) '
'station are given. When these differ, an '
'observation is considered as belonging to '
'the receiver station. the value of'
' chi from the chi**2 test (characterization '
'of the relative quality of the observation).'
'The "S" column is "Yes" if the '
'observation is used in our orbit, and "No" '
'if it has been discarded.')
return df_rad
def _ast_obs_parser(self, data_obj):
"""Get asteroid observation properties parsed from object data
Parameters
----------
data_obj : object
Object in byte format.
Raises
------
ValueError
If the required observations file is empty or does not exist
"""
# Decode data using UTF-8 and store in memory for header
df_head_d = io.StringIO(data_obj.decode('utf-8'))
# Check file exists or is not empty
if not df_head_d.getvalue():
logging.warning('Required data observations file is '
'empty for this object')
raise ValueError('Required data observations file is '
'empty for this object')
# Obtain header
df_head = self._get_head_obs(df_head_d)
self.version = df_head[0]
self.errmod = df_head[1]
self.rmsast = df_head[2]
self.rmsmag = df_head[3]
# Decode data using UTF-8 and store in memory for
# observations
df_d = io.StringIO(data_obj.decode('utf-8'))
# Check there is valid data for RMS magnitude and set header
# length
if isinstance(self.rmsmag, str):
head = [0, 1, 2, 3, 4]
else:
head = [0, 1, 2, 3, 4, 5]
# Read data in fixed width format
cols = [(0,10), (11,12), (12,15), (15,16), (17,21), (22,24),
(25,38), (40,49), (50,52), (53,55), (56,62), (64,73),
(76,82), (83,84), (87,93), (96,102), (103,106),
(107,109), (110,115), (117,126), (129,135), (136,137),
(140,146), (149,155), (156,161), (161,162), (164,168),
(170,175), (177,179), (180,183), (188,193), (194,195),
(196,197)]
df_p = pd.read_fwf(df_d, colspecs=cols,
skiprows=head, engine='python')
        # Check if there are radar observation data
        if not get_indexes(df_p, '! Object'):
            # No radar data, so there is no footer to skip
diff = 0
# Get observations
total_observations = self._get_opt_info(data_obj, diff,
head, cols)
# Set attributes
self.optical_observations = total_observations[0]
self.radar_observations = 'There is no relevant radar '\
'information'
self.roving_observations = total_observations[1]
self.sat_observations = total_observations[2]
else:
            # Decode data for optical and radar observations
df_rad = io.StringIO(data_obj.decode('utf-8'))
# Get position at which radar observations start
index = get_indexes(df_p, '! Object')
            # Set length of the radar observations block to remove the footer
diff = len(df_p) - index[0][0]
# Get observations
total_observations = self._get_opt_info(data_obj, diff,
head, cols)
# Set attributes
self.optical_observations = total_observations[0]
self.radar_observations = self._get_rad_info(df_rad, index)
self.roving_observations = total_observations[1]
self.sat_observations = total_observations[2]
class OrbitProperties:
"""This class contains information of asteroid orbit properties.
Attributes
----------
form : str
File format.
rectype : str
Record type.
refsys : str
Default reference system.
epoch : str
Epoch in MJD format.
mag : pandas.DataFrame
Data frame which contains magnitude values.
lsp : pandas.DataFrame
Data structure with information about non-gravitational
parameters (model, number of parameters, dimension, etc.).
ngr : pandas.DataFrame
Data frame which contains non-gravitational parameters.
"""
def __init__(self):
"""Initialization of class attributes
"""
# Document info
self.form = []
self.rectype = []
self.refsys = []
# Orbit properties
self.epoch = []
self.mag = []
self.lsp = []
# Non-gravitational parameters
self.ngr = []
@staticmethod
def _get_matrix(dfd, matrix_name, dimension, orbit_element, **kwargs):
"""Get covariance or correlaton matrix from df.
Parameters
----------
dfd : pandas.DataFrame
Data frame with object data to be parsed.
matrix_name : str
Matrix name to be obtained.
dimension : int
Matrix dimension.
orbit_element : str
Orbit elements for the matrix.
**kwargs : str
If there is only one additional NGR parameter, its name should be
passed as ``ngr`` so that it is shown properly in the matrix.
Returns
-------
mat : pandas.DataFrame or str
Data frame with the matrix data, or a message if the matrix is not present.
Raises
------
ValueError
If the matrix name is not correct
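Examples
--------
A hedged usage sketch, assuming ``dfd`` is the data frame already
parsed from the orbit properties file (variable names are
illustrative only)::

    cov6 = OrbitProperties._get_matrix(dfd, 'cov', 6, 'keplerian')
    cov7 = OrbitProperties._get_matrix(dfd, 'cov', 7, 'keplerian',
                                       ngr='Yarkovsky parameter')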
"""
# Define dictionary for types of matrices
matrix_dict = {'cov': 'COV',
'cor': 'COR',
'nor': 'NOR'}
# Define indexes and column names according to orbit element type
mat_var = {'keplerian': ['a', 'e', 'i', 'long. node',
'arg. peric', 'M'],
'equinoctial': ['a', 'e*sin(LP)', 'e*cos(LP)',
'tan(i/2)*sin(LN)', 'tan(i/2)*cos(LN)',
'mean long.']}
# Get matrix location according to its name
if matrix_name in matrix_dict:
i = get_indexes(dfd, matrix_dict[matrix_name])[0][0]
# Check if there is a matrix
if not i:
mat = 'There is no ' + matrix_dict[matrix_name] +\
' matrix for this object'
logging.warning('There is no %s matrix for this object',
matrix_dict[matrix_name])
else:
# Define the matrix according to its dimension
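# The file appears to list the upper triangle of the symmetric
# matrix three values per row, which is why consecutive rows
# (i, i+1, ...) and columns 1-3 are recombined below into a full
# square matrix (an interpretation of the file layout, not stated
# in the file itself).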
if dimension == 6:
# Define matrix structure
mat_data = {mat_var[orbit_element][0]:
[dfd.iloc[i, 1], dfd.iloc[i, 2],
dfd.iloc[i, 3], dfd.iloc[i+1, 1],
dfd.iloc[i+1, 2], dfd.iloc[i+1, 3]],
mat_var[orbit_element][1]:
[dfd.iloc[i, 2], dfd.iloc[i+2, 1],
dfd.iloc[i+2, 2], dfd.iloc[i+2, 3],
dfd.iloc[i+3, 1], dfd.iloc[i+3, 2]],
mat_var[orbit_element][2]:
[dfd.iloc[i, 3], dfd.iloc[i+2, 2],
dfd.iloc[i+3, 3], dfd.iloc[i+4, 1],
dfd.iloc[i+4, 2], dfd.iloc[i+4, 3]],
mat_var[orbit_element][3]:
[dfd.iloc[i+1, 1], dfd.iloc[i+2, 3],
dfd.iloc[i+4, 1], dfd.iloc[i+5, 1],
dfd.iloc[i+5, 2], dfd.iloc[i+5, 3]],
mat_var[orbit_element][4]:
[dfd.iloc[i+1, 2], dfd.iloc[i+3, 1],
dfd.iloc[i+4, 2], dfd.iloc[i+5, 2],
dfd.iloc[i+6, 1], dfd.iloc[i+6, 2]],
mat_var[orbit_element][5]:
[dfd.iloc[i+1, 3], dfd.iloc[i+3, 2],
dfd.iloc[i+4, 3], dfd.iloc[i+5, 3],
dfd.iloc[i+6, 2], dfd.iloc[i+6, 3]]}
# Rename matrix indexes
matrix_indexes = mat_var[orbit_element]
# Build the matrix
mat = pd.DataFrame(mat_data, index=matrix_indexes)
elif dimension == 7:
# Obtain from kwargs the non-gravitational parameter
ngr_parameter = kwargs['ngr']
# Define matrix structure
mat_data = {mat_var[orbit_element][0]:
[dfd.iloc[i, 1], dfd.iloc[i, 2],
dfd.iloc[i, 3], dfd.iloc[i+1, 1],
dfd.iloc[i+1, 2], dfd.iloc[i+1, 3],
dfd.iloc[i+2, 1]],
mat_var[orbit_element][1]:
[dfd.iloc[i, 2], dfd.iloc[i+2, 2],
dfd.iloc[i+2, 3], dfd.iloc[i+3, 1],
dfd.iloc[i+3, 2], dfd.iloc[i+3, 3],
dfd.iloc[i+4, 1]],
mat_var[orbit_element][2]:
[dfd.iloc[i, 3], dfd.iloc[i+2, 3],
dfd.iloc[i+4, 2], dfd.iloc[i+4, 3],
dfd.iloc[i+5, 1], dfd.iloc[i+5, 2],
dfd.iloc[i+5, 3]],
mat_var[orbit_element][3]:
[dfd.iloc[i+1, 1], dfd.iloc[i+3, 1],
dfd.iloc[i+4, 3], dfd.iloc[i+6, 1],
dfd.iloc[i+6, 2], dfd.iloc[i+6, 3],
dfd.iloc[i+7, 1]],
mat_var[orbit_element][4]:
[dfd.iloc[i+1, 2], dfd.iloc[i+3, 2],
dfd.iloc[i+5, 1], dfd.iloc[i+6, 2],
dfd.iloc[i+7, 2], dfd.iloc[i+7, 3],
dfd.iloc[i+8, 1]],
mat_var[orbit_element][5]:
[dfd.iloc[i+1, 3], dfd.iloc[i+3, 3],
dfd.iloc[i+5, 2], dfd.iloc[i+6, 3],
dfd.iloc[i+7, 3], dfd.iloc[i+8, 2],
dfd.iloc[i+8, 3]],
ngr_parameter: [dfd.iloc[i+2, 1],
dfd.iloc[i+4, 1],
dfd.iloc[i+5, 3],
dfd.iloc[i+7, 1],
dfd.iloc[i+8, 1],
dfd.iloc[i+8, 3],
dfd.iloc[i+9, 1]]}
# Rename matrix indexes
matrix_indexes = mat_var[orbit_element] + [ngr_parameter]
# Build the matrix
mat = pd.DataFrame(mat_data, index=matrix_indexes)
elif dimension == 8:
# Define matrix structure
mat_data = {mat_var[orbit_element][0]:
[dfd.iloc[i, 1], dfd.iloc[i, 2],
dfd.iloc[i, 3], dfd.iloc[i+1, 1],
dfd.iloc[i+1, 2], dfd.iloc[i+1, 3],
dfd.iloc[i+2, 1], dfd.iloc[i+2, 2]],
mat_var[orbit_element][1]:
[dfd.iloc[i, 2], dfd.iloc[i+2, 3],
dfd.iloc[i+3, 1], dfd.iloc[i+3, 2],
dfd.iloc[i+3, 3], dfd.iloc[i+4, 1],
dfd.iloc[i+4, 2], dfd.iloc[i+4, 3]],
mat_var[orbit_element][2]:
[dfd.iloc[i, 3], dfd.iloc[i+3, 1],
dfd.iloc[i+5, 1], dfd.iloc[i+5, 2],
dfd.iloc[i+5, 3], dfd.iloc[i+6, 1],
dfd.iloc[i+6, 2], dfd.iloc[i+6, 3]],
mat_var[orbit_element][3]:
[dfd.iloc[i+1, 1], dfd.iloc[i+3, 2],
dfd.iloc[i+5, 2], dfd.iloc[i+7, 1],
dfd.iloc[i+7, 2], dfd.iloc[i+7, 3],
dfd.iloc[i+8, 1], dfd.iloc[i+8, 2]],
mat_var[orbit_element][4]:
[dfd.iloc[i+1, 2], dfd.iloc[i+3, 3],
dfd.iloc[i+5, 3], dfd.iloc[i+7, 2],
dfd.iloc[i+8, 3], dfd.iloc[i+9, 1],
dfd.iloc[i+9, 2], dfd.iloc[i+9, 3]],
mat_var[orbit_element][5]:
[dfd.iloc[i+1, 3], dfd.iloc[i+4, 1],
dfd.iloc[i+6, 1], dfd.iloc[i+7, 3],
dfd.iloc[i+9, 1], dfd.iloc[i+10, 1],
dfd.iloc[i+10, 2], dfd.iloc[i+10, 3]],
'Area-to-mass ratio':
[dfd.iloc[i+2, 1], dfd.iloc[i+4, 2],
dfd.iloc[i+6, 2], dfd.iloc[i+8, 1],
dfd.iloc[i+9, 2], dfd.iloc[i+10, 2],
dfd.iloc[i+11, 1], dfd.iloc[i+11, 2]],
'Yarkovsky parameter':
[dfd.iloc[i+2, 2], dfd.iloc[i+4, 3],
dfd.iloc[i+6, 3], dfd.iloc[i+8, 2],
dfd.iloc[i+9, 3], dfd.iloc[i+10, 3],
dfd.iloc[i+11, 2], dfd.iloc[i+11, 3]]}
# Rename matrix indexes
matrix_indexes = mat_var[orbit_element] +\
['Area-to-mass ratio', 'Yarkovsky parameter']
# Build the matrix
mat = pd.DataFrame(mat_data, index=matrix_indexes)
else: # pragma: no cover
raise ValueError('Valid matrix names are cov, cor and nor')
return mat
@staticmethod
def _get_head_orb(data_obj):
"""Get and parse header of orbit properties file.
Parameters
----------
data_obj : object
Object data in byte format.
Returns
-------
form : str
File format.
rectype : str
File record type.
refsys : str
Default reference system.
"""
# Decode data using UTF-8 and store in memory for doc info
df_info_d = io.StringIO(data_obj.decode('utf-8'))
# Read as txt file
df_info = pd.read_fwf(df_info_d, nrows=3, header=None)
# Template for format data:
# format = '{format}' ! file format
format_txt = df_info.iloc[0][0].split("'")[1].strip()
form = format_txt
# Template for record type:
# rectype = '{rectype}' ! record type (1L/ML)
rectype = df_info.iloc[1][0].split("'")[1].strip()
# Template for reference system:
# refsys = {refsys} ! default reference system"
refsys = df_info.iloc[2][0].split("=")[1].split("!")[0].strip()
return form, rectype, refsys
def _orb_prop_parser(self, data_obj):
"""Get orbit properties parsed from object data
Parameters
----------
data_obj : object
Object data in byte format.
Raises
------
ValueError
If the orbit properties file is empty or does not exist
"""
# Decode data using UTF-8 and store in memory for orb props
df_orb_d = io.StringIO(data_obj.decode('utf-8'))
# Check file exists or is not empty
if not df_orb_d.getvalue():
logging.warning('Required orbit properties file is '
'empty for this object')
raise ValueError('Required orbit properties file is '
'empty for this object')
# Obtain header
df_head = self._get_head_orb(data_obj)
self.form = df_head[0]
self.rectype = df_head[1]
self.refsys = df_head[2]
# Check if there is an additional line
df_check_d = io.StringIO(data_obj.decode('utf-8'))
# Read as txt file
df_check = pd.read_fwf(df_check_d, skiprows=[0,1,2,3,4],
header=None, engine='python',
delim_whitespace=True)
if 'SOLUTION' in df_check.iloc[0][0]:
last_skip_rows = [0,1,2,3,4,5,10]
else:
last_skip_rows = [0,1,2,3,4,9]
# Read data as csv
df_orb = pd.read_csv(df_orb_d, delim_whitespace=True,
skiprows=last_skip_rows,
engine='python')
# Epoch in MJD
self.epoch = df_orb.iloc[1, 1] + ' MJD'
# MAG
# Get MAG index
mag_index = get_indexes(df_orb, 'MAG')
# Check if MAG parameter is assigned
if bool(mag_index) is False:
self.mag = 'There is no MAG assigned to this object'
if 'SOLUTION' in df_check.iloc[0][0]:
last_skip_rows = [0,1,2,3,4,5,9]
else:
last_skip_rows = [0,1,2,3,4,8]
else:
mag = df_orb.iloc[2:3, 1:3].reset_index(drop=True)
# MAG - Rename columns and indexes
mag.index = ['MAG']
mag.columns = ['', '']
self.mag = mag.astype(float)
# Decode data using UTF-8 and store in memory for lsp
df_new_d = io.StringIO(data_obj.decode('utf-8'))
# Read data as csv
df_new = pd.read_csv(df_new_d, delim_whitespace=True,
skiprows=last_skip_rows,
engine='python')
# LSP
# Get LSP index
lsp_index = get_indexes(df_new, 'LSP')[0][0]
# Check if there are additional non-gravitational parameters
if int(df_new.iloc[lsp_index,3]) == 7:
lsp = df_new.iloc[lsp_index:lsp_index+1, 1:5]
lsp.columns = ['model used', 'number of model parameters',
'dimension', 'list of parameters determined']
ngr = df_new.iloc[lsp_index+3:lsp_index+4, 1:3].astype(float)
ngr.index = ['NGR']
ngr.columns = ['Area-to-mass ratio in m^2/ton',
'Yarkovsky parameter in 1E-10au/day^2']
elif int(df_new.iloc[lsp_index,3]) == 8:
lsp = df_new.iloc[lsp_index:lsp_index+1, 1:6]
lsp.columns = ['model used', 'number of model parameters',
'dimension', 'list of parameters determined', '']
ngr = df_new.iloc[lsp_index+3:lsp_index+4, 1:3].astype(float)
ngr.index = ['NGR']
ngr.columns = ['Area-to-mass ratio in m^2/ton',
'Yarkovsky parameter in 1E-10au/day^2']
else:
lsp = df_new.iloc[lsp_index:lsp_index+1, 1:4]
lsp.columns = ['model used', 'number of model parameters',
'dimension']
ngr = ('There are no non-gravitational parameters '
'calculated for this object')
# Rename indexes
lsp.index = ['LSP']
self.lsp = lsp.astype(int)
# Non-gravitational parameters
self.ngr = ngr
class KeplerianOrbitProperties(OrbitProperties):
"""This class contains information of asteroid orbit
properties in Keplerian reference frame. This class inherits the attributes
from OrbitProperties.
Attributes
----------
kep : pandas.DataFrame
Data frame which contains the Keplerian elements information.
perihelion : float
Orbit perihelion in au.
aphelion : float
Orbit aphelion in au.
anode : float
Ascending node-Earth separation in au.
dnode : float
Descending node-Earth separation in au.
moid : float
Minimum Orbit Intersection Distance in au.
period : float
Orbit period in days.
pha : str
Potentially hazardous asteroid classification.
vinfty : float
Infinite velocity.
u_par : float
Uncertainty parameter as defined by the MPC.
orb_type : str
Type of orbit.
rms : pandas.DataFrame
Root mean square for Keplerian elements
cov : pandas.DataFrame
Covariance matrix for Keplerian elements
cor : pandas.DataFrame
Correlation matrix for Keplerian elements
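Examples
--------
A minimal usage sketch; the file name below is a placeholder and the
raw bytes are assumed to come from a NEOCC Keplerian orbit
properties file::

    raw = open('433.ke0', 'rb').read()  # hypothetical local copy
    orb = KeplerianOrbitProperties()
    orb._orb_kep_prop_parser(raw)
    print(orb.epoch, orb.moid, orb.kep)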
"""
def __init__(self):
"""Initialization of class attributes
"""
# Get attributes from parent OrbitProperties
super().__init__()
# Orbit properties
self.kep = []
self.perihelion = []
self.aphelion = []
self.anode = []
self.dnode = []
self.moid = []
self.period = []
self.pha = []
self.vinfty = []
self.u_par = []
self.orb_type = []
self.rms = []
# Covariance and correlation matrices
self.cov = []
self.cor = []
def _orb_kep_prop_parser(self, data_obj):
"""Get orbit properties parsed from object data
Parameters
----------
data_obj : object
Object data in byte format.
Raises
------
ValueError
If the required orbit properties file is empty or does not
exist
"""
# Assign parent attributes
self._orb_prop_parser(data_obj)
# Decode data using UTF-8 and store in memory for orb props
df_orb_d = io.StringIO(data_obj.decode('utf-8'))
# Read data as csv
df_orb = pd.read_csv(df_orb_d, delim_whitespace=True,
skiprows=[0,1,2,3,4,9], engine='python')
# Keplerian elements
keplerian = df_orb.iloc[0:1, 1:7]
# Kep - Rename columns and indexes
keplerian.columns = ['a', 'e', 'i', 'long. node',
'arg. peric.', 'mean anomaly']
keplerian.index = ['KEP']
self.kep = keplerian.astype(float)
# Get perihelion index to provide location for rest of attributes
perihelion_index = get_indexes(df_orb, 'PERIHELION')[0][0]
# Perihelion
self.perihelion = float(df_orb.iloc[perihelion_index, 2])
# Aphelion
self.aphelion = float(df_orb.iloc[perihelion_index+1, 2])
# Ascending node - Earth Separation
self.anode = float(df_orb.iloc[perihelion_index+2, 2])
# Descending node - Earth Separation
self.dnode = float(df_orb.iloc[perihelion_index+3, 2])
# MOID (Minimum Orbit Intersection Distance)
self.moid = float(df_orb.iloc[perihelion_index+4, 2])
# Period
self.period = float(df_orb.iloc[perihelion_index+5, 2])
# PHA (Potentially Hazardous Asteroid)
self.pha = df_orb.iloc[perihelion_index+6, 2]
# Vinfty
self.vinfty = float(df_orb.iloc[perihelion_index+7, 2])
# U_par
check_upar = get_indexes(df_orb, 'U_PAR')
# Check if U_par parameter is assigned
if bool(check_upar) is False:
self.u_par = 'There is no u_par assigned to this object'
else:
self.u_par = float(df_orb.iloc[check_upar[0][0], 2])
# Get index for RMS
rms_index = get_indexes(df_orb, 'RMS')[0][0]
# Determine Orb Type parameter knowing the RMS index
self.orb_type = str(df_orb.iloc[rms_index-1, 2])
# Check the dimension of the matrix to give complete RMS
matrix_dimension = int(self.lsp.iloc[0, 2])
if matrix_dimension == 8:
# RMS (Root Mean Square)
rms = df_orb.iloc[rms_index:rms_index+1, 2:10]
# Rename columns
rms.columns = ['a', 'e', 'i', 'long. node', 'arg. peric.',
'mean anomaly', 'Area-to-mass ratio',
'Yarkovsky parameter']
ngr_parameter = 'Yarkovsky parameter and Area-to-mass ratio'
elif matrix_dimension == 7:
# RMS (Root Mean Square)
rms = df_orb.iloc[rms_index:rms_index+1, 2:9]
# Check which of NGR parameters is 0 to rename cols
if float(self.ngr.iloc[0][0]) == 0:
ngr_parameter = 'Yarkovsky parameter'
else:
ngr_parameter = 'Area-to-mass ratio'
# Rename columns
rms.columns = ['a', 'e', 'i', 'long. node',
'arg. peric.', 'mean anomaly',
ngr_parameter]
else:
# RMS (Root Mean Square)
rms = df_orb.iloc[rms_index:rms_index+1, 2:8]
# Rename columns
rms.columns = ['a', 'e', 'i', 'long. node', 'arg. peric.',
'mean anomaly']
ngr_parameter = 'There are no additional NGR parameters'
# RMS - Rename indexes
rms.index = ['RMS']
self.rms = rms.astype(float)
# Covariance matrix
self.cov = self._get_matrix(df_orb, 'cov', matrix_dimension,
'keplerian', ngr=ngr_parameter)\
.astype(float)
# Correlation matrix
self.cor = self._get_matrix(df_orb, 'cor', matrix_dimension,
'keplerian', ngr=ngr_parameter)\
.astype(float)
class EquinoctialOrbitProperties(OrbitProperties):
"""This class contains information of asteroid orbit
properties in equinoctial reference frame. This class inherits
the attributes from OrbitProperties.
Attributes
----------
equinoctial : pandas.DataFrame
Data frame which contains the equinoctial elements information.
rms : pandas.DataFrame
Root Mean Square for equinoctial elements.
eig : pandas.DataFrame
Eigenvalues for the covariance matrix.
wea : pandas.DataFrame
Eigenvector corresponding to the largest eigenvalue.
cov : pandas.DataFrame
Covariance matrix for equinoctial elements.
nor : pandas.DataFrame
Normalization matrix for equinoctial elements.
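Examples
--------
A minimal usage sketch; the file name below is a placeholder and the
raw bytes are assumed to come from a NEOCC equinoctial orbit
properties file::

    raw = open('433.eq0', 'rb').read()  # hypothetical local copy
    orb = EquinoctialOrbitProperties()
    orb._orb_equi_prop_parser(raw)
    print(orb.equinoctial, orb.rms)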
"""
def __init__(self):
"""Initialization of class attributes
"""
# Get attributes from parent OrbitProperties
super().__init__()
# Orbit properties
self.equinoctial = []
self.rms = []
self.eig = []
self.wea = []
# Covariance and nor matrices
self.cov = []
self.nor = []
def _orb_equi_prop_parser(self, data_obj):
"""Get orbit properties parsed from object data
Parameters
----------
data_obj : object
Object data in byte format.
Raises
------
ValueError
If the required orbit properties file is empty or does not
exist
"""
# Assign parent attributes
self._orb_prop_parser(data_obj)
# Decode data using UTF-8 and store in memory for orb props
df_orb_d = io.StringIO(data_obj.decode('utf-8'))
# Check if there is an additional line
df_check_d = io.StringIO(data_obj.decode('utf-8'))
# Read as txt file
df_check = pd.read_fwf(df_check_d, skiprows=[0,1,2,3,4],
header=None, engine='python',
delim_whitespace=True)
if 'SOLUTION' in df_check.iloc[0][0]:
last_skip_rows = [0,1,2,3,4,5,10]
else:
last_skip_rows = [0,1,2,3,4,9]
# Read data as csv
df_orb = pd.read_csv(df_orb_d, delim_whitespace=True,
skiprows=last_skip_rows, engine='python')
# Equinoctial elements
equinoctial = df_orb.iloc[0:1, 1:7]
# Equinoctial - Rename columns and indexes
equinoctial.columns = ['a', 'e*sin(LP)', 'e*cos(LP)',
'tan(i/2)*sin(LN)', 'tan(i/2)*cos(LN)',
'mean long.']
equinoctial.index = ['EQU']
self.equinoctial = equinoctial.astype(float)
# Get index for RMS
rms_index = get_indexes(df_orb, 'RMS')[0][0]
# Check the dimension of the matrix to give complete RMS
matrix_dimension = int(self.lsp.iloc[0, 2])
if matrix_dimension == 8:
# RMS (Root Mean Square)
rms = df_orb.iloc[rms_index:rms_index+1, 2:10]
# EIG
eig = df_orb.iloc[rms_index+1:rms_index+2, 2:10]
# WEA
wea = df_orb.iloc[rms_index+2:rms_index+3, 2:10]
# Assign column names
column_names = ['a', 'e*sin(LP)', 'e*cos(LP)',
'tan(i/2)*sin(LN)', 'tan(i/2)*cos(LN)',
'mean long.', 'Area-to-mass ratio',
'Yarkovsky parameter']
ngr_parameter = 'Yarkovsky parameter and Area-to-mass ratio'
elif matrix_dimension == 7:
# RMS (Root Mean Square)
rms = df_orb.iloc[rms_index:rms_index+1, 2:9]
# EIG
eig = df_orb.iloc[rms_index+1:rms_index+2, 2:9]
# WEA
wea = df_orb.iloc[rms_index+2:rms_index+3, 2:9]
# Check which of NGR parameters is 0 to rename cols
if float(self.ngr.iloc[0][0]) == 0:
ngr_parameter = 'Yarkovsky parameter'
else:
ngr_parameter = 'Area-to-mass ratio'
# Assign column names
column_names = ['a', 'e*sin(LP)', 'e*cos(LP)',
'tan(i/2)*sin(LN)', 'tan(i/2)*cos(LN)',
'mean long.',
ngr_parameter]
else:
# RMS (Root Mean Square)
rms = df_orb.iloc[rms_index:rms_index+1, 2:8]
# EIG
eig = df_orb.iloc[rms_index+1:rms_index+2, 2:8]
# WEA
wea = df_orb.iloc[rms_index+2:rms_index+3, 2:8]
# Assign column names
column_names = ['a', 'e*sin(LP)', 'e*cos(LP)',
'tan(i/2)*sin(LN)', 'tan(i/2)*cos(LN)',
'mean long.']
ngr_parameter = 'There are no additional NGR parameters'
# Rename columns
rms.columns = eig.columns = wea.columns = column_names
# RMS - Rename indexes
rms.index = ['RMS']
self.rms = rms.astype(float)
# EIG - Rename indexes
eig.index = ['EIG']
self.eig = eig.astype(float)
# WEA - Rename indexes
wea.index = ['WEA']
self.wea = wea.astype(float)
# Covariance matrix
self.cov = self._get_matrix(df_orb, 'cov', matrix_dimension,
'equinoctial', ngr=ngr_parameter)\
.astype(float)
# Normalization matrix
self.nor = self._get_matrix(df_orb, 'nor', matrix_dimension,
'equinoctial', ngr=ngr_parameter)\
.astype(float)
class Ephemerides:
"""This class contains information of object ephemerides.
Attributes
----------
observatory : str
Name of the observatory from which ephemerides are obtained.
tinit : str
Start date from which ephemerides are obtained.
tfinal : str
End date from which ephemerides are obtained.
tstep : str
Time step and time unit used during ephemerides calculation.
ephemerides : pandas.DataFrame
Data frame which contains the information of the object
ephemerides.
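Examples
--------
A minimal usage sketch; the object name, observatory code, dates and
time step below are placeholder values, not taken from this module::

    eph = Ephemerides()
    eph._ephem_parser('433', '500', '2021-10-01 00:00:00',
                      '2021-10-02 00:00:00', '1', 'hours')
    print(eph.observatory, eph.ephemerides.head())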
"""
def __init__(self):
"""Initialization of class attributes
"""
# Document info
self.observatory = []
self.tinit = []
self.tfinal = []
self.tstep = []
# Ephemerides
self.ephemerides = []
@staticmethod
def _get_head_ephem(data_obj):
"""Get and parse header of ephemerides file.
Parameters
----------
data_obj : object
Object in bytes format.
Returns
-------
obs : str
Observatory name.
idate : str
Start date of the ephemerides.
fdate : str
Final date of the ephemerides.
tstep : str
Value and units for time step.
"""
data_d = io.StringIO(data_obj.decode('utf-8'))
head_ephe = pd.read_fwf(data_d, nrows=5, header=None)
# Template for observatory: Observatory: {observatory}
obs = head_ephe.iloc[1][0].split(':')[1].strip()
# Template for initial date: Initial Date: {init_date}
idate = head_ephe.iloc[2][0].split(':')[1].strip() + ':' +\
head_ephe.iloc[2][0].split(':')[2].strip()
# Template for final date: Final Date: {final_date}
fdate = head_ephe.iloc[3][0].split(':')[1].strip() + ':' +\
head_ephe.iloc[3][0].split(':')[2].strip()
# Template for time step: Time step: {step}
tstep = head_ephe.iloc[4][0].split(':')[1].strip()
return obs, idate, fdate, tstep
def _ephem_parser(self, name, observatory, start, stop, step, step_unit):
"""Parse and arrange the ephemeries data.
Parameters
----------
name : str
Name of the requested object.
observatory : str
Name of the observatory from which ephemerides are obtained.
start : str
Start date from which ephemerides are obtained.
stop : str
End date from which ephemerides are obtained.
step : str
Value for the time step (e.g. '1', '0.1', etc.).
step_unit : str
Units of the time step.
Raises
------
KeyError
If some of the parameters introduced in the method are not
valid.
"""
# Unique base url for asteroid ephemerides
url_ephe = EPHEM_URL + str(name).replace(' ', '%20') +\
'&oc=' + str(observatory) + '&t0=' +\
str(start).replace(' ', 'T') + 'Z&t1=' +\
str(stop).replace(' ', 'T') + 'Z&ti=' + str(step) +\
'&tiu=' + str(step_unit)
# Request data two times if the first attempt fails
try:
# Get object data
data_obj = requests.get(url_ephe, timeout=TIMEOUT,
verify=VERIFICATION).content
except ConnectionError: # pragma: no cover
print('Initial attempt to obtain object data failed. '
'Reattempting...')
logging.warning('Initial attempt to obtain object data '
'failed.')
# Wait 5 seconds
time.sleep(5)
# Get object data
data_obj = requests.get(url_ephe, timeout=TIMEOUT,
verify=VERIFICATION).content
# Check if file contains errors due to bad URL keys
check = io.StringIO(data_obj.decode('utf-8'))
check_r = pd.read_fwf(check, delimiter='"', header=None)
if len(check_r) == 1:
error = check_r[0][0]
raise KeyError(error)
# Get ephemerides if file is correct
ephems_d = io.StringIO(data_obj.decode('utf-8'))
# The ephemerides file uses fixed-width columns, so the column
# spans (start, end) are defined explicitly for each field
col_space = [(1,12), (13,19), (20,32) ,(34,37), (38,40),
(41,47), (49,52), (53,55), (56, 61), (62,68),
(69,74), (75,83), (84, 95), (96,102), (103, 109),
(110,116), (117,122), (123,128), (129,136),
(137,144), (146,154), (156,164), (166,174),
(175,180), (182,189), (192,199), (201,206)]
# Read data as fixed-width text
ephem = pd.read_fwf(ephems_d, header=None, skiprows=9,
engine='python', colspecs=col_space)
# Rename columns
ephem.columns = ['Date', 'Hour', 'MJD in UTC', 'RA h', 'RA m',
'RA s', 'DEC d', 'DEC \'','DEC "', 'Mag',
'Alt (deg)', 'Airmass', 'Sun elev. (deg)',
'SolEl (deg)', 'LunEl (deg)', 'Phase (deg)',
'Glat (deg)', 'Glon (deg)', 'R (au)',
'Delta (au)', 'Ra*cosDE ("/min)',
'DEC ("/min)', 'Vel ("/min)', 'PA (deg)',
'Err1 (")', 'Err2 (")', 'AngAx (deg)']
# Convert Date to datetime iso format
ephem['Date'] = | pd.to_datetime(ephem['Date']) | pandas.to_datetime |
#!/usr/bin/env python
"""
Module implementing the Data class that manages data for
it's associated PandasTable.
Created Jan 2014
Copyright (C) <NAME>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from types import *
import operator
import os, string, types, copy
import pickle
import numpy as np
import pandas as pd
from . import util
class TableModel(object):
"""A data model for the Table class that uses pandas
Args:
dataframe: pandas dataframe
rows: number of rows if empty table
columns: number of columns if empty table
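Example:
    A minimal sketch of typical use (column names are the
    auto-generated defaults, e.g. 'a', 'b')::

        model = TableModel(rows=3, columns=2)  # empty 3x2 table
        model.addColumn('c', dtype=float)
        print(model.getColumnCount(), model.getRowCount())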
"""
keywords = {'colors':'colors'}
def __init__(self, dataframe=None, rows=20, columns=5):
"""Constructor for table model. """
self.initialiseFields()
self.setup(dataframe, rows, columns)
return
def setup(self, dataframe, rows=20, columns=5):
"""Create table model"""
if not dataframe is None:
self.df = dataframe
else:
colnames = list(string.ascii_lowercase[:columns])
self.df = pd.DataFrame(index=range(rows),columns=colnames)
#self.df = self.getSampleData()
#self.reclist = self.df.index # not needed now?
return
@classmethod
def getSampleData(self, rows=400, cols=5, n=2):
"""Generate sample data
Args:
rows: no. of rows
cols: columns
n: length of column names
"""
import random
s = string.ascii_lowercase
def genstr(n=2):
return ''.join(random.choice(s) for i in range(n))
maxrows = 5e6
if rows>maxrows:
rows=maxrows
if cols>1e5:
cols=int(1e5)
n=2
if cols>100: n=3
colnames = [genstr(n) for i in range(cols)]
coldata = [np.random.normal(x,1,rows) for x in np.random.normal(5,3,cols)]
n = np.array(coldata).T
df = pd.DataFrame(n, columns=colnames)
col1 = colnames[0]
col2 = colnames[1]
df[col2] = df[col1]*np.random.normal(.8, .2, len(df))
df = np.round(df, 3)
cats = ['low','medium','high','very high']
df['label'] = pd.cut(df[col1], bins=4, labels=cats).astype(str)
#df['label'] = df.label.cat.as_ordered()
#don't add date if rows too large
if rows<2e6:
df['date'] = pd.date_range('1/1/2016', periods=rows, freq='H')
return df
@classmethod
def getIrisData(self):
"""Get iris dataset"""
path = os.path.dirname(__file__)
cols = ['sepal length','sepal width','petal length','petal width','class']
df = pd.read_csv(os.path.join(path,'datasets','iris.data'),names=cols)
return df
@classmethod
def getStackedData(self):
"""Get a dataframe to pivot test"""
import pandas.util.testing as tm; tm.N = 4
frame = tm.makeTimeDataFrame()
N, K = frame.shape
data = {'value' : frame.values.ravel('F'),
'variable' : np.asarray(frame.columns).repeat(N),
'date' : np.tile(np.asarray(frame.index), K)}
return pd.DataFrame(data, columns=['date', 'variable', 'value'])
def initialiseFields(self):
"""Create meta data fields"""
self.meta = {}
self.columnwidths = {} #used to store col widths
return
def save(self, filename):
"""Save dataframe"""
ftype = os.path.splitext(filename)[1]
if ftype == '.mpk':
self.df.to_msgpack(filename)
elif ftype == '.pickle':
self.df.to_pickle(filename)
elif ftype == '.xls':
self.df.to_excel(filename)
elif ftype == '.csv':
self.df.to_csv(filename)
#elif ftype == '.html':
# self.df.to_html(filename)
return
def load(self, filename, filetype=None):
"""Load file, if no filetype given assume it's msgpack format"""
if filetype == '.pickle':
self.df = pd.read_pickle(filename)
else:
self.df = pd.read_msgpack(filename)
#print (len(self.df))
return
def getlongestEntry(self, colindex, n=500):
"""Get the longest string in the column for determining width. Just uses the first
n rows for speed"""
df = self.df
col = df.columns[colindex]
try:
if df.dtypes[col] == 'float64':
c = df[col][:n].round(3)
else:
c = df[col][:n]
except:
return 1
longest = c.astype('object').astype('str').str.len().max()
if np.isnan(longest):
return 1
return longest
def getRecordAtRow(self, rowIndex):
"""Get the entire record at the specifed row"""
name = self.getRecName(rowIndex)
record = self.df.loc[name]
return record
def moveColumn(self, oldindex, newindex):
"""Changes the order of columns"""
df = self.df
cols = list(df.columns)
name = cols[oldindex]
del cols[oldindex]
cols.insert(newindex, name)
self.df = df[cols]
return
def autoAddRows(self, num):
"""Add n rows to end of dataframe. Will create rows with index starting
from highest previous row count"""
df = self.df
if len(df) == 0:
self.df = pd.DataFrame(pd.Series(range(num)))
print (df)
return
try:
ind = self.df.index.max()+1
except:
ind = len(df)+1
new = pd.DataFrame(np.nan, index=range(ind,ind+num), columns=df.columns)
self.df = pd.concat([df, new])
return
def addRow(self, rowindex):
"""Inserts a row at the required index by append/concat"""
df = self.df
a, b = df[:rowindex], df[rowindex:]
a = a.append(pd.Series(), ignore_index=1)
self.df = pd.concat([a,b])
return
def deleteRow(self, row, unique=True):
"""Delete a row"""
self.deleteRows([row], unique)
return
def deleteRows(self, rowlist=None, unique=True):
"""Delete multiple or all rows"""
df = self.df
if unique == True:
rows = list(set(range(len(df))) - set(rowlist))
self.df = df.iloc[rows]
else:
df.drop(df.index[rowlist],inplace=True)
return
def addColumn(self, colname=None, dtype=None, data=None):
"""Add a column"""
if data is None:
data = pd.Series(dtype=dtype)
self.df[colname] = data
return
def deleteColumn(self, colindex):
"""delete a column"""
df = self.df
colname = df.columns[colindex]
df.drop([colname], axis=1, inplace=True)
return
def deleteColumns(self, cols=None):
"""Remove all cols or list provided"""
df = self.df
colnames = df.columns[cols]
df.drop(colnames, axis=1, inplace=True)
return
def deleteCells(self, rows, cols):
self.df.iloc[rows,cols] = np.nan
return
def resetIndex(self):
"""Reset index behaviour"""
df = self.df
if df.index.name != None or df.index.names[0] != None:
drop = False
else:
drop = True
df.reset_index(drop=drop,inplace=True)
return
def setindex(self, colindex):
"""Index setting behaviour"""
df = self.df
colnames = list(df.columns[colindex])
indnames = df.index.names
if indnames[0] != None:
df.reset_index(inplace=True)
df.set_index(colnames, inplace=True)
return
def copyIndex(self):
"""Copy index to a column"""
df = self.df
name = df.index.name
if name == None: name='index'
df[name] = df.index#.astype('object')
return
def groupby(self, cols):
"""Group by cols"""
df = self.df
colnames = df.columns[cols]
grps = df.groupby(colnames)
return grps
def getColumnType(self, columnIndex):
"""Get the column type"""
coltype = self.df.dtypes[columnIndex]
return coltype
def getColumnCount(self):
"""Returns the number of columns in the data model"""
return len(self.df.columns)
def getColumnName(self, columnIndex):
"""Returns the name of the given column by columnIndex"""
return str(self.df.columns[columnIndex])
def getColumnData(self, columnIndex=None, columnName=None,
filters=None):
"""Return the data in a list for this col,
filters is a tuple of the form (key,value,operator,bool)"""
if columnIndex != None and columnIndex < len(self.columnNames):
columnName = self.getColumnName(columnIndex)
names = Filtering.doFiltering(searchfunc=self.filterBy,
filters=filters)
coldata = [self.data[n][columnName] for n in names]
return coldata
def getColumns(self, colnames, filters=None, allowempty=True):
"""Get column data for multiple cols, with given filter options,
filterby: list of tuples of the form (key,value,operator,bool)
allowempty: boolean if false means rows with empty vals for any
required fields are not returned
returns: lists of column data"""
def evaluate(l):
for i in l:
if i == '' or i == None:
return False
return True
coldata=[]
for c in colnames:
vals = self.getColumnData(columnName=c, filters=filters)
coldata.append(vals)
if allowempty == False:
result = [i for i in zip(*coldata) if evaluate(i) == True]
coldata = list(zip(*result))
return coldata
def getRowCount(self):
"""Returns the number of rows in the table model."""
return len(self.df)
def getValueAt(self, rowindex, colindex):
"""Returns the cell value at location specified
by columnIndex and rowIndex."""
df = self.df
value = self.df.iloc[rowindex,colindex]
if type(value) is float and np.isnan(value):
return ''
return value
def setValueAt(self, value, rowindex, colindex):
"""Changed the dictionary when cell is updated by user"""
if value == '':
value = np.nan
dtype = self.df.dtypes[colindex]
#try to cast to column type
try:
if dtype == 'float64':
value = float(value)
elif dtype == 'int':
value = int(value)
elif dtype == 'datetime64[ns]':
value = | pd.to_datetime(value) | pandas.to_datetime |
import plotly.express as px
import pandas as pd
import numpy as np
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
from app import app
# preprocessing
df = | pd.read_csv('supermarket_sales_preprocessed.csv') | pandas.read_csv |
######### imports #########
from ast import arg
from datetime import timedelta
import sys
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_constants import *
from Reff_functions import *
import glob
import os
from sys import argv
import arviz as az
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
from math import ceil
import pickle
from cmdstanpy import CmdStanModel
matplotlib.use("Agg")
from params import (
truncation_days,
start_date,
third_start_date,
alpha_start_date,
omicron_start_date,
omicron_only_date,
omicron_dominance_date,
pop_sizes,
num_forecast_days,
get_all_p_detect_old,
get_all_p_detect,
)
def process_vax_data_array(
data_date,
third_states,
third_end_date,
variant="Delta",
print_latest_date_in_ts=False,
):
"""
Processes the vaccination data to an array for either the Omicron or Delta strain.
"""
# Load in vaccination data by state and date
vaccination_by_state = pd.read_csv(
"data/vaccine_effect_timeseries_" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
# there are a couple NA's early on in the time series but is likely due to slightly
# different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state.loc[
vaccination_by_state["variant"] == variant
]
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
if print_latest_date_in_ts:
# display the latest available date in the NSW data (will be the same date between states)
print(
"Latest date in vaccine data is {}".format(
vaccination_by_state[vaccination_by_state.state == "NSW"].date.values[-1]
)
)
# Get only the dates we need + 1 (this serves as the initial value)
vaccination_by_state = vaccination_by_state[
(
vaccination_by_state.date
>= pd.to_datetime(third_start_date) - timedelta(days=1)
)
& (vaccination_by_state.date <= third_end_date)
]
vaccination_by_state = vaccination_by_state[
vaccination_by_state["state"].isin(third_states)
] # Isolate fitting states
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# If we are missing recent vaccination data, fill it in with the most recent available data.
latest_vacc_data = vaccination_by_state.columns[-1]
if latest_vacc_data < pd.to_datetime(third_end_date):
vaccination_by_state = pd.concat(
[vaccination_by_state]
+ [
pd.Series(vaccination_by_state[latest_vacc_data], name=day)
for day in pd.date_range(start=latest_vacc_data, end=third_end_date)
],
axis=1,
)
# Convert to simple array only useful to pass to stan (index 1 onwards)
vaccination_by_state_array = vaccination_by_state.iloc[:, 1:].to_numpy()
return vaccination_by_state_array
def get_data_for_posterior(data_date):
"""
Read in the various datastreams and combine the samples into a dictionary that we then
dump to a pickle file.
"""
print("Performing inference on state level Reff")
data_date = pd.to_datetime(data_date) # Define data date
print("Data date is {}".format(data_date.strftime("%d%b%Y")))
fit_date = pd.to_datetime(data_date - timedelta(days=truncation_days))
print("Last date in fitting {}".format(fit_date.strftime("%d%b%Y")))
# * Note: 2020-09-09 won't work (for some reason)
# read in microdistancing survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest Microdistancing survey is {}".format(surveys.date.values[-1]))
surveys["state"] = surveys["state"].map(states_initials).fillna(surveys["state"])
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# If you get an error here saying 'cannot create a new series when the index is not unique',
# then you have a duplicated md file.
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
always = always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
always = always.fillna(method="ffill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
survey_counts_base = (
pd.pivot_table(data=always, index="date", columns="state", values="count")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
survey_respond_base = (
pd.pivot_table(data=always, index="date", columns="state", values="respondents")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
# read in and process mask wearing data
mask_wearing = | pd.DataFrame() | pandas.DataFrame |
import os
import sys
import subprocess
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
import scipy.cluster.hierarchy
from matplotlib import cm
from decneo.commonFunctions import read, write
import multiprocessing
cwd = '/mnt/gs18/scratch/users/paterno1/otherCellTypes_choroid/LRpairs/'
celltypes = ['Endothelial', 'Pericyte', 'Fibroblast', 'Macrophage', 'SMC']
genetypes = ['ligands', 'receptors']
def loadRamiowskiLRPairs(dir):
df = pd.read_excel(dir + 'SupplementaryData2Ramilowski.xlsx', sheet_name='All.Pairs', index_col=False)
df = df[['Ligand.ApprovedSymbol', 'Receptor.ApprovedSymbol', 'Pair.Evidence']]
df = df.loc[df['Pair.Evidence'] != 'EXCLUDED']
df = df.loc[df['Pair.Evidence'] != 'EXCLUDED not receptor']
df = df.loc[df['Pair.Evidence'] != 'EXCLUDED not ligand']
df = df.drop(columns=['Pair.Evidence'])
df.columns = ['ligand', 'receptor']
df = df.reset_index(drop=True)
return pd.MultiIndex.from_arrays([df['ligand'].values, df['receptor'].values], names=['ligand', 'receptor'])
def doLR(geneL, geneR, cutoff, hcutoff, dataDict, LR = None, suffix = 'data', makePlot = True, saveData = True):
name = '%s-%s' % (geneL, geneR)
grays = plt.cm.gray_r
resDict = dict()
for genetype in genetypes:
resDict.update({genetype: dict()})
for celltype in celltypes:
df_peaks = dataDict[hcutoff][genetype][celltype]
gene = geneL if genetype=='ligands' else geneR
se_peak = df_peaks[gene] if gene in df_peaks.columns else pd.Series(dtype=float)
resDict[genetype].update({celltype: se_peak[se_peak>=cutoff]})
if makePlot:
fig, ax = plt.subplots(figsize=[7,7])
cx = np.array([0.,0.25,0.5,0.75,1.])*1.35
gy = [0.2, 0.8]
colorg = ['blue', 'green']
sh = [-1, 1]
de = 0.035
celltypesO = ['SMC', 'Pericyte', 'Endothelial', 'Fibroblast', 'Macrophage']
if makePlot:
ax.text(0, 0.9, name, ha='center', va='center', fontsize=20)
gg = []
for ig1, genetype1 in enumerate(genetypes):
if makePlot:
ax.text(0.5*1.35, gy[ig1]+0.05*sh[ig1], genetype1, ha='center', va='center', fontsize=20)
for ic1, celltype1 in enumerate(celltypesO):
if makePlot:
ax.text(cx[ic1], gy[ig1]+0.00*sh[ig1], celltype1, ha='center', va='center', fontsize=15)
t1 = resDict[genetype1][celltype1]
group1 = 0
h1 = 1.
g1 = t1
temp1 = cx[ic1] + (-1/2 + group1 + 0.5)*de, gy[ig1]-0.05*sh[ig1]
if makePlot:
mec = 'k'
ax.plot(*temp1, 'o', ms=0.9*len(g1)/2, color=colorg[ig1], mec=mec, mew=1.0)
ggc = []
ig2, genetype2 = 1, 'receptors'
if genetype2!=genetype1:
for ic2, celltype2 in enumerate(celltypesO):
t2 = resDict[genetype2][celltype2]
group2 = 0
temp2 = cx[ic2] + (1/2-group2-0.5)*de, gy[ig2]-0.05*sh[ig2]
c = pd.MultiIndex.from_tuples([(a, b) for a in g1.index for b in t2.index], names=['ligand', 'receptor'])
comP = LR.intersection(c)
com = comP.shape[0]
if makePlot:
if com>0:
alpha = 1. if com >= 15 else max(com/15., 0.2)
ax.annotate("", xy=(temp2[0], temp2[1]), xycoords='data', xytext=(temp1[0], temp1[1]), textcoords='data',
arrowprops=dict(facecolor=grays(alpha), edgecolor=grays(alpha), shrink=0.04, headwidth=7, width=com/5, alpha=1.), zorder=-1)
ggc.append(', '.join(list(comP.get_level_values(0) + '-' + comP.get_level_values(1))) if com>0 else None)
templ = [genetype1, celltype1, ', '.join(list(g1.index.values))]
templ.extend(ggc)
gg.append(templ)
if makePlot:
plt.axis('off')
fig.tight_layout()
plt.savefig('%s %s.png' % (name, suffix), dpi=300)
exportEmf(cwd, '%s %s' % (name, suffix))
dfx = pd.DataFrame(gg).drop(0, axis=1)
dfx.columns = ['cell type', 'ligands', 'SMC pairs', 'Pericyte pairs', 'Endothelial pairs', 'Fibroblast pairs', 'Macrophage pairs']
recc = dfx.iloc[5:]['ligands'].values
dfx = dfx.iloc[:5]
dfx.insert(2, 'receptors', recc)
return dfx
def doLRw(params):
args, kwargs = params
return doLR(*args, **kwargs).set_index('cell type', drop=True).stack().rename((args[0], args[1]))
if __name__ == '__main__':
LR = loadRamiowskiLRPairs(cwd)
dataDictVoigt = read(cwd + 'dataDictVoigtv2_10_14_2021')
dataDictPanglao = read(cwd + 'dataDictPanglao')
print('LR pairs from Ramilowski et al. 2015:\n', LR, flush=True)
if True:
for hcutoff in ['0.0']:
allDataDicts = {'choroid': dataDictVoigt}
#allDataDicts = {'choroid': dataDictVoigt, 'mouse': dataDictPanglao['Mus musculus'], 'human': dataDictPanglao['Homo sapiens']}
for key in allDataDicts.keys():
tligands = pd.concat([allDataDicts[key][hcutoff]['ligands'][celltype].index.to_series() for celltype in celltypes]).drop_duplicates().sort_values().index
treceptors = pd.concat([allDataDicts[key][hcutoff]['receptors'][celltype].index.to_series() for celltype in celltypes]).drop_duplicates().sort_values().index
tpairs = pd.Series(index=pd.MultiIndex.from_product([tligands, treceptors], names=['ligand', 'receptor']), data=0)
tpairs = tpairs.index
nCPUs = 100
print('hcutoff:', hcutoff, ', data:', key, 'nCPUs:', nCPUs, ', pairs:', len(tpairs))
pool = multiprocessing.Pool(processes=nCPUs)
df = pd.DataFrame(pool.map(doLRw, [((pair[0], pair[1], 0.3, hcutoff, allDataDicts[key]), dict(LR=LR, makePlot=False, saveData=False)) for pair in tpairs.values]))
pool.close()
pool.join()
df.to_hdf('v3_allpairs_%s_%s.h5' % (hcutoff, key), key='df', mode='a', complevel=4, complib='zlib')
del df
if True:
for hcutoff in ['0.0']:
allDataDicts = {'choroid': dataDictVoigt}
#allDataDicts = {'choroid': dataDictVoigt, 'mouse': dataDictPanglao['Mus musculus'], 'human': dataDictPanglao['Homo sapiens']}
for key in allDataDicts.keys():
print('hcutoff:', hcutoff, ', data:', key)
df_all = pd.read_hdf(cwd + 'v3_allpairs_%s_%s.h5' % (hcutoff, key), key='df')
for celltype1 in celltypes:
for celltype2 in celltypes:
# celltype1 has ligands, celltype2 has receptors
df = df_all.xs([(celltype1, celltype2 + ' pairs')], axis=1).fillna('')
tempName = cwd + 'v3_allpairs_%s_%s_%s_%s.h5' % (hcutoff, key, celltype1, celltype2)
df.to_hdf(tempName + '.h5', key='df', mode='a', complevel=4, complib='zlib')
df = pd.read_hdf(tempName + '.h5', key='df')
l = len(df)
df.index = | pd.MultiIndex.from_tuples(df.index.values, names=['ligand', 'receptor']) | pandas.MultiIndex.from_tuples |
# -*- coding: utf-8 -*-
# flake8: noqa
"""Domain module
This module defines preconfigured CORDEX domain from csv tables. The module
also contains some tools to create a domain dataset from a csv tables or simply
from grid information.
Example:
To get a list of available implementations, create cordex domains, write
them to netcdf with some dummy data, you can use ,e.g.,::
from cordex import domain as dm
eur11 = dm.cordex_domain('EUR-11')
"""
import numpy as np
import pandas as pd
import xarray as xr
from ..tables import domains
from . import cf
from . import utils
def domain_names(table_name=None):
"""Returns a list of short names of all availabe Cordex domains
Parameters
----------
table_name:
Only return domain names from this table.
Returns
-------
domain names : list
List of available cordex domains.
"""
if table_name:
return list(domains.tables[table_name].index)
else:
return list(domains.table.index)
def cordex_domain(
short_name,
dummy=False,
add_vertices=False,
tables=None,
attrs=None,
mapping_name=None,
):
"""Creates an xarray dataset containg the domain grid definitions.
Parameters
----------
short_name:
Name of the Cordex Domain.
dummy : str or bool
Name of dummy field; if dummy='topo', the cdo topo operator will be
used to create some dummy topography data. Dummy data is useful for
looking at the domain with ncview.
add_vertices : bool
Add grid boundaries in the global coordinates (lon_vertices and lat_vertices).
tables: dataframe or list of dataframes, default: cordex_tables
Tables from which to look up the grid information. Index in the table
should be the short name of the domain, e.g., `EUR-11`. If no table is
provided, all standard tables will be searched.
attrs: str or dict
Global attributes that should be added to the dataset. If `attrs='CORDEX'`
a set of standard CF global attributes.
mapping_name: str
Variable name of the grid mapping, if mapping_name is `None`, the CF standard
variable name is used.
Returns
-------
Dataset : xarray.core.Dataset
Dataset containing the coordinates.
Example
-------
To create a cordex rotated pole domain dataset, you can use ,e.g.,::
import cordex as cx
eur11 = cx.cordex_domain('EUR-11')
"""
if attrs is None:
attrs = {}
if tables is None:
tables = domains.table
if isinstance(tables, list):
config = pd.concat(tables).loc[short_name]
else:
config = tables.loc[short_name]
return create_dataset(
**config,
name=short_name,
dummy=dummy,
add_vertices=add_vertices,
attrs=attrs,
mapping_name=mapping_name
)
def create_dataset(
nlon,
nlat,
dlon,
dlat,
ll_lon,
ll_lat,
pollon,
pollat,
name=None,
dummy=False,
add_vertices=False,
attrs=None,
mapping_name=None,
**kwargs
):
"""Create domain dataset from grid information.
Parameters
----------
nlon : int
longitudinal number of grid boxes
nlat : int
latitudinal number of grid boxes
dlon : float
longitudinal resolution (degrees)
dlat : float
latitudinal resolution (degrees)
ll_lon : float
lower left rotated longitude (degrees)
ll_lat : float
lower left rotated latitude (degrees)
pollon : float
longitude of the rotated pole (degrees)
pollat : float
latitude of the rotated pole (degrees)
dummy : str or bool
Name of dummy field; if dummy='topo', the cdo topo operator will be
used to create some dummy topography data. Dummy data is useful for
looking at the domain with ncview.
add_vertices : bool
Add grid boundaries in the global coordinates (lon_vertices and lat_vertices).
attrs: str or dict
Global attributes that should be added to the dataset. If `attrs='CORDEX'`
a set of standard CF global attributes.
mapping_name: str
Variable name of the grid mapping, if mapping_name is `None`, the CF standard
variable name is used.
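Example
-------
A minimal sketch using illustrative EUR-11-like grid values (consult
the bundled domain tables for authoritative numbers)::

    ds = create_dataset(nlon=424, nlat=412, dlon=0.11, dlat=0.11,
                        ll_lon=-28.375, ll_lat=-23.375,
                        pollon=-162.0, pollat=39.25, dummy='topo')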
"""
if attrs == "CORDEX":
attrs = cf.DEFAULT_CORDEX_ATTRS
elif attrs is None:
attrs = {}
if name:
attrs["CORDEX_domain"] = name
rlon, rlat = _init_grid(nlon, nlat, dlon, dlat, ll_lon, ll_lat)
lon, lat = rotated_coord_transform(*_stack(rlon, rlat), pollon, pollat)
pole = _grid_mapping(pollon, pollat)
return _get_dataset(
rlon,
rlat,
lon,
lat,
pole,
add_vertices=add_vertices,
dummy=dummy,
mapping_name=mapping_name,
attrs=attrs,
)
def domain_info(short_name, tables=None):
"""Returns a dictionary containg the domain grid definitions.
Returns a dictionary with grid information according to the
Cordex archive specifications.
See https://is-enes-data.github.io/cordex_archive_specifications.pdf
Parameters
----------
short_name:
Name of the Cordex Domain.
Returns
-------
domain info : dict
Dictionary containing the grid information.
"""
if tables is None:
tables = domains.table
if isinstance(tables, list):
config = | pd.concat(tables) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 8 13:32:06 2021
Revisiting some older SKS, SKKS, SYNTH data to make plots for chapter 2 of my thesis
Chapter 2, i.e., global data collection chapter
Wrangles data and makes SI comp. plot, synthetics SNR v splitting params, |BAZ - SPOL| histograms, etc.,
@author: ja17375
"""
import pandas as pd
import matplotlib.pyplot as plt
RES_PATH = '/Users/ja17375/DiscrePy/Sheba/Results/'
def read_and_cat_synth():
df = | pd.DataFrame() | pandas.DataFrame |
# utilities
import pickle
import inflection
import warnings
from IPython.display import Image
from tabulate import tabulate
# data manipulation
import pandas as pd
import numpy as np
# plots
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
# for categorical correlations
from collections import Counter
from scipy.stats import chi2_contingency
from pyitlib import discrete_random_variable as drv
from itertools import permutations
# statistical tests
from statsmodels.stats.weightstats import DescrStatsW, CompareMeans
# Algorithms - Classifiers
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from imblearn.ensemble import BalancedRandomForestClassifier
import xgboost as xgb
from catboost import CatBoostClassifier
# metrics
from sklearn.metrics import precision_recall_fscore_support, confusion_matrix, f1_score
from sklearn.metrics import roc_auc_score, accuracy_score, cohen_kappa_score
from sklearn.metrics import precision_recall_curve, precision_score, recall_score
from sklearn.calibration import calibration_curve
class GeneralUtils(object):
def __init__(self):
pass
def test_package(self):
print("Hello! This is a test!")
# Function to set some notebook display parameters, such as figure size, number of output rows, etc.
def visual_settings(self):
plt.style.use('bmh')
plt.rcParams['figure.figsize'] = [20, 15]
plt.rcParams['font.size'] = 16
pd.options.display.max_columns = None
pd.options.display.max_rows = 100
| pd.set_option('display.expand_frame_repr', False) | pandas.set_option |
import pandas as pd
import numpy as np
from tqdm import tqdm
from Bio.PDB import Selection, PDBParser
import os
def extract_beads(pdb_path):
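    """Extract CA and CB bead coordinates for every standard residue in a
    PDB file and write them to '<pdb_path>_bead.csv'; returns 1 on success
    and 0 if the structure cannot be parsed or required atoms are missing."""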
amino_acids = pd.read_csv('/home/hyang/bio/erf/data/amino_acids.csv')
vocab_aa = [x.upper() for x in amino_acids.AA3C]
vocab_dict = {x.upper(): y for x, y in zip(amino_acids.AA3C, amino_acids.AA)}
p = PDBParser()
try:
structure = p.get_structure('X', pdb_path)
except:
return 0
residue_list = Selection.unfold_entities(structure, 'R')
ca_center_list = []
cb_center_list = []
res_name_list = []
res_num_list = []
chain_list = []
for res in residue_list:
if res.get_resname() not in vocab_aa:
# raise ValueError('protein has non natural amino acids')
continue
chain_list.append(res.parent.id)
res_name_list.append(vocab_dict[res.get_resname()])
res_num_list.append(res.id[1])
try:
ca_center_list.append(res['CA'].get_coord())
except KeyError:
return 0
if res.get_resname() != 'GLY':
try:
cb_center_list.append(res['CB'].get_coord())
except KeyError:
return 0
else:
cb_center_list.append(res['CA'].get_coord())
ca_center = np.vstack(ca_center_list)
cb_center = np.vstack(cb_center_list)
df = pd.DataFrame({'chain_id': chain_list,
'group_num': res_num_list,
'group_name': res_name_list,
'x': ca_center[:, 0],
'y': ca_center[:, 1],
'z': ca_center[:, 2],
'xcb': cb_center[:, 0],
'ycb': cb_center[:, 1],
'zcb': cb_center[:, 2]})
df.to_csv(f'{pdb_path}_bead.csv', index=False)
return 1
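# A hedged usage sketch for a single structure (the path below is a
# placeholder, not a file shipped with this code):
#
#   if extract_beads('/path/to/1abc.pdb'):
#       beads = pd.read_csv('/path/to/1abc.pdb_bead.csv')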
def extract_ru():
# decoy_set = '4state_reduced'
# decoy_set = 'lattice_ssfit'
# decoy_set = 'lmds'
decoy_set = 'lmds_v2'
root_dir = f'/home/hyang/bio/erf/data/decoys/rudecoy/multiple/{decoy_set}'
pdb_id_list = pd.read_csv(f'{root_dir}/list', header=None, names=['pdb'])['pdb'].values
modified_pdb_id = []
for pdb_id in pdb_id_list:
pdb_list = pd.read_csv(f'{root_dir}/{pdb_id}/list', header=None, names=['pdb'])['pdb'].values
bad_list = []
for i, pdb in tqdm(enumerate(pdb_list)):
pdb_path = f'{root_dir}/{pdb_id}/{pdb}'
if os.path.exists(pdb_path + '_bead.csv'):
continue
if os.path.exists(pdb_path):
result = extract_beads(pdb_path)
else:
result = 0
if result == 0:
# some structure predictions only have CA atoms.
bad_list.append(pdb)
pdb_list[i] = '0'
if len(bad_list) > 0:
pdb_list = pdb_list[pdb_list != '0']
modified_pdb_id.append(pdb_id)
df = pd.DataFrame({'pdb': pdb_list})
df.to_csv(f'{root_dir}/{pdb_id}/flist.txt', index=False)
def match_rmsd():
# decoy_set = '4state_reduced'
# decoy_set = 'lattice_ssfit'
# decoy_set = 'lmds'
decoy_set = 'lmds_v2'
root_dir = f'/home/hyang/bio/erf/data/decoys/rudecoy/multiple/{decoy_set}'
pdb_id_list = pd.read_csv(f'{root_dir}/list', header=None, names=['pdb'])['pdb'].values
for pdb_id in pdb_id_list:
df = | pd.read_csv(f'{root_dir}/{pdb_id}/flist.txt') | pandas.read_csv |
import sys
import seaborn as sns
import prince
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy.cluster.hierarchy as sch
from sklearn.cluster import KMeans, DBSCAN, Birch, MeanShift, \
SpectralClustering, AffinityPropagation, FeatureAgglomeration, AgglomerativeClustering
from sklearn.decomposition import PCA, KernelPCA, LatentDirichletAllocation, NMF, \
IncrementalPCA, SparsePCA, TruncatedSVD, MiniBatchDictionaryLearning, FastICA
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
from sklearn.manifold import MDS, Isomap, TSNE, LocallyLinearEmbedding
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import plotly.express as px
from plotly.offline import plot
from curami.commons import file_utils
sample_count_string = "total samples"
av_count_string = "total attributes"
unique_attribute_count_string = "unique attribute count"
unique_value_count_string = "unique value count"
def main(*args):
# plot_correlation_heatmap()
# cluster()
# clustering_dbscan()
# pca_plot_test()
# mca_plot_test()
# extract_plot_test()
extract_cluster_plot()
# hierarchical_clustering()
def plot_correlation_heatmap():
pd_unique_attributes = pd.read_csv(file_utils.all_data_file, encoding="utf-8")
plt.figure(figsize=(20, 20))
corr = pd_unique_attributes.corr()
print(corr)
fig = sns.heatmap(corr, square=True)
fig.axes.set_title("Attribute Correlation", fontsize=20)
plt.tick_params(labelsize=10)
plt.savefig(file_utils.results_directory + "correlation_heatmap.png")
plt.show()
def clustering_kmeans():
pd_unique_attributes = pd.read_csv(file_utils.all_data_file, encoding="utf-8")
# pd_unique_attributes.replace(1.0, 1, inplace=True)
# pd_unique_attributes.fillna(0, inplace=True)
# pca = PCA(n_components=20).fit(pd_unique_attributes[0:100000])
# pca_2d = pca.transform(pd_unique_attributes)
print(pd_unique_attributes.describe())
print(pd_unique_attributes.dtypes)
del pd_unique_attributes['accession']
kmeans = KMeans(n_clusters=10, random_state=0).fit(pd_unique_attributes)
labels = kmeans.labels_
centers = kmeans.cluster_centers_
pd_kmeans = pd.DataFrame(labels)
pd_unique_attributes.insert(pd_unique_attributes.shape[1], 'kmeans', pd_kmeans)
fig = plt.figure()
ax = fig.add_subplot(111)
scatter = ax.scatter(pd_unique_attributes['organism'], pd_unique_attributes['sex'],
c=pd_kmeans[0], s=50)
ax.set_title('K-Means Clustering')
ax.set_xlabel('organism')
ax.set_ylabel('sex')
plt.colorbar(scatter)
plt.show()
def clustering_dbscan():
pd_unique_attributes = pd.read_csv(file_utils.all_data_file, encoding="utf-8").sample(50000)
print(pd_unique_attributes.describe())
print(pd_unique_attributes.dtypes)
accessions = pd_unique_attributes['accession']
del pd_unique_attributes['accession']
dbscan = DBSCAN(eps=0.3, min_samples=1000, metric='euclidean')
dbscan.fit(pd_unique_attributes)
labels = dbscan.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
print(labels)
print("no of clusters: %d" % n_clusters_)
print("noise points: %d" % n_noise_)
pca = PCA(n_components=2)
pca_2d = pca.fit_transform(pd_unique_attributes)
plt.scatter(pca_2d[:, 0], pca_2d[:, 1], c=(labels + 2) / 20)
plt.show()
def pca_plot_test():
pd_unique_attributes = pd.read_csv(file_utils.all_data_file, encoding="utf-8")
print(pd_unique_attributes.describe())
print(pd_unique_attributes.dtypes)
accessions = pd_unique_attributes['accession']
del pd_unique_attributes['accession']
pca = PCA(n_components=3)
pca_2d = pca.fit_transform(pd_unique_attributes)
fig = plt.figure()
ax = fig.add_subplot(221, projection='3d')
ax.scatter(pca_2d[:, 0], pca_2d[:, 1], pca_2d[:, 2])
ax = fig.add_subplot(222)
ax.scatter(pca_2d[:, 0], pca_2d[:, 1])
ax = fig.add_subplot(223)
ax.scatter(pca_2d[:, 0], pca_2d[:, 2])
ax = fig.add_subplot(224)
ax.scatter(pca_2d[:, 1], pca_2d[:, 2])
plt.show()
def mca_plot_test():
pd_unique_attributes = pd.read_csv(file_utils.all_data_file, encoding="utf-8")
print(pd_unique_attributes.describe())
print(pd_unique_attributes.dtypes)
accessions = pd_unique_attributes['accession']
del pd_unique_attributes['accession']
mca = prince.MCA(n_components=3).fit(pd_unique_attributes)
mca_2d = mca.transform(pd_unique_attributes)
fig = plt.figure()
ax = fig.add_subplot(221, projection='3d')
ax.scatter(mca_2d[0], mca_2d[1], mca_2d[2])
ax1 = fig.add_subplot(222)
ax1.scatter(mca_2d[0], mca_2d[1])
ax1 = fig.add_subplot(223)
ax1.scatter(mca_2d[0], mca_2d[2])
ax1 = fig.add_subplot(224)
ax1.scatter(mca_2d[1], mca_2d[2])
plt.show()
def extract_plot_test():
pd_unique_attributes = pd.read_csv(file_utils.all_data_file, encoding="utf-8")#.sample(50000)
print(pd_unique_attributes.describe())
print(pd_unique_attributes.dtypes)
accessions = pd_unique_attributes['accession']
del pd_unique_attributes['accession']
# model = KernelPCA(n_components=3, kernel='rbf') # memory error
# model = MDS(n_components=3, n_init=12, max_iter=1200, metric=True, n_jobs=4, random_state=2019) # memory error
# model = Isomap(n_components=3, n_jobs = 4, n_neighbors = 5) # memory error
# model = LocallyLinearEmbedding(n_components=3, n_neighbors = 10,method = 'modified', n_jobs = 4, random_state=2019) # memory error
# model = LinearDiscriminantAnalysis(n_components=3) # need target labels
model = PCA(n_components=3)
# model = NMF(n_components=3, init='random', random_state=0)
# model = IncrementalPCA(n_components=3)
# model = SparsePCA(n_components=3, alpha=0.0001, random_state=2019, n_jobs=-1)
# model = TruncatedSVD(n_components=3,algorithm='randomized', random_state=2019, n_iter=5)
# model = GaussianRandomProjection(n_components=3,eps = 0.5, random_state=2019)
# model = SparseRandomProjection(n_components=3,density = 'auto', eps = 0.5, random_state=2019, dense_output = False)
# model = MiniBatchDictionaryLearning(n_components=3,batch_size = 200,alpha = 1,n_iter = 25, random_state=2019)
# model = FastICA(n_components=154, algorithm = 'parallel',whiten = True,max_iter = 100, random_state=2019)
# model = TSNE(n_components=3,learning_rate=300,perplexity = 30,early_exaggeration = 12,init = 'random', random_state=2019) # expensive, didn't give good results
transformed_data = model.fit_transform(pd_unique_attributes)
fig = plt.figure()
fig.add_subplot(221, projection='3d').scatter(transformed_data[:, 0], transformed_data[:, 1], transformed_data[:, 2])
fig.add_subplot(222).scatter(transformed_data[:, 0], transformed_data[:, 1])
fig.add_subplot(223).scatter(transformed_data[:, 0], transformed_data[:, 2])
fig.add_subplot(224).scatter(transformed_data[:, 1], transformed_data[:, 2])
plt.show()
def extract_cluster_plot():
# pd_unique_attributes = pd.read_csv(file_utils.all_data_file, encoding="utf-8").sample(500000)
pd_unique_attributes = pd.read_csv(file_utils.all_data_file, encoding="utf-8")
print(pd_unique_attributes.describe())
print(pd_unique_attributes.dtypes)
accessions = pd_unique_attributes['accession']
del pd_unique_attributes['accession']
model = PCA(n_components=5)
transformed_data = model.fit_transform(pd_unique_attributes)
# cluster_model = SpectralClustering(n_clusters=10) # memory error
# cluster_model = AffinityPropagation() # memory error
# cluster_model = FeatureAgglomeration(n_clusters=5) # doesn't return labels, but clusters well
# cluster_model = DBSCAN(eps=0.3, min_samples=1000, metric='euclidean')
cluster_model = KMeans(n_clusters=20)
# cluster_model = Birch(n_clusters=10)
# cluster_model = MeanShift() # very good 4 clusters, though expensive
cluster_model.fit(transformed_data)
labels = cluster_model.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
print(labels)
print("no of clusters: %d" % n_clusters_)
print("noise points: %d" % n_noise_)
model = PCA(n_components=3)
transformed_data = model.fit_transform(pd_unique_attributes)
# save labeled data
pd_unique_attributes['accession'] = accessions
pd_unique_attributes['cluster_labels'] = labels
pd_unique_attributes.sort_values("cluster_labels", inplace=True)
pd_unique_attributes.to_csv(file_utils.clustered_samples_file, index=False, encoding=file_utils.encoding)
# fig = plt.figure()
# fig.add_subplot(221, projection='3d').scatter(transformed_data[:, 0], transformed_data[:, 1], transformed_data[:, 2], c=(labels + 2) / 20)
# # fig.add_subplot(222, projection='3d').scatter(transformed_data[:, 0], transformed_data[:, 1], transformed_data[:, 3], c=(labels + 2) / 20)
# # fig.add_subplot(223, projection='3d').scatter(transformed_data[:, 0], transformed_data[:, 1], transformed_data[:, 4], c=(labels + 2) / 20)
# # fig.add_subplot(224, projection='3d').scatter(transformed_data[:, 2], transformed_data[:, 3], transformed_data[:, 4], c=(labels + 2) / 20)
# fig.add_subplot(222).scatter(transformed_data[:, 0], transformed_data[:, 1], c=(labels + 1) / 20)
# fig.add_subplot(223).scatter(transformed_data[:, 0], transformed_data[:, 2], c=(labels + 1) / 20)
# fig.add_subplot(224).scatter(transformed_data[:, 1], transformed_data[:, 2], c=(labels + 1) / 20)
# plt.show()
transformed_data_df = pd.DataFrame(data=transformed_data, columns=["feature_1", "feature_2", "feature_3"])
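# Illustrative sketch (added, not part of the original script): one plausible way to use
# the plotly imports above to inspect the clusters interactively. The column names and
# the output filename are assumptions for illustration only.
# transformed_data_df["cluster_labels"] = labels
# fig_3d = px.scatter_3d(transformed_data_df, x="feature_1", y="feature_2", z="feature_3",
#                        color="cluster_labels", opacity=0.5)
# plot(fig_3d, filename=file_utils.results_directory + "cluster_scatter_3d.html")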
import datetime
import os
from concurrent.futures import ProcessPoolExecutor
from math import ceil
import pandas as pd
# In[] Read in the source data
def get_source_data():
# Path to the source data
DataPath = 'data/'
# Read in the source data
off_train = pd.read_csv(os.path.join(DataPath, 'ccf_offline_stage1_train.csv'),
parse_dates=['Date_received', 'Date'])
off_train.columns = ['User_id', 'Merchant_id', 'Coupon_id', 'Discount_rate', 'Distance', 'Date_received', 'Date']
on_train = pd.read_csv(os.path.join(DataPath, 'ccf_online_stage1_train.csv'), parse_dates=['Date_received', 'Date'])
on_train.columns = ['User_id', 'Merchant_id', 'Action', 'Coupon_id', 'Discount_rate', 'Date_received', 'Date']
off_test = pd.read_csv(os.path.join(DataPath, 'ccf_offline_stage1_test_revised.csv'), parse_dates=['Date_received'])
off_test.columns = ['User_id', 'Merchant_id', 'Coupon_id', 'Discount_rate', 'Distance', 'Date_received']
print(off_train.info())
print(off_train.head(5))
return off_train, on_train, off_test
# In[] Special handling of null / NA values
def null_process_offline(dataset, predict=False):
dataset.Distance.fillna(11, inplace=True)
dataset.Distance = dataset.Distance.astype(int)
dataset.Coupon_id.fillna(0, inplace=True)
dataset.Coupon_id = dataset.Coupon_id.astype(int)
dataset.Date_received.fillna(date_null, inplace=True)
dataset[['discount_rate_x', 'discount_rate_y']] = dataset[dataset.Discount_rate.str.contains(':') == True][
'Discount_rate'].str.split(':', expand=True).astype(int)
dataset['discount_rate'] = 1 - dataset.discount_rate_y / dataset.discount_rate_x
dataset.discount_rate = dataset.discount_rate.fillna(dataset.Discount_rate).astype(float)
if predict:
return dataset
else:
dataset.Date.fillna(date_null, inplace=True)
return dataset
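# Worked example of the Discount_rate handling above (added comment, values made up):
# a "spend X save Y" coupon such as '150:20' is split into discount_rate_x=150 and
# discount_rate_y=20 and converted to 1 - 20/150 ≈ 0.867, while an already numeric
# rate such as '0.9' is kept as 0.9 by the final fillna(...).astype(float) step.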
def null_process_online(dataset):
dataset.Coupon_id.fillna(0, inplace=True)
# online.Coupon_id = online.Coupon_id.astype(int)
dataset.Date_received.fillna(date_null, inplace=True)
dataset.Date.fillna(date_null, inplace=True)
return dataset
# In[] Build the cross-training datasets
def data_process(off_train, on_train, off_test):
# train feature split
# Cross-training set 1: coupon received date after April 14 and before May 14
time_range = ['2016-04-16', '2016-05-15']
dataset1 = off_train[(off_train.Date_received >= time_range[0]) & (off_train.Date_received <= time_range[1])].copy()
dataset1['label'] = 0
dataset1.loc[
(dataset1.Date != date_null) & (dataset1.Date - dataset1.Date_received <= datetime.timedelta(15)), 'label'] = 1
# Offline features for cross-training set 1: offline records whose coupon receipt/usage dates fall after Jan 1 and before Apr 13
time_range_date_received = ['2016-01-01', '2016-03-31']
time_range_date = ['2016-01-01', '2016-04-15']
feature1_off = off_train[(off_train.Date >= time_range_date[0]) & (off_train.Date <= time_range_date[1]) | (
(off_train.Coupon_id == 0) & (off_train.Date_received >= time_range_date_received[0]) & (
off_train.Date_received <= time_range_date_received[1]))]
# Online features for cross-training set 1: online records whose coupon receipt/usage dates fall after Jan 1 and before Apr 13 [on_train.date == 'null' to on_train.coupon_id == 0]
feature1_on = on_train[(on_train.Date >= time_range_date[0]) & (on_train.Date <= time_range_date[1]) | (
(on_train.Coupon_id == 0) & (on_train.Date_received >= time_range_date_received[0]) & (
on_train.Date_received <= time_range_date_received[1]))]
# Cross-training set 2: coupon received date after May 15 and before June 15
time_range = ['2016-05-16', '2016-06-15']
dataset2 = off_train[(off_train.Date_received >= time_range[0]) & (off_train.Date_received <= time_range[1])]
dataset2['label'] = 0
dataset2.loc[
(dataset2.Date != date_null) & (dataset2.Date - dataset2.Date_received <= datetime.timedelta(15)), 'label'] = 1
# Offline features for cross-training set 2: offline records whose coupon receipt/usage dates fall after Feb 1 and before May 14
time_range_date_received = ['2016-02-01', '2016-04-30']
time_range_date = ['2016-02-01', '2016-05-15']
feature2_off = off_train[(off_train.Date >= time_range_date[0]) & (off_train.Date <= time_range_date[1]) | (
(off_train.Coupon_id == 0) & (off_train.Date_received >= time_range_date_received[0]) & (
off_train.Date_received <= time_range_date_received[1]))]
# Online features for cross-training set 2: online records whose coupon receipt/usage dates fall after Feb 1 and before May 14
feature2_on = on_train[(on_train.Date >= time_range_date[0]) & (on_train.Date <= time_range_date[1]) | (
(on_train.Coupon_id == 0) & (on_train.Date_received >= time_range_date_received[0]) & (
on_train.Date_received <= time_range_date_received[1]))]
# Test set
dataset3 = off_test
# Offline features for the test set: offline records whose coupon receipt/usage dates fall after Mar 15 and before Jun 30
time_range = ['2016-03-16', '2016-06-30']
feature3_off = off_train[((off_train.Date >= time_range[0]) & (off_train.Date <= time_range[1])) | (
(off_train.Coupon_id == 0) & (off_train.Date_received >= time_range[0]) & (
off_train.Date_received <= time_range[1]))]
# Online features for the test set: online records whose coupon receipt/usage dates fall after Mar 15 and before Jun 30
feature3_on = on_train[((on_train.Date >= time_range[0]) & (on_train.Date <= time_range[1])) | (
(on_train.Coupon_id == 0) & (on_train.Date_received >= time_range[0]) & (
on_train.Date_received <= time_range[1]))]
# get train feature
ProcessDataSet1 = get_features(dataset1, feature1_off, feature1_on)
ProcessDataSet2 = get_features(dataset2, feature2_off, feature2_on)
ProcessDataSet3 = get_features(dataset3, feature3_off, feature3_on)
return ProcessDataSet1, ProcessDataSet2, ProcessDataSet3
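# Added summary comment: each labelled window (dataset1, dataset2) pairs one month of
# coupon receipts with features computed from the preceding months of offline and
# online behaviour (feature1_*, feature2_*), and the test set reuses the most recent
# feature window (feature3_*); a received coupon is labelled 1 when it was used within
# 15 days of receipt.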
def get_features(dataset, feature_off, feature_on):
dataset = get_offline_features(dataset, feature_off)
return get_online_features(feature_on, dataset)
# In[] Define the feature-extraction functions
def get_offline_features(X, offline):
# X = X[:1000]
print(len(X), len(X.columns))
temp = offline[offline.Coupon_id != 0]
coupon_consume = temp[temp.Date != date_null]
coupon_no_consume = temp[temp.Date == date_null]
user_coupon_consume = coupon_consume.groupby('User_id')
X['weekday'] = X.Date_received.dt.weekday
X['day'] = X.Date_received.dt.day
# # Number of coupon redemptions at each distance
# temp = coupon_consume.groupby('Distance').size().reset_index(name='distance_0')
# X = pd.merge(X, temp, how='left', on='Distance')
#
# # Number of unredeemed coupons at each distance
# temp = coupon_no_consume.groupby('Distance').size().reset_index(name='distance_1')
# X = pd.merge(X, temp, how='left', on='Distance')
#
# # Number of coupons received at each distance
# X['distance_2'] = X.distance_0 + X.distance_1
#
# # Coupon redemption rate at each distance
# X['distance_3'] = X.distance_0 / X.distance_2
# temp = coupon_consume[coupon_consume.Distance != 11].groupby('Distance').size()
# temp['d4'] = temp.Distance.sum() / len(temp)
# X = pd.merge(X, temp, how='left', on='Distance')
'''user features'''
# Number of coupon redemptions by the user
temp = user_coupon_consume.size().reset_index(name='u2')
X = pd.merge(X, temp, how='left', on='User_id')
# X.u2.fillna(0, inplace=True)
# X.u2 = X.u2.astype(int)
# Number of coupons received but not used
temp = coupon_no_consume.groupby('User_id').size().reset_index(name='u3')
X = pd.merge(X, temp, how='left', on='User_id')
# Ratio of coupons used to coupons not used
X['u19'] = X.u2 / X.u3
# Number of coupons received
X['u1'] = X.u2.fillna(0) + X.u3.fillna(0)
# Coupon redemption rate
X['u4'] = X.u2 / X.u1
# Number of regular (no-coupon) purchases
temp = offline[(offline.Coupon_id == 0) & (offline.Date != date_null)]
temp1 = temp.groupby('User_id').size().reset_index(name='u5')
X = pd.merge(X, temp1, how='left', on='User_id')
# Total number of purchases
X['u25'] = X.u2 + X.u5
# Share of the user's purchases made with a coupon
X['u20'] = X.u2 / X.u25
# Average interval between regular purchases
temp = pd.merge(temp, temp.groupby('User_id').Date.max().reset_index(name='max'))
temp = pd.merge(temp, temp.groupby('User_id').Date.min().reset_index(name='min'))
temp = pd.merge(temp, temp.groupby('User_id').size().reset_index(name='len'))
temp['u6'] = ((temp['max'] - temp['min']).dt.days / (temp['len'] - 1))
temp = temp.drop_duplicates('User_id')
X = pd.merge(X, temp[['User_id', 'u6']], how='left', on='User_id')
# Average interval between coupon redemptions
temp = pd.merge(coupon_consume, user_coupon_consume.Date.max().reset_index(name='max'))
temp = pd.merge(temp, temp.groupby('User_id').Date.min().reset_index(name='min'))
temp = pd.merge(temp, temp.groupby('User_id').size().reset_index(name='len'))
temp['u7'] = ((temp['max'] - temp['min']).dt.days / (temp['len'] - 1))
temp = temp.drop_duplicates('User_id')
X = pd.merge(X, temp[['User_id', 'u7']], how='left', on='User_id')
# Average number of regular purchases within 15 days
X['u8'] = X.u6 / 15
# Average number of coupon redemptions within 15 days
X['u9'] = X.u7 / 15
# Average gap between receiving a coupon and using it
temp = coupon_consume.copy()
temp['days'] = (temp.Date - temp.Date_received).dt.days
temp = (temp.groupby('User_id').days.sum() / temp.groupby('User_id').size()).reset_index(name='u10')
X = pd.merge(X, temp, how='left', on='User_id')
# Scaled value of the receipt-to-use gap relative to the 15-day window
X['u11'] = X.u10 / 15
# Number of coupons used within 15 days of receipt
temp = coupon_consume.copy()
temp['days'] = (temp.Date - temp.Date_received).dt.days
temp = temp[temp.days <= 15]
temp = temp.groupby('User_id').size().reset_index(name='u21')
X = pd.merge(X, temp, how='left', on='User_id')
# Coupons used within 15 days divided by total coupons used
X['u22'] = X.u21 / X.u2
# Coupons used within 15 days divided by coupons received but not used
X['u23'] = X.u21 / X.u3
# Coupons used within 15 days divided by total coupons received
X['u24'] = X.u21 / X.u1
# Average discount rate of redeemed coupons
temp = user_coupon_consume.discount_rate.mean().reset_index(name='u45')
X = pd.merge(X, temp, how='left', on='User_id')
# Minimum discount rate among the user's redeemed coupons
temp = user_coupon_consume.discount_rate.min().reset_index(name='u27')
X = pd.merge(X, temp, how='left', on='User_id')
# Maximum discount rate among the user's redeemed coupons
temp = user_coupon_consume.discount_rate.max().reset_index(name='u28')
X = pd.merge(X, temp, how='left', on='User_id')
# Number of distinct coupons the user has redeemed
temp = coupon_consume.groupby(['User_id', 'Coupon_id']).size()
temp = temp.groupby('User_id').size().reset_index(name='u32')
X = pd.merge(X, temp, how='left', on='User_id')
# Number of distinct coupons the user has received
temp = offline[offline.Date_received != date_null]
temp = temp.groupby(['User_id', 'Coupon_id']).size().reset_index(name='u47')
X = pd.merge(X, temp, how='left', on=['User_id', 'Coupon_id'])
# Share of the user's distinct received coupons that were redeemed
X['u33'] = X.u32 / X.u47
# Average number of redemptions per distinct coupon
X['u34'] = X.u2 / X.u47
# Average user-merchant distance for redeemed coupons
temp = offline[(offline.Coupon_id != 0) & (offline.Date != date_null) & (offline.Distance != 11)]
temp = temp.groupby('User_id').Distance
temp = pd.merge(temp.count().reset_index(name='x'), temp.sum().reset_index(name='y'), on='User_id')
temp['u35'] = temp.y / temp.x
temp = temp[['User_id', 'u35']]
X = pd.merge(X, temp, how='left', on='User_id')
# Minimum user-merchant distance among the user's redeemed coupons
temp = coupon_consume[coupon_consume.Distance != 11]
temp = temp.groupby('User_id').Distance.min().reset_index(name='u36')
X = pd.merge(X, temp, how='left', on='User_id')
# Maximum user-merchant distance among the user's redeemed coupons
temp = coupon_consume[coupon_consume.Distance != 11]
temp = temp.groupby('User_id').Distance.max().reset_index(name='u37')
X = pd.merge(X, temp, how='left', on='User_id')
# Coupon type
discount_types = [
'0.2', '0.5', '0.6', '0.7', '0.75', '0.8', '0.85', '0.9', '0.95', '30:20', '50:30', '10:5',
'20:10', '100:50', '200:100', '50:20', '30:10', '150:50', '100:30', '20:5', '200:50', '5:1',
'50:10', '100:20', '150:30', '30:5', '300:50', '200:30', '150:20', '10:1', '50:5', '100:10',
'200:20', '300:30', '150:10', '300:20', '500:30', '20:1', '100:5', '200:10', '30:1', '150:5',
'300:10', '200:5', '50:1', '100:1',
]
X['discount_type'] = -1
for k, v in enumerate(discount_types):
X.loc[X.Discount_rate == v, 'discount_type'] = k
# Number of times each coupon type was received
temp = offline.groupby(['User_id', 'Discount_rate']).size().reset_index(name='u41')
X = pd.merge(X, temp, how='left', on=['User_id', 'Discount_rate'])
# Number of times each coupon type was used
temp = coupon_consume.groupby(['User_id', 'Discount_rate']).size().reset_index(name='u42')
X = pd.merge(X, temp, how='left', on=['User_id', 'Discount_rate'])
# Number of times each coupon type was received but not used
temp = coupon_no_consume.groupby(['User_id', 'Discount_rate']).size().reset_index(name='u43')
X = pd.merge(X, temp, how='left', on=['User_id', 'Discount_rate'])
# Redemption rate per coupon type
X['u44'] = X.u42 / X.u41
# Number of threshold ("spend X save Y") coupons received
temp = offline[offline.Discount_rate.str.contains(':') == True]
temp = temp.groupby('User_id').size().reset_index(name='u48')
X = pd.merge(X, temp, how='left', on='User_id')
# Number of percentage-discount coupons received
temp = offline[offline.Discount_rate.str.contains('\.') == True]
temp = temp.groupby('User_id').size().reset_index(name='u49')
X = pd.merge(X, temp, how='left', on='User_id')
'''offline merchant features'''
# Number of purchases at the merchant
temp = offline[offline.Date != date_null].groupby('Merchant_id').size().reset_index(name='m0')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Number of the merchant's coupons redeemed after being received
temp = coupon_consume.groupby('Merchant_id').size().reset_index(name='m1')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Number of regular (no-coupon) purchases at the merchant
X['m2'] = X.m0.fillna(0) - X.m1.fillna(0)
# Number of the merchant's coupons that were received
temp = offline[offline.Date_received != date_null].groupby('Merchant_id').size().reset_index(name='m3')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Redemption rate of the merchant's received coupons
X['m4'] = X.m1 / X.m3
# Number of the merchant's coupons received but not redeemed
temp = coupon_no_consume.groupby('Merchant_id').size().reset_index(name='m7')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Number of the merchant's coupons received on the same day
temp = X[X.Date_received != date_null]
temp = temp.groupby(['Merchant_id', 'Date_received']).size().reset_index(name='m5')
X = pd.merge(X, temp, how='left', on=['Merchant_id', 'Date_received'])
# Number of distinct users receiving the merchant's coupons on the same day
temp = X[X.Date_received != date_null]
temp = temp.groupby(['User_id', 'Merchant_id', 'Date_received']).size().reset_index()
temp = temp.groupby(['Merchant_id', 'Date_received']).size().reset_index(name='m6')
X = pd.merge(X, temp, how='left', on=['Merchant_id', 'Date_received'])
# Average discount rate of the merchant's redeemed coupons
temp = coupon_consume.groupby('Merchant_id').discount_rate.mean().reset_index(name='m8')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Minimum discount rate of the merchant's redeemed coupons
temp = coupon_consume.groupby('Merchant_id').discount_rate.max().reset_index(name='m9')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Maximum discount rate of the merchant's redeemed coupons
temp = coupon_consume.groupby('Merchant_id').discount_rate.min().reset_index(name='m10')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Number of distinct users who redeemed the merchant's coupons
temp = coupon_consume.groupby(['Merchant_id', 'User_id']).size()
temp = temp.groupby('Merchant_id').size().reset_index(name='m11')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Number of distinct users who received the merchant's coupons
temp = offline[offline.Date_received != date_null].groupby(['Merchant_id', 'User_id']).size()
temp = temp.groupby('Merchant_id').size().reset_index(name='m12')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Share of the merchant's distinct receiving users who redeemed a coupon
X['m13'] = X.m11 / X.m12
# Average number of the merchant's coupons redeemed per user
X['m14'] = X.m1 / X.m12
# Number of the merchant's distinct coupons that were redeemed
temp = coupon_consume.groupby(['Merchant_id', 'Coupon_id']).size()
temp = temp.groupby('Merchant_id').size().reset_index(name='m15')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Number of the merchant's distinct coupons that were received
temp = offline[offline.Date_received != date_null].groupby(['Merchant_id', 'Coupon_id']).size()
temp = temp.groupby('Merchant_id').count().reset_index(name='m18')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Share of the merchant's distinct received coupons that were redeemed
X['m19'] = X.m15 / X.m18
# Average interval between redemptions of the merchant's coupons
temp = pd.merge(coupon_consume, coupon_consume.groupby('Merchant_id').Date.max().reset_index(name='max'))
temp = pd.merge(temp, temp.groupby('Merchant_id').Date.min().reset_index(name='min'))
temp = pd.merge(temp, temp.groupby('Merchant_id').size().reset_index(name='len'))
temp['m20'] = ((temp['max'] - temp['min']).dt.days / (temp['len'] - 1))
temp = temp.drop_duplicates('Merchant_id')
X = pd.merge(X, temp[['Merchant_id', 'm20']], how='left', on='Merchant_id')
# Average user-merchant distance for the merchant's redeemed coupons
temp = coupon_consume[coupon_consume.Distance != 11].groupby('Merchant_id').Distance
temp = pd.merge(temp.count().reset_index(name='x'), temp.sum().reset_index(name='y'), on='Merchant_id')
temp['m21'] = temp.y / temp.x
temp = temp[['Merchant_id', 'm21']]
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Minimum user-merchant distance for the merchant's redeemed coupons
temp = coupon_consume[coupon_consume.Distance != 11]
temp = temp.groupby('Merchant_id').Distance.min().reset_index(name='m22')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Maximum user-merchant distance for the merchant's redeemed coupons
temp = coupon_consume[coupon_consume.Distance != 11]
temp = temp.groupby('Merchant_id').Distance.max().reset_index(name='m23')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import dataclasses
from dataclasses import dataclass
import json
from pathlib import Path
import numpy as np
import pandas as pd
from axcell.models.structure.nbsvm import *
from sklearn.metrics import confusion_matrix
from matplotlib import pyplot as plt
import seaborn as sn
from enum import Enum
import pickle
class Labels(Enum):
OTHER=0
DATASET=1
PAPER_MODEL=2
COMPETING_MODEL=3
METRIC=4
EMPTY=5
class LabelsExt(Enum):
OTHER=0
PARAMS=6
TASK=7
DATASET=1
SUBDATASET=8
PAPER_MODEL=2
BEST_MODEL=9
ENSEMBLE_MODEL=10
COMPETING_MODEL=3
METRIC=4
EMPTY=5
label_map = {
"dataset": Labels.DATASET.value,
"dataset-sub": Labels.DATASET.value,
"model-paper": Labels.PAPER_MODEL.value,
"model-best": Labels.PAPER_MODEL.value,
"model-ensemble": Labels.PAPER_MODEL.value,
"model-competing": Labels.COMPETING_MODEL.value,
"dataset-metric": Labels.METRIC.value
}
label_map_ext = {
"dataset": LabelsExt.DATASET.value,
"dataset-sub": LabelsExt.SUBDATASET.value,
"model-paper": LabelsExt.PAPER_MODEL.value,
"model-best": LabelsExt.BEST_MODEL.value,
"model-ensemble": LabelsExt.ENSEMBLE_MODEL.value,
"model-competing": LabelsExt.COMPETING_MODEL.value,
"dataset-metric": LabelsExt.METRIC.value,
"model-params": LabelsExt.PARAMS.value,
"dataset-task": LabelsExt.TASK.value
}
# put here to avoid recompiling, used only in _limit_context
elastic_tag_split_re = re.compile("(<b>.*?</b>)")
# e = Experiment(remove_num=False, drop_duplicates=False, vectorizer='count',
# this_paper=True, merge_fragments=True, merge_type='concat',
# evidence_source='text_highlited', split_btags=True, fixed_tokenizer=True,
# fixed_this_paper=True, mask=False, evidence_limit=None, context_tokens=None,
# analyzer='word', lowercase=True, class_weight='balanced', multinomial_type='multinomial',
# solver='lbfgs', C=0.1, dual=False, penalty='l2', ngram_range=[1, 3],
# min_df=10, max_df=0.9, max_iter=1000, results={}, has_model=False)
# ULMFiT related parameters
# remove_num, drop_duplicates, this_paper, merge_fragments, merge_type, evidence_source, split_btags
# fixed_tokenizer?, fixed_this_paper (remove), mask, evidence_limit, context_tokens, lowercase
# class_weight? (consider adding support),
@dataclass
class Experiment:
this_paper: bool = False
merge_fragments: bool = False
merge_type: str = "concat" # "concat", "vote_maj", "vote_avg", "vote_max"
evidence_source: str = "text" # "text" or "text_highlited"
split_btags: bool = False # <b>Test</b> -> <b> Test </b>
fixed_tokenizer: bool = False # if True, <b> and </b> are not split into < b > and < / b >
fixed_this_paper: bool = False # if True and this_paper, filter this_paper before merging fragments
mask: bool = False # if True and evidence_source = "text_highlited", replace <b>...</b> with xxmask
evidence_limit: int = None # maximum number of evidences per cell (grouped by (ext_id, this_paper))
context_tokens: int = None # max. number of words before <b> and after </b>
lowercase: bool = True
remove_num: bool = True
drop_duplicates: bool = True
mark_this_paper: bool = False
distinguish_model_source: bool = True
results: dict = dataclasses.field(default_factory=dict)
has_model: bool = False # either there's already pretrained model or it's a saved experiment and there's a saved model as well
name: str = None
def _get_next_exp_name(self, dir_path):
dir_path = Path(dir_path)
files = [f.name for f in dir_path.glob("*.exp.json")]
for i in range(100000):
name = f"{i:05d}.exp.json"
if name not in files:
return dir_path / name
raise Exception("You have too many files in this dir, really!")
@staticmethod
def _dump_pickle(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f)
@staticmethod
def _load_pickle(path):
with open(path, 'rb') as f:
return pickle.load(f)
def _save_model(self, path):
self._dump_pickle(self._model, path)
def _load_model(self, path):
self._model = self._load_pickle(path)
return self._model
def load_model(self):
path = self._path.parent / f"{self._path.stem}.model"
return self._load_model(path)
def save_model(self, path):
if hasattr(self, "_model"):
self._save_model(path)
def save(self, dir_path):
dir_path = Path(dir_path)
dir_path.mkdir(exist_ok=True, parents=True)
filename = self._get_next_exp_name(dir_path)
j = dataclasses.asdict(self)
with open(filename, "wt") as f:
json.dump(j, f)
self.save_model(dir_path / f"{filename.stem}.model")
return filename.name
def to_df(self):
d = dataclasses.asdict(self)
res = d.pop("results")
d.update(res)
row = pd.DataFrame({k: [v] for k, v in d.items()})
return row
def new_experiment(self, **kwargs):
# reset this fields unless their provided in load()
kwargs.setdefault("has_model", False)
kwargs.setdefault("results", {})
return dataclasses.replace(self, **kwargs)
def update_results(self, **kwargs):
self.results.update(**kwargs)
def train_model(self, train_df, valid_df):
raise NotImplementedError("train_model should be implemented in subclass")
def get_trained_model(self, train_df, valid_df):
self._model = self.train_model(train_df, valid_df)
self.has_model = True
return self._model
def _limit_context(self, text):
parts = elastic_tag_split_re.split(text)
new_parts = []
end = len(parts)
for i, part in enumerate(parts):
if i % 2 == 0:
toks = tokenize(part)
if i == 0:
toks = toks[-self.context_tokens:]
elif i == end:
toks = toks[:self.context_tokens]
else:
j = len(toks) - 2 * self.context_tokens
if j > 0:
toks = toks[:self.context_tokens] + toks[-self.context_tokens:]
new_parts.append(' '.join(toks))
else:
new_parts.append(part)
return ' '.join(new_parts)
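# Illustrative example (added comment), assuming context_tokens=2: for an evidence
# string like "we train our model on CIFAR <b>ResNet-56</b> achieves the best accuracy",
# _limit_context keeps at most 2 tokens on either side of each <b>...</b> span, giving
# roughly "on CIFAR <b>ResNet-56</b> achieves the" (exact output depends on tokenize()).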
def _transform_df(self, df):
df.cell_reference = (df.cell_reference != '').astype(str)
df.cell_styles = df.cell_styles.astype(str)
if self.merge_type not in ["concat", "vote_maj", "vote_avg", "vote_max"]:
raise Exception(f"merge_type must be one of concat, vote_maj, vote_avg, vote_max, but {self.merge_type} was given")
if self.mark_this_paper and (self.merge_type != "concat" or self.this_paper):
raise Exception("merge_type must be 'concat' and this_paper must be false")
#df = df[df["cell_type"] != "table-meta"] # otherwise we get precision 0 on test set
if self.evidence_limit is not None:
df = df.groupby(by=["ext_id", "this_paper"]).head(self.evidence_limit)
if self.context_tokens is not None:
df.loc["text_highlited"] = df["text_highlited"].apply(self._limit_context)
df.loc["text"] = df["text_highlited"].str.replace("<b>", " ").replace("</b>", " ")
if self.evidence_source != "text":
df = df.copy(True)
if self.mask:
df["text"] = df[self.evidence_source].replace(re.compile("<b>.*?</b>"), " xxmask ")
else:
df["text"] = df[self.evidence_source]
elif self.mask:
raise Exception("Masking with evidence_source='text' makes no sense")
duplicates_columns = ["text", "cell_content", "cell_type", "row_context", "col_context", "cell_reference", "cell_layout", "cell_styles"]
columns_to_keep = ["ext_id", "cell_content", "cell_type", "row_context", "col_context", "cell_reference", "cell_layout", "cell_styles"]
if self.mark_this_paper:
df = df.groupby(by=columns_to_keep + ["this_paper"]).text.apply(
lambda x: "\n".join(x.values)).reset_index()
this_paper_map = {
True: "this paper",
False: "other paper"
}
df.text = "xxfld 3 " + df.this_paper.apply(this_paper_map.get) + " " + df.text
df = df.groupby(by=columns_to_keep).text.apply(
lambda x: " ".join(x.values)).reset_index()
elif not self.fixed_this_paper:
if self.merge_fragments and self.merge_type == "concat":
df = df.groupby(by=columns_to_keep + ["this_paper"]).text.apply(
lambda x: "\n".join(x.values)).reset_index()
if self.drop_duplicates:
df = df.drop_duplicates(duplicates_columns).fillna("")
if self.this_paper:
df = df[df.this_paper]
else:
if self.this_paper:
df = df[df.this_paper]
if self.merge_fragments and self.merge_type == "concat":
df = df.groupby(by=columns_to_keep).text.apply(
lambda x: "\n".join(x.values)).reset_index()
if self.drop_duplicates:
df = df.drop_duplicates(duplicates_columns).fillna("")
if self.split_btags:
df["text"] = df["text"].replace(re.compile(r"(\</?b\>)"), r" \1 ")
df = df.replace(re.compile(r"(xxref|xxanchor)-[\w\d-]*"), "\\1 ")
if self.remove_num:
df = df.replace(re.compile(r"(^|[ ])\d+\.\d+(\b|%)"), " xxnum ")
df = df.replace(re.compile(r"(^|[ ])\d+(\b|%)"), " xxnum ")
df = df.replace(re.compile(r"\bdata set\b"), " dataset ")
df["label"] = df["cell_type"].apply(lambda x: label_map.get(x, 0))
if not self.distinguish_model_source:
df["label"] = df["label"].apply(lambda x: x if x != Labels.COMPETING_MODEL.value else Labels.PAPER_MODEL.value)
df["label"] = | pd.Categorical(df["label"]) | pandas.Categorical |
"""
The grapevine variant pipeline outputs nucleotide mutations (SNPs, indels)
and amino acid substitutions (synonymous and nonsynonymous) in separate files.
However, they do not link the nucleotide mutations, or provide
amino acid indels, so we have to do that ourselves here.
Writes out nucleotide to amino acid links for SNPs that can be used to upload to mutation database.
Writes out nucleotide indels, but leaves empty amino acid links for upload to mutation database.
TODO: translate nucleotide indels to amino acid indels.
"""
import pandas as pd
import numpy as np
import Bio.SeqIO as SeqIO
from Bio.Seq import Seq
import argparse
import csv
from datetime import datetime
import subprocess
def get_aa_at_gene_pos(row, ref_aa_seq_dict):
"""
Helper function to apply on every row of a data frame
to find the reference amino acid at given a gene and amino acid position.
Parameters:
==============
- row: dict
Row of dataframe in dict form. Should have fields aa_pos (1-based aa position in gene), gene (gene name)
- ref_aa_seq_dict: SeqIO dict
SeqIO Dict containing reference amino acid sequence.
Should have format {gene_name: SeqRecord of gene amino acid sequence}
Returns:
==============
- aa: str
reference amino acid at gene and amino acid position specified in the row
"""
if not row["gene"] or row["gene"] not in ref_aa_seq_dict:
aa = ""
else:
aa_pos_1based = row["aa_pos"]
aa_pos_0based = aa_pos_1based - 1
# NB: the stop codon is never represented in the AA sequences, so they are 1AA shorter than they should be
if aa_pos_0based == len(ref_aa_seq_dict[row["gene"]].seq):
aa = "*"
else:
aa = ref_aa_seq_dict[row["gene"]].seq[aa_pos_0based]
return aa
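# Illustrative example (added comment, coordinates made up): for a row such as
# {"gene": "S", "aa_pos": 614} this returns ref_aa_seq_dict["S"].seq[613], the
# reference amino acid at position 614 of the S protein, and returns "*" when aa_pos
# points one position past the stored sequence (the unrepresented stop codon).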
def get_ref_at_nuc_pos(row, ref_nuc_seq_dict):
"""
Helper function to apply on every row of a data frame
to find the reference nucleotide at given a genomic nucleotide position.
Parameters:
==============
- row: dict
Row of dataframe in dict form. Should have fields pos (1-based nucleotide position)
- ref_nuc_seq_dict: SeqIO dict
SeqIO Dict containing reference genomic sequence.
Should have format {"MN908947.3": SeqRecord of genome nucleotide sequence}
Returns:
==============
- nuc: str
reference nucleotide at genomic nucleotide position specified in the row
"""
nuc_pos_1based = row["pos"]
nuc_pos_0based = nuc_pos_1based - 1
nuc = str(ref_nuc_seq_dict["MN908947.3"].seq[nuc_pos_0based:nuc_pos_0based + 1])
return nuc
def get_ref_multilen_at_nuc_pos(row, ref_nuc_seq_dict):
"""
Helper function to apply on every row of a data frame
to find the reference starting at given a genomic nucleotide position
and ending at a given length.
Parameters:
==============
- row: dict
Row of dataframe in dict form. Should have fields pos (1-based nucleotide position),
length (length of reference to extract in bp)
- ref_nuc_seq_dict: SeqIO dict
SeqIO Dict containing reference genomic sequence.
Should have format {"MN908947.3": SeqRecord of genome nucleotide sequence}
Returns:
==============
- nuc: str
reference nucleotide at genomic nucleotide position specified in the row
"""
nuc_pos_1based = row["pos"]
nuc_pos_0based = nuc_pos_1based - 1
nuc = str(ref_nuc_seq_dict["MN908947.3"].seq[nuc_pos_0based: nuc_pos_0based + row["length"]])
return nuc
def convert_nuc_pos_to_aa_pos(gene_df, nuc_mut_df):
"""
Helper function
to convert a nucleotide position to an amino acid position within a gene.
Parameters:
==============
- gene_df: pandas.DataFrame
dataframe specifying gene coordinates. Should have columns:
- start: nucleotide start position of gene (CDS) coding sequence with respect to genome, 1 based
- end: nucleotide end position of gene (CDS) coding sequence with respect to genome, 1 based
- gene: gene name
- cds_num: position of the (CDS) coding sequence within the gene, 0-based.
A gene can have multiple coding sequences, and they can overlap each other, for
example if there is programmed ribosomal slippage that causes translation to frameshift backwards/forwards.
- nuc_mut_df: pandas.DataFrame
Dataframe specifying nucleotide mutations. Each row represents a SNP.
Can have more columns than required columns.
Required columns:
- nuc_pos: 1-based nucleotide position of mutation
Returns:
==============
- nuc_mut_df: str
Returns a copy of the input nuc_mut_df with new columns:
- gene: gene name
- cds_num: 0-based index of coding region within gene
- aa_pos: 1-based amino acid position of the nucleotide mutation within the gene
- codon_start_pos: 1-based start position of the codon that covers nuc_pos
- codon_end_pos: 1-based start position of the codon that covers nuc_pos
Adds a row for each possible amino acid position covered by a nucleotide mutation.
For example nucleotide mutations that fall in overlapping regions of multiple coding regions or genes
can map to multiple amino acid positions.
"""
nuc_mut_in_gene_df_list = []
for idx, row in gene_df.iterrows():
nuc_mut_in_range_df = nuc_mut_df.loc[
~nuc_mut_df["nuc_pos"].isna() &
(nuc_mut_df["nuc_pos"] >= row["start"]) &
(nuc_mut_df["nuc_pos"] <= row["end"])].copy()
nuc_mut_in_range_df["gene"] = row["gene"]
nuc_mut_in_range_df["cds_num"] = row["cds_num"]
# Handle discontinuous coding regions within same gene.
# Handle frameshifts, such as in orf1ab in which
# programmed ribosomal slippage causes translation to slip 1 base backwards, then continue.
# AA position is dependent on the total aa length in all previous coding sequences of the gene
prev_cds_rows = gene_df.loc[
(gene_df["gene"] == row["gene"]) &
(gene_df["cds_num"] < row["cds_num"])
]
prev_cds_aa_length = prev_cds_rows["aa_length"].sum()
# 1-based amino acid position with respect to CDS (coding region), not with respect to entire gene
aa_pos_wrt_cds_ser = (np.floor((nuc_mut_in_range_df["nuc_pos"] - row["start"]) /3)) + 1
# 1-based amino acid position with respect to entire gene
nuc_mut_in_range_df["aa_pos"] = aa_pos_wrt_cds_ser + prev_cds_aa_length
# 1-based codon position with respect to entire genome (ie nucleotide coordinates)
nuc_mut_in_range_df["codon_end_pos"] = aa_pos_wrt_cds_ser * 3 + row["start"] - 1
nuc_mut_in_range_df["codon_start_pos"] = aa_pos_wrt_cds_ser * 3 + row["start"] - 3
nuc_mut_in_gene_df_list.append(nuc_mut_in_range_df)
nuc_mut_in_gene_df = pd.concat(nuc_mut_in_gene_df_list)
# type int won't allow NA values, but type Int64 will
nuc_mut_in_gene_df["aa_pos"] = nuc_mut_in_gene_df["aa_pos"].astype(float).astype('Int64')
nuc_mut_in_gene_df["cds_num"] = nuc_mut_in_gene_df["cds_num"].astype(float).astype('Int64')
nuc_mut_in_gene_df["codon_end_pos"] = nuc_mut_in_gene_df["codon_end_pos"].astype(float).astype('Int64')
nuc_mut_in_gene_df["codon_start_pos"] = nuc_mut_in_gene_df["codon_start_pos"].astype(float).astype('Int64')
nuc_mut_out_gene_df = nuc_mut_df[~nuc_mut_df["nuc_pos"].isin(nuc_mut_in_gene_df["nuc_pos"])].copy()
nuc_mut_out_gene_df["gene"] = ""
nuc_mut_out_gene_df["cds_num"] = np.nan
nuc_mut_out_gene_df["aa_pos"] = np.nan
nuc_mut_out_gene_df["codon_end_pos"] = np.nan
nuc_mut_out_gene_df["codon_start_pos"] = np.nan
nuc_mut_out_gene_df["aa_pos"] = nuc_mut_out_gene_df["aa_pos"].astype(float).astype('Int64')
nuc_mut_out_gene_df["cds_num"] = nuc_mut_out_gene_df["cds_num"].astype(float).astype('Int64')
nuc_mut_out_gene_df["codon_end_pos"] = nuc_mut_out_gene_df["codon_end_pos"].astype(float).astype('Int64')
nuc_mut_out_gene_df["codon_start_pos"] = nuc_mut_out_gene_df["codon_start_pos"].astype(float).astype('Int64')
nuc_mut_full_df = pd.concat([nuc_mut_in_gene_df, nuc_mut_out_gene_df])
nuc_mut_full_df = nuc_mut_full_df.sort_values(["nuc_pos", "gene", "aa_pos"], ascending=True)
nuc_mut_full_df["aa_pos"] = nuc_mut_full_df["aa_pos"].astype(float).astype('Int64')
nuc_mut_full_df["cds_num"] = nuc_mut_full_df["cds_num"].astype(float).astype('Int64')
nuc_mut_full_df["codon_end_pos"] = nuc_mut_full_df["codon_end_pos"].astype(float).astype('Int64')
nuc_mut_full_df["codon_start_pos"] = nuc_mut_full_df["codon_start_pos"].astype(float).astype('Int64')
return nuc_mut_full_df
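# Worked example of the position arithmetic above (added comment, values assumed):
# for a coding region with start=266 and a SNP at nuc_pos=300,
#   aa_pos within the CDS = floor((300 - 266) / 3) + 1 = 12
#   codon_start_pos       = 12 * 3 + 266 - 3 = 299
#   codon_end_pos         = 12 * 3 + 266 - 1 = 301
# so the SNP falls in the 12th codon of that coding region (positions 299-301); aa_pos
# is then shifted by the amino acid length of any earlier coding regions of the gene.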
def get_mutated_codon(df, ref_nuc_seq_dict):
"""
Helper function to get the mutated codon and potentially mutated amino acid.
Parameters:
==============
- df: pandas.DataFrame
DataFrame should contain only the rows pertaining to a single codon in a sample,
as identified by columns: seqHash, gene, cds_num, codon_start_pos, codon_end_pos.
Each row represents a SNP.
Dataframe can contain more columns than required columns.
Required columns:
- seqHash
- gene: gene name
- cds_num: 0based index of coding region within gene
- nuc_pos: 1based genome position of nucleotide mutation
- nuc_to: mutated nucleotide at nuc_pos
- codon_start_pos: 1-based start position of the codon that covers nuc_pos
- codon_end_pos: 1-based start position of the codon that covers nuc_pos
- ref_nuc_seq_dict: SeqIO dict
SeqIO Dict containing reference genomic sequence.
Should have format {"MN908947.3": SeqRecord of genome nucleotide sequence}
Returns:
==============
- df: pandas.DataFrame
Modifies the input df inplace and adds new columns:
- codon_to: the mutated codon covering position nuc_pos
- aa_to_translated: the amino acid translation of codon_to
"""
df = df.sort_values(["nuc_pos"], ascending=True)
codon_to = ""
for nuc_pos_1based in range(df.iloc[0]["codon_start_pos"], df.iloc[0]["codon_end_pos"]+1):
nuc_pos_0based = nuc_pos_1based - 1
nuc = str(ref_nuc_seq_dict["MN908947.3"].seq[nuc_pos_0based:nuc_pos_0based + 1])
if nuc_pos_1based in df["nuc_pos"].values:
nuc = df.loc[df["nuc_pos"] == nuc_pos_1based, "nuc_to"].values[0]
codon_to += nuc
df["codon_to"] = codon_to
df["aa_to_translated"] = str(Seq(codon_to).translate())
return df
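# Illustrative example (added comment, values made up): for a codon spanning positions
# 100-102 with reference bases "GAT" and a single SNP row with nuc_pos=101 and
# nuc_to="C", the rebuilt codon_to is "GCT" and aa_to_translated is "A"; if several
# SNPs from the same sample hit the same codon they are combined into one mutated
# codon before translation.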
def convert_nuc_mut_to_aa(nuc_mut_df, ref_nuc_seq_dict):
"""
Helper function to translate mutated codons into amino acids,
and only return the SNP - AA associations that yield synonymous substitutions.
Each row in the input nuc_mut_df represents a SNP and one of its associated amino acid position.
A SNP can be associated with multiple amino acid positions if it occurs in
overlapping genes or overlapping coding regions.
However, that SNP may or may not yield a synonymous substitution at that that
amino acid position.
Parameters:
==============
- nuc_mut_df: pandas.DataFrame
Each row represents a SNP and the associated amino acid position.
Dataframe can contain more columns than required columns. Required columns:
- gene: name of gene that mutation occurs in, or "" if it doesn't occur in a gene
- cds_num: 0based index of coding region within the gene
- nuc_pos: 1based nucleotide position of mutation in the genome
- nuc_to: nucleotide mutation at nuc_pos
- codon_start_pos: 1based genome position of beginning of codon containing the mutation at nuc_pos
- codon_end_pos: 1based genome position of end of codon containing the mutation at nuc_pos
- ref_nuc_seq_dict: SeqIO dict
SeqIO Dict containing reference genomic sequence.
Should have format {"MN908947.3": SeqRecord of genome nucleotide sequence}
Returns:
==============
- valid_syn_df: pandas.DataFrame
Makes a copy of the input dataframe and adds new columns:
- codon_to: the mutated codon covering position nuc_pos
- aa_to: the amino acid translation of codon_to
valid_syn_df will only contain the rows in which the nucleotide position corresponds to a
synonymous mutation
"""
nuc_mut_trans_df = ( nuc_mut_df
.groupby(["seqHash",
"gene", "cds_num", "codon_start_pos", "codon_end_pos"])
.apply(get_mutated_codon, ref_nuc_seq_dict=ref_nuc_seq_dict)
)
valid_syn_df = nuc_mut_trans_df.loc[
(nuc_mut_trans_df["aa_from"] == nuc_mut_trans_df["aa_to_translated"]) &
(nuc_mut_trans_df["aa_to_translated"] != "")
].reset_index(drop=True)
valid_syn_df = valid_syn_df.rename(columns={"aa_to_translated": "aa_to"})
return valid_syn_df
def translate_snps(genes_tsv, ref_nuc_fasta_filename, ref_aa_fasta_filename,
nuc_mut_tsv, aa_mut_tsv,
snp_aa_link_tsv,
gene_overlap_tsv=None):
"""
Links SNPs to known amino acid substitutions from the output of
the grapevine variant pipeline.
The grapevine variant pipeline outputs SNPs and amino acid substitutions (synonymous and nonsynonymous)
in separate files. Although it directly converts SNPs to the amino acid substitutions,
it never writes the linkage down. So we need to calculate it ourselves.
Parameters:
==============
- genes_tsv: str
Path to TSV of gene coordinates.
Should have columns:
- start: nucleotide start position of gene (CDS) coding sequence with respect to genome, 1 based
- end: nucleotide end position of gene (CDS) coding sequence with respect to genome, 1 based
- gene: gene name
- cds_num: position of the (CDS) coding sequence within the gene, 0-based.
A gene can have multiple coding sequences, and they can overlap each other, for
example if there is programmed ribosomal slippage that causes translation to frameshift backwards/forwards.
- ref_nuc_fasta_filename: str
Path to reference nucleotide fasta
- ref_aa_fasta_filename: str
Path to reference amino acid fasta
- nuc_mut_tsv: str
path to TSV of SNPs.
Expects that each SNP is on a separate line.
Columns should be: seqHash, SNP
For SNP, format should be "<nuc from><nuc pos><nuc to>"
- aa_mut_tsv: str
Path to TSV of amino acid substitutions.
Expects that each substitution is on a separate line.
Columns should be: seqHash, aa_mutation.
For aa_mutation:
- Synonymous substitutions will have format: synSNP:<nuc from><nuc pos><nuc to>
- Nonsynonymous substitutions will have format gene:<aa from><aa pos><aa to>
- snp_aa_link_tsv: str
Path to output TSV to write nucleotide to amino acid mutation links.
Will have columns: ["seqHash",
"genome_mutation.genome", "genome_mutation.pos", "genome_mutation.ref", "genome_mutation.alt",
"protein_mutation.gene", "protein_mutation.pos", "protein_mutation.ref", "protein_mutation.alt"]
- gene_overlap_tsv: str
path to input TSV of coordinates of gene overlap regions.
Expects columns to be: start, end, gene_cds
gene_cds column format should be: <gene>_cds<0 based cds number within gene>
Returns:
==============
tuple (link_mut_df, link_mut_ann_df)
- link_mut_df: pandas.DataFrame
Dataframe for the nucleotide to amino acid mutation linkage with the columns:
["seqHash",
"genome_mutation.genome", "genome_mutation.pos", "genome_mutation.ref", "genome_mutation.alt",
"protein_mutation.gene", "protein_mutation.pos", "protein_mutation.ref", "protein_mutation.alt"]
"""
ref_nuc_seq_dict = SeqIO.to_dict(SeqIO.parse(ref_nuc_fasta_filename, "fasta"))
ref_aa_seq_dict = SeqIO.to_dict(SeqIO.parse(ref_aa_fasta_filename, "fasta"))
if gene_overlap_tsv:
known_overlaps_df = pd.read_csv(gene_overlap_tsv, sep="\t", comment='#')
else:
known_overlaps_df = pd.DataFrame(columns=["start", "end", "gene_cds"])
gene_df = pd.read_csv(genes_tsv, sep="\t", comment="#")
gene_df["aa_length"] = (gene_df["end"] - gene_df["start"] + 1) / 3
# Check that distance between end and start is in multiples of 3
# ie check that start and end correspond to codon start and end
assert np.sum((gene_df["end"] - gene_df["start"] + 1) % 3 != 0) == 0
gene_df["aa_length"] = gene_df["aa_length"].astype(int)
# columns: seqHash, aa_mutation
aa_mut_df = pd.read_csv(aa_mut_tsv, sep="\t", comment="#")
# There might be samples with no amino acid mutations.
# We drop any samples with empty amino acid mutations to
# make merging easier
aa_mut_df = aa_mut_df.dropna()
if aa_mut_df.shape[0] < 1:
nonsyn_mut_df = pd.DataFrame(columns=["gene", "cds_num", "aa_mutation", "aa_from", "aa_pos", "aa_to"])
syn_mut_df = pd.DataFrame(columns=["nuc_from", "nuc_pos", "nuc_to",
"gene", "cds_num", "aa_mutation", "aa_from", "aa_pos", "aa_to"])
else:
# Split up the nonsynonymous and synonymous substitutions from the aa_mut_df,
# because we need to treat them differently
nonsyn_mut_df = aa_mut_df[~aa_mut_df["aa_mutation"].str.startswith("synSNP")].copy().reset_index(drop=True)
nonsyn_mut_df[["gene", "aa_from_pos_to"]] = nonsyn_mut_df["aa_mutation"].str.split(":", expand=True)
nonsyn_mut_df[["aa_from", "aa_pos", "aa_to"]] = (nonsyn_mut_df["aa_from_pos_to"]
.str.extract(r"([A-Z\*])([0-9]+)([A-Z\*]*)", expand=True))
# type int won't allow NA values but type Int64 will.
# But oddly, we need to cast to float before we can cast to int64
nonsyn_mut_df["aa_pos"] = nonsyn_mut_df["aa_pos"].astype('float').astype('Int64')
syn_mut_df = aa_mut_df[aa_mut_df["aa_mutation"].str.startswith("synSNP")].copy().reset_index(drop=True)
syn_mut_df[["consequence", "nuc_from_pos_to"]] = syn_mut_df["aa_mutation"].str.split(":", expand=True)
syn_mut_df[["nuc_from", "nuc_pos", "nuc_to"]] = syn_mut_df["nuc_from_pos_to"].str.extract(r"([A-Z])([0-9]+)([A-Z])", expand=True)
syn_mut_df["nuc_pos"] = syn_mut_df["nuc_pos"].astype(float).astype("Int64")
# Has columns: seqHash, aa_mutation, nuc_from, nuc_pos, nuc_to
# Also has throwaway columns: consequence, nuc_from_pos_to.
# Append columns: gene, cds_num, aa_pos, codon_start_pos, codon_end_pos
# Each row represents a SNP that we know should lead to a synonymous substitution (according to gofasta)
# If a SNP happens to cover multiple amino acid positions because it hits an overlapping gene region, overlapping coding region,
# we add another row to represent each SNP - amino acid position mapping.
syn_mut_df = convert_nuc_pos_to_aa_pos(gene_df=gene_df, nuc_mut_df=syn_mut_df)
syn_mut_df["aa_pos"] = syn_mut_df["aa_pos"].astype('float').astype('Int64')
# https://stackoverflow.com/questions/43196907/valueerror-wrong-number-of-items-passed-meaning-and-suggestions
# apply on an empty dataframe raises an error:
if syn_mut_df.shape[0] > 0:
syn_mut_df["aa_from"] = syn_mut_df.apply(get_aa_at_gene_pos, axis="columns", ref_aa_seq_dict=ref_aa_seq_dict)
else:
syn_mut_df["aa_from"] = ""
# Has columns: seqHash, aa_mutation, nuc_from, nuc_pos, nuc_to, gene, cds_num, aa_pos, codon_start_pos, codon_end_pos
# Also has throwaway columns: consequence, nuc_from_pos_to.
# Append columns: codon_to, aa_to. codon_to is a throwaway column we won't use later.
# Cull the rows such that only SNP - amino acid position mappings
# that result in synonymous substitutions exist.
syn_mut_df = convert_nuc_mut_to_aa(ref_nuc_seq_dict=ref_nuc_seq_dict, nuc_mut_df=syn_mut_df)
# Columns: seqHash, SNP
nuc_mut_df = pd.read_csv(nuc_mut_tsv, sep="\t", comment="#")
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Load the UK Covid overview data
df1 = pd.read_csv('overview_2021-07-15.csv')
print(df1.head().to_string())
print(df1.tail().to_string())
# Drop columns that don't provide any additional data
df1.drop(['areaCode', 'areaName', 'areaType'], axis='columns', inplace=True)
print(df1.head().to_string())
# Set the date column to be a datetime column
df1['date'] = pd.to_datetime(df1['date'])
# Sort the rows into ascending date order
df1.sort_values(by=["date"], ignore_index=True, inplace=True)
# Select dates from 2020-02-15 to 2020-12-31
# Set up a mask to indicate the date selection
date_mask = (df1['date'] > '2020-02-14') & (df1['date'] <= '2020-12-31')
# Select all the rows that meet the mask search criteria
df1 = df1.loc[date_mask]
print(df1.head().to_string())
print(df1.tail().to_string())
# Count the null values in each column
is_null_count = df1.isnull().sum()
print(is_null_count)
# Remove columns with no data in them
df1.drop(['newPeopleVaccinatedFirstDoseByPublishDate',
'newPeopleVaccinatedSecondDoseByPublishDate'],
axis='columns',
inplace=True)
# Select a random sample of 10 rows form the DataFrame
print(df1.sample(10).to_string())
print('-' * 25)
# Load the Google Mobility data for the UK
df2 = pd.read_csv('2020_GB_Region_Mobility_Report.csv', low_memory=False)
# Drop columns that don't provide any additional data
df2.drop(['country_region_code',
'country_region',
'sub_region_1',
'sub_region_2',
'metro_area',
'iso_3166_2_code',
'census_fips_code',
'place_id'],
axis='columns',
inplace=True)
df2['date'] = pd.to_datetime(df2['date'])
import numpy
import pandas
import scipy
import sklearn.metrics as metrics
from sklearn.model_selection import train_test_split
import statsmodels.api as stats
# The SWEEP Operator
def SWEEPOperator (pDim, inputM, tol):
# pDim: dimension of matrix inputM, positive integer
# inputM: a square and symmetric matrix, numpy array
# tol: singularity tolerance, positive real
aliasParam = []
nonAliasParam = []
A = numpy.array(inputM, copy = True, dtype = float)
diagA = numpy.diagonal(A)
for k in range(pDim):
akk = A[k,k]
if (akk >= (tol * diagA[k])):
nonAliasParam.append(k)
for i in range(pDim):
if (i != k):
for j in range(pDim):
if (j != k):
A[i,j] = A[i,j] - A[i,k] * (A[k,j] / akk)
A[j,i] = A[i,j]
A[i,k] = A[i,k] / akk
A[k,i] = A[i,k]
A[k,k] = - 1.0 / akk
else:
aliasParam.append(k)
for i in range(pDim):
A[i,k] = 0.0
A[k,i] = 0.0
return A, aliasParam, nonAliasParam
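# Illustrative check (added, not part of the original script): with this sign
# convention, sweeping every row of a non-singular symmetric matrix yields the
# negative of its inverse, while linearly dependent columns are reported in
# aliasParam instead of being swept.
# demo = numpy.array([[2.0, 1.0], [1.0, 2.0]])
# swept, demo_alias, demo_nonalias = SWEEPOperator(2, demo, 1e-7)
# demo_alias == [] and demo_nonalias == [0, 1]; swept equals -numpy.linalg.inv(demo),
# i.e. [[-2/3, 1/3], [1/3, -2/3]].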
# A function that find the non-aliased columns, fit a logistic model, and return the full parameter estimates
def build_mnlogit (fullX, y):
# Find the non-redundant columns in the design matrix fullX
nFullParam = fullX.shape[1]
XtX = numpy.transpose(fullX).dot(fullX)
invXtX, aliasParam, nonAliasParam = SWEEPOperator(pDim = nFullParam, inputM = XtX, tol = 1e-13)
# Build a multinomial logistic model
X = fullX.iloc[:, list(nonAliasParam)]
logit = stats.MNLogit(y, X)
thisFit = logit.fit(method='ncg', maxiter = 1000, xtol = 1e-8,
full_output = True, disp = True)
thisParameter = thisFit.params
thisLLK = thisFit.llf
# The number of free parameters
y_category = y.cat.categories
nYCat = len(y_category)
thisDF = len(nonAliasParam) * (nYCat - 1)
# Return model statistics
return (thisLLK, thisDF, thisParameter, thisFit)
inputData = pandas.read_csv('C:\\Users\\minlam\\Documents\\IIT\\Machine Learning\\Data\\policy_2001.csv',
delimiter=',',
usecols = ['CLAIM_FLAG', 'CREDIT_SCORE_BAND', 'BLUEBOOK_1000', 'CUST_LOYALTY', 'MVR_PTS', 'TIF', 'TRAVTIME'])
# Print number of missing values per variable
print('Number of Missing Values:')
print(pandas.Series.sort_index(inputData.isna().sum()))
# Specify CLAIM_FLAG as a categorical variable
inputData['CLAIM_FLAG'] = inputData['CLAIM_FLAG'].astype('category')
y_category = inputData['CLAIM_FLAG'].cat.categories
nYCat = len(y_category)
# Specify CREDIT_SCORE_BAND as a categorical variable
inputData['CREDIT_SCORE_BAND'] = inputData['CREDIT_SCORE_BAND'].astype('category')
# Create Training and Test partitions
policy_train, policy_test = train_test_split(inputData, test_size = 0.33, random_state = 20201014, stratify = inputData['CLAIM_FLAG'])
nObs_train = policy_train.shape[0]
nObs_test = policy_test.shape[0]
# Build the logistic model
y = policy_train['CLAIM_FLAG']
# Train a Logistic Regression model using the Forward Selection method
devianceTable = pandas.DataFrame()
u = pandas.DataFrame()
# Step 0: Intercept only model
u = y.isnull()
designX = pandas.DataFrame(u.where(u, 1)).rename(columns = {'CLAIM_FLAG': 'const'})
LLK0, DF0, fullParams0, thisFit = build_mnlogit (designX, y)
devianceTable = devianceTable.append([[0, 'Intercept', DF0, LLK0, None, None, None]])
# Consider Model 1 is CLAIM_FLAG = Intercept + <predictor>
predList = ['CREDIT_SCORE_BAND', 'BLUEBOOK_1000', 'CUST_LOYALTY', 'MVR_PTS', 'TIF', 'TRAVTIME']
step = 1.0
for pred in predList:
step += 0.1
thisVar = policy_train[pred]
dType = thisVar.dtypes.name
if (dType == 'category'):
designX = pandas.get_dummies(thisVar)
import ast
import json
import pickle
from typing import Tuple
import numpy as np
import pandas as pd
from tqdm import tqdm
def _concat_browsing_and_search(browsing_df: pd.DataFrame, search_df: pd.DataFrame) -> pd.DataFrame:
browsing_df["is_search"] = False
search_df["is_search"] = True
res = pd.concat([browsing_df, search_df], axis=0)
res.sort_values(["session_id_hash", "server_timestamp_epoch_ms"], ascending=True, inplace=True)
res.reset_index(drop=True, inplace=True)
return res
def _preprocessing_browsing_train(df: pd.DataFrame) -> pd.DataFrame:
df["server_timestamp"] = pd.to_datetime(
df["server_timestamp_epoch_ms"], unit="ms",
)
df.fillna(np.nan, inplace=True)
df.sort_values(["session_id_hash", "server_timestamp_epoch_ms"], ascending=True, inplace=True)
return df.reset_index(drop=True)
def _preprocessing_search_train(df: pd.DataFrame) -> pd.DataFrame:
df["server_timestamp"] = pd.to_datetime(
df["server_timestamp_epoch_ms"], unit="ms",
)
df["query_vector"] = (
df["query_vector"]
.apply(lambda x: ast.literal_eval(x) if isinstance(x, str) else np.nan)
)
df["product_skus_hash"] = (
df["product_skus_hash"]
.apply(lambda x: ast.literal_eval(x) if isinstance(x, str) else np.nan)
)
df["clicked_skus_hash"] = (
df["clicked_skus_hash"]
.apply(lambda x: ast.literal_eval(x) if isinstance(x, str) else np.nan)
)
df.fillna(np.nan, inplace=True)
df.sort_values(["session_id_hash", "server_timestamp_epoch_ms"], ascending=True, inplace=True)
return df.reset_index(drop=True)
def _preprocessing_sku_to_content(df: pd.DataFrame) -> pd.DataFrame:
df["image_vector"] = (
df["image_vector"]
.apply(lambda x: ast.literal_eval(x) if isinstance(x, str) else np.nan)
)
df["description_vector"] = (
df["description_vector"]
.apply(lambda x: ast.literal_eval(x) if isinstance(x, str) else np.nan)
)
df["category_hash_first_level"] = (
df["category_hash"]
.apply(lambda x: x.split("/")[0] if isinstance(x, str) else np.nan)
)
df["category_hash_second_level"] = (
df["category_hash"]
.apply(lambda x: x.split("/")[1] if isinstance(x, str) and len(x.split("/")) >= 2 else np.nan)
)
df["category_hash_third_level"] = (
df["category_hash"]
.apply(lambda x: x.split("/")[2] if isinstance(x, str) and len(x.split("/")) >= 3 else np.nan)
)
df.fillna(np.nan, inplace=True)
return df.reset_index(drop=True)
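# Illustrative example (added comment, value made up): a category_hash such as
# "electronics/audio/headphones" yields category_hash_first_level = "electronics",
# category_hash_second_level = "audio" and category_hash_third_level = "headphones";
# rows with fewer levels get NaN for the missing level columns.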
def load_test_data() -> pd.DataFrame:
with open('../session_rec_sigir_data/test/intention_test_phase_2.json', 'r') as f:
test = json.load(f)
dataset = _convert_json_to_dataframe(test)
return dataset
def _convert_json_to_dataframe(json_data: dict) -> pd.DataFrame:
events = []
nb_after_adds = []
for query_label in tqdm(json_data):
nb_after_add = query_label["nb_after_add"]
query = query_label["query"]
for event in query:
events.append(event)
nb_after_adds.append(nb_after_add)
res = pd.DataFrame(events)
res['nb_after_add'] = nb_after_adds
res["server_timestamp"] = pd.to_datetime(
res["server_timestamp_epoch_ms"], unit="ms",
)
res.fillna(np.nan, inplace=True)
res.sort_values(["session_id_hash", "server_timestamp_epoch_ms"], ascending=True, inplace=True)
res.reset_index(drop=True, inplace=True)
return res
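# Illustrative sketch of the assumed input structure (added comment): each element of
# the test JSON looks roughly like
# {"query": [{"session_id_hash": "...", "server_timestamp_epoch_ms": 1600000000000, ...}, ...],
#  "nb_after_add": 2}
# and every event in "query" becomes one row of the returned dataframe, annotated with
# its session's nb_after_add value.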
def load_datasets() -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
browsing_train = pd.read_csv('../session_rec_sigir_data/train/browsing_train.csv')
search_train = pd.read_csv('../session_rec_sigir_data/train/search_train.csv')
import requests
import os
import pandas as pd
import json
import matplotlib.pyplot as plt
import re
import numpy as np
from requests.exceptions import HTTPError
import os
def fred_function(**kwargs):
"""
This function collects data from the FRED API, checks the status of the request returned by the server, and informs the user of any errors.
It also gathers the requested series of observations and returns them as a dataframe.
Parameters
----------
api_key : a string to put your API key
series_id : a string of the id for a series, such as 'GNPC96' for quarterly 'Real Gross National Product' or 'GNPCA' for annual 'Real Gross National Product'.
realtime_start : YYYY-MM-DD formatted string, optional, default: today's date
realtime_end : YYYY-MM-DD formatted string, optional, default: today's date
observation_start : YYYY-MM-DD formatted string, optional, default: 1776-07-04 (earliest available)
observation_end : YYYY-MM-DD formatted string, optional, default: 9999-12-31 (latest available)
Returns
-------
A dataframe of the data collected from the API.
Examples
--------
>>> fred_function(api_key=api_key,series_id='GNPCA',observation_start='1950-01-01')
<DataFrame>
It contains annual Real Gross National Product values from 1950-01-01 to today.
"""
params = kwargs
try:
r = requests.get('https://api.stlouisfed.org/fred/series/observations?file_type=json',params = params)
r.raise_for_status()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
fred_json = r.json()
fred_json_df = pd.DataFrame(fred_json['observations'])
return fred_json_df
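# Minimal usage sketch of fred_function() (illustrative helper only, not used
# elsewhere): assumes a valid FRED API key; 'GNPCA' is the annual Real Gross
# National Product series id mentioned in the docstring above.
def _example_fred_function(api_key):
    # Fetch annual Real GNP observations since 1950 and cast the string
    # 'value' column to numeric for downstream analysis.
    gnp = fred_function(api_key=api_key, series_id='GNPCA',
                        observation_start='1950-01-01')
    gnp['value'] = pd.to_numeric(gnp['value'], errors='coerce')
    return gnp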
def find_series(**kwargs):
"""
This function collects series ids from the FRED API and returns them as a dataframe.
Parameters
----------
api_key : a string to put your API key
category_id : an integer id for a category, such as '32243' for the job openings category.
Returns
-------
A dataframe of series ids collected from the API.
Examples
--------
>>> api_key=api_key
>>> category_id='32243'
>>> find_series(api_key=api_key,category_id='32243')
<DataFrame>
"""
params = kwargs
r = requests.get('https://api.stlouisfed.org/fred/category/series?file_type=json',params = params)
fred_json = r.json()
fred_json_df = pd.DataFrame(fred_json['seriess'])
return fred_json_df
def observations_table_payroll(api_key):
"""
This function returns the payroll for 6 industries.
Parameters
----------
api_key : a string to put your API key
Returns
-------
A dataframe with column names : 'Professional & Business', 'Construction', 'Financial Activities', 'Manufacturing','Service Providing','Trade Transportation and Utilities'.
Index name : month from 2002-04-01 till present
Examples
--------
>>> api_key=api_key
>>> observations_table_payroll(api_key=api_key)
<DataFrame> with columns 'Professional & Business', 'Construction', 'Financial Activities', 'Manufacturing','Service Providing','Trade Transportation and Utilities'
date Professional & Business Construction Financial Activities Manufacturing Service Providing Trade Transportation and Utilities
0 2002-04-01 16044.497 6728.243 7931.101 15385.275 86443.870 25453.870
1 2002-05-01 16055.289 6721.990 7934.215 15344.723 86477.847 25436.681
"""
params = {'api_key':api_key,'category_id':32250}
r_npp = requests.get('https://api.stlouisfed.org/fred/category/series?file_type=json',params = params)
fred_json_npp = r_npp.json()
fred_json_df_npp = pd.DataFrame(fred_json_npp['seriess'])
groupdata_npp =[i for i in fred_json_df_npp['id'] if re.search(r'NPP(MNF|CON|SPT|BUS|TTU|FIN)', i)]
params = {'api_key':api_key,'series_id':'NPPMNF','observation_start':'2002-04-01'}
rr = requests.get('https://api.stlouisfed.org/fred/series/observations?file_type=json',params = params)
fred_json = rr.json()
fred_json_df= pd.DataFrame(fred_json['observations'])
df = fred_json_df[['date']]
for i in groupdata_npp:
series_number = i
params = {'api_key':api_key,'series_id':series_number,'observation_start':'2002-04-01'}
r = requests.get('https://api.stlouisfed.org/fred/series/observations?file_type=json',params = params)
fred_json = r.json()
fred_json_df = pd.DataFrame(fred_json['observations'])
fred_json_df_select = fred_json_df[['value']]
df = pd.concat([df, fred_json_df_select], axis=1)
df.columns=['date', 'Professional & Business', 'Construction', 'Financial Activities', 'Manufacturing','Service Providing','Trade Transportation and Utilities']
return df
def find_series_jobopenings(api_key):
"""
This function collects the series ids of job openings for 6 industries.
Parameters
----------
api_key : a string to put your API key
Returns
-------
A list of series ids collected from the API.
Examples
--------
>>> api_key=api_key
>>> find_series_jobopenings(api_key=api_key)
['JTS2300JOL',
'JTS3000JOL',
'JTS4000JOL',
'JTS540099JOL',
'JTS7000JOL',
'JTU510099JOL']
"""
params = {'api_key':api_key}
r = requests.get('https://api.stlouisfed.org/fred/category/series?file_type=json&category_id=32243',params = params)
fred_json = r.json()
fred_json_df = pd.DataFrame(fred_json['seriess'])
groupdata =[i for i in fred_json_df['id'] if re.search(r'JT(S3000|S2300|S7000|S540099|S4000|U510099)JOL', i)]
return groupdata
def observations_table_jobopenings(api_key):
"""
This function collects monthly job opening counts for 6 industries.
Parameters
----------
api_key : a string to put your API key
Returns
-------
A dataframe of job openings for 6 industries collected from the API.
Examples
--------
>>> api_key=api_key
>>> observations_table_jobopenings(api_key=api_key)
<DataFrame> with columns 'Construction', 'Manufacturing','Trade Transportation and Utilities', 'Professional & Business','Service Providing', 'Financial Activities'
date Construction Manufacturing Trade Transportation and Utilities Professional & Business Service Providing Financial Activities
0 2000-12-01 172.0 374.0 766.0 871.0 566.0 250.0
1 2001-01-01 223.0 491.0 965.0 888.0 654.0 298.0
"""
params = {'api_key':api_key,'category_id':32243}
r = requests.get('https://api.stlouisfed.org/fred/category/series?file_type=json',params = params)
fred_json = r.json()
fred_json_df = pd.DataFrame(fred_json['seriess'])
#! /usr/local/bin/python
# ! -*- encoding:utf-8 -*-
from pathlib import Path
import pandas as pd
import numpy as np
import random
import os
def generate_gt(clusters, dataset):
cci_labels_gt_path = '{}/mouse_small_intestine_1189_cci_labels_gt_{}_{}.csv'
cci_labels_junk_path = '{}/mouse_small_intestine_1189_cci_labels_junk_{}_{}.csv'
data_path = 'mouse_small_intestine_1189_data.csv'
type_path = 'mouse_small_intestine_1189_cellcluster.csv'
cci_path = 'mouse_small_intestine_1189_cluster_cluster_interaction_combined.csv'
ligand_receptor_pair_path = 'mouse_ligand_receptor_pair.csv'
# prepare data and cell2type
df = pd.read_csv(data_path, index_col=0) # (gene, cell)
genes = set(df.index.tolist())
df = df.fillna(0)
df = df.transpose(copy=True) # (cell, gene)
df['id'] = range(0, len(df)) # add cell id
df['id'] = df['id'].astype(int)
cell2type = pd.read_csv(type_path, index_col=0)
cell2type.columns = ['cell', 'type']
assert cell2type['cell'].tolist() == df.index.tolist()
df['type'] = cell2type['type'].tolist()
# prepare cell cell interaction
cci = pd.read_csv(cci_path, header=0, index_col=0)
# prepare ligand receptor pair
lcp = pd.read_csv(ligand_receptor_pair_path, header=0, index_col=0)
"""
Author: <NAME>
Date Created: 11 March 2020
Scripts related to training the VAE including
1. Normalizing gene expression data
2. Wrapper function to input training parameters and run vae
training in `vae.tybalt_2layer_model`
"""
from ponyo import vae, utils
import os
import pickle
import pandas as pd
from sklearn import preprocessing
import tensorflow as tf
import numpy as np
import random
import warnings
def fxn():
warnings.warn("deprecated", DeprecationWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fxn()
def set_all_seeds(seed_val=42):
"""
This function sets all seeds to get reproducible VAE trained
models.
"""
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
os.environ["PYTHONHASHSEED"] = "0"
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(seed_val)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
random.seed(seed_val)
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
tf.set_random_seed(seed_val)
def normalize_expression_data(
base_dir, config_filename, raw_input_data_filename, normalized_data_filename
):
"""
0-1 normalize the expression data.
Arguments
----------
base_dir: str
Root directory containing analysis subdirectories
config_filename: str
File containing user defined parameters
raw_input_data_filename: str
File containing raw expression data
normalized_data_filename: str
Output file containing normalized expression data
"""
# Read in config variables
params = utils.read_config(config_filename)
# Read data
data = pd.read_csv(raw_input_data_filename, header=0, sep="\t", index_col=0)
print(
"input: dataset contains {} samples and {} genes".format(
data.shape[0], data.shape[1]
)
)
# 0-1 normalize per gene
scaler = preprocessing.MinMaxScaler()
data_scaled_df = scaler.fit_transform(data)
data_scaled_df = pd.DataFrame(
data_scaled_df, columns=data.columns, index=data.index
)
print(
"Output: normalized dataset contains {} samples and {} genes".format(
data_scaled_df.shape[0], data_scaled_df.shape[1]
)
)
# Save scaler transform
scaler_filename = params["scaler_transform_filename"]
outfile = open(scaler_filename, "wb")
pickle.dump(scaler, outfile)
outfile.close()
# Save scaled data
data_scaled_df.to_csv(normalized_data_filename, sep="\t", compression="xz")
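# Minimal sketch of how the pickled scaler saved above could be reused to undo
# the 0-1 normalization (illustrative helper only; the name and arguments are
# not part of the original workflow).
def _example_inverse_transform(scaler_filename, data_scaled_df):
    # data_scaled_df: 0-1 normalized expression data (samples x genes)
    with open(scaler_filename, "rb") as infile:
        scaler = pickle.load(infile)
    return pd.DataFrame(
        scaler.inverse_transform(data_scaled_df),
        columns=data_scaled_df.columns,
        index=data_scaled_df.index,
    )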
def train_vae(config_filename, input_data_filename):
"""
Trains VAE model using parameters set in config file
Arguments
----------
config_filename: str
File containing user defined parameters
input_data_filename: str
File path corresponding to input dataset to use
"""
# Read in config variables
params = utils.read_config(config_filename)
# Load parameters
base_dir = os.path.abspath(os.path.join(os.getcwd(), "../"))
dataset_name = params["dataset_name"]
learning_rate = params["learning_rate"]
batch_size = params["batch_size"]
epochs = params["epochs"]
kappa = params["kappa"]
intermediate_dim = params["intermediate_dim"]
latent_dim = params["latent_dim"]
epsilon_std = params["epsilon_std"]
train_architecture = params["NN_architecture"]
validation_frac = params["validation_frac"]
# Read data
normalized_data = pd.read_csv(input_data_filename, header=0, sep="\t", index_col=0)
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : hotgrid.py
@Author : <NAME>
@Version : 1.0
@Contact : <EMAIL>
@License : Copyright © 2007 Free Software Foundation, Inc
@Desc : None
'''
import numpy as np
import pandas as pd
from .geokit import getlngandlat, haversine
class HotGridGenerator():
def __init__(self,gridUnit,searchRadius):
self.gridUnit=gridUnit
self.searchRadius=searchRadius
self.indexList=[]
self.colList=[]
self.gridCenter=pd.DataFrame(index=self.indexList, columns=self.colList)
def grid_setting(self,df,latcol,lngcol):
gU=self.gridUnit
sR=self.searchRadius
bleed=int(sR/gU)
minlng=df[lngcol].min()
maxlng=df[lngcol].max()
minlat=df[latcol].min()
maxlat=df[latcol].max()
midlat=(maxlat+minlat)/2
midlng=(maxlng+minlng)/2
dx=haversine(minlat,maxlng,minlat,minlng)
dy=haversine(minlat,maxlng,maxlat,maxlng)
lngL=[]
latL=[]
for i in range(bleed,int(dx/gU)-bleed+1):
dist=i*gU
tlat,tlng=getlngandlat(minlat,minlng,90,dist)
lngL.append(tlng)
for i in range(bleed,int(dy/gU)-bleed+1):
dist=i*gU
tlat,tlng=getlngandlat(minlat,minlng,0,dist)
latL.append(tlat)
self.indexList=[]
self.colList=[]
for k in range(0,len(latL)):
self.indexList.append(str(k).zfill(3))
for k in range(0,len(lngL)):
self.colList.append(str(k).zfill(3))
self.gridCenter = pd.DataFrame(index=self.indexList, columns=self.colList)
for i in range(0,len(latL)):
for j in range(0,len(lngL)):
self.gridCenter.iloc[i,j]=(latL[i],lngL[j])
self.gridCenter.sort_index(axis=0,ascending=False).describe()
return self.indexList,self.colList,self.gridCenter
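# Minimal usage sketch for this class (illustrative only; assumes a DataFrame
# 'df' with latitude/longitude columns in decimal degrees, and that gridUnit
# and searchRadius share the distance unit returned by haversine()):
#
#   gen = HotGridGenerator(gridUnit=500, searchRadius=1000)
#   idx, cols, centers = gen.grid_setting(df, latcol='lat', lngcol='lng')
#   counts = gen.gridCounting_basic(df, lat_col='lat', lng_col='lng')
#
# 'counts' holds, for every grid centre, the number of points of 'df' that lie
# within searchRadius of that centre.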
def gridCounting_basic(self,df0,lat_col,lng_col):
df_typei=df0
counting = pd.DataFrame(index=self.indexList, columns=self.colList)
for i in range(0,len(self.indexList)):
for j in range(0,len(self.colList)):
gci=self.gridCenter.iloc[i,j]
t,maxlng_ij=getlngandlat(gci[0],gci[1],90,self.searchRadius)
t,minlng_ij=getlngandlat(gci[0],gci[1],90,-self.searchRadius)
maxlat_ij,t=getlngandlat(gci[0],gci[1],0,self.searchRadius)
minlat_ij,t=getlngandlat(gci[0],gci[1],0,-self.searchRadius)
df_typei_ij=df_typei[(df_typei[lat_col]>=minlat_ij)&(df_typei[lat_col]<=maxlat_ij)&(df_typei[lng_col]>=minlng_ij)&(df_typei[lng_col]<=maxlng_ij)]
distL=list(map(lambda x,y:haversine(x,y,lat_2=gci[0],lng_2=gci[1]),df_typei_ij[lat_col],df_typei_ij[lng_col]))
boolL=list(filter(lambda x:x<=self.searchRadius,distL))
counting.iloc[i,j]=len(boolL)
return counting.astype(float)
def gridCounting_byType(self,df0,lat_col,lng_col,type_col):
counting_list = []
for typei in np.sort(df0[type_col].unique()):
df_typei=df0[df0[type_col]==typei]
counting = self.gridCounting_basic(df_typei,lat_col,lng_col)
counting_list.append(counting.astype(float))
return counting_list
def gridCounting_weight(self,df0,lat_col,lng_col, weight_col):
df_typei=df0
counting = pd.DataFrame(index=self.indexList, columns=self.colList)
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import folium
from folium.plugins import HeatMap
###### Auxiliary Functions ##############
def pre_processing(df,nombre):
"""
Receives a DataFrame and returns it
ready for the application.
nombre: pcr, sero, anim, mur, air
"""
if nombre=='pcr':
df['RESULTADO PCR'] = df['RESULTADO PCR'].replace({'Pendiente':np.nan,
'NO LLEGO MUESTRA ':np.nan})#.astype(float)
df['EDAD'] = df['EDAD'].replace({'NO REGISTRA':np.nan,'71,00':71}).astype(float).astype('Int16')
df['MUNICIPIO'] = df['MUNICIPIO'].astype(object).replace({1:'Lorica',
2:'Planeta Rica',
3:'Tierralta',
4:'Sahagun',
5:'Montelibano',
6:'Montería'})
#df['NOMBRE'] = df['PRIMER NOMBRE']+df['SEGUNDO NOMBRE']+df['PRIMER APELLIDO']+df['SEGUNDO APELLIDO']
elif nombre=='sero':
df['MUNICIPIO'] = df['MUNICIPIO'].astype(object).replace({1:'Lorica',
2:'Planeta Rica',
3:'Tierralta',
4:'Sahagun',
5:'Montelibano',
6:'Montería'})
df['RESULTADO SEROLOGIA'] = df['RESULTADO SEROLOGIA'].replace({'2':0,
'POSITIVO':1,
'NEGATIVO':0,
'Pendiente':np.nan,
'NO LLEGO MUESTRA ':np.nan}).astype(float)
df['NOMBRE'] = df['PRIMER NOMBRE']+df['SEGUNDO NOMBRE']+df['PRIMER APELLIDO']+df['SEGUNDO APELLIDO']
return df
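# Minimal call-chain sketch (illustrative only; 'df_pcr' stands for a raw PCR
# results table loaded elsewhere):
#
#   df_pcr = pre_processing(df_pcr, 'pcr')   # clean results, map MUNICIPIO codes
#   df_pcr = mun_to_coord(df_pcr)            # attach lat/lon per municipality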
def mun_to_coord(full_ser):
"""
Receives a DataFrame with municipalities,
adds their coordinates
and returns a DataFrame.
"""
full_ser['lat']=0
full_ser['lon']=0
full_ser['lat'].loc[full_ser['MUNICIPIO']=='Montería'] = 8.7558921
full_ser['lon'].loc[full_ser['MUNICIPIO']=='Montería'] = -75.887029
full_ser['lat'].loc[full_ser['MUNICIPIO']=='Lorica'] = 9.2394583
full_ser['lon'].loc[full_ser['MUNICIPIO']=='Lorica'] = -75.8139786
full_ser['lat'].loc[full_ser['MUNICIPIO']=='Planeta Rica'] = 8.4076739
full_ser['lon'].loc[full_ser['MUNICIPIO']=='Planeta Rica'] = -75.5840456
full_ser['lat'].loc[full_ser['MUNICIPIO']=='Tierralta'] = 8.1717342
full_ser['lon'].loc[full_ser['MUNICIPIO']=='Tierralta'] = -76.059376
full_ser['lat'].loc[full_ser['MUNICIPIO']=='Sahagun'] = 8.9472964
full_ser['lon'].loc[full_ser['MUNICIPIO']=='Sahagun'] = -75.4434972
full_ser['lat'].loc[full_ser['MUNICIPIO']=='Montelibano'] = 7.9800534
full_ser['lon'].loc[full_ser['MUNICIPIO']=='Montelibano'] = -75.4167198
full_ser['lat'].loc[full_ser['MUNICIPIO']=='Cereté'] = 8.8852282
full_ser['lon'].loc[full_ser['MUNICIPIO']=='Cereté'] = -75.7922421
full_ser['lat'].loc[full_ser['MUNICIPIO']=='San Antero'] = 9.373016
full_ser['lon'].loc[full_ser['MUNICIPIO']=='San Antero'] = -75.7595056
return full_ser
def table_target(datos,target,agrupacion='MUNICIPIO',calculation='count'):
"""
Receives data, groups it by the given grouping column, and
computes the count, or the mean, of the unique values associated
with the target.
calculation: mean, count.
"""
tabla = pd.DataFrame([])
# coding: utf-8
# In[1]:
# get_ipython().magic(u'matplotlib inline')
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import numpy.random as npr
from sklearn.cluster import KMeans
from scipy.stats import invgamma
from scipy import sparse, stats
# plt.style.use('ggplot')
# In[2]:
# import seaborn as sns
# sns.set_style("white")
# sns.set_context("paper")
# color_names = ["red",
# "windows blue",
# "medium green",
# "dusty purple",
# "orange",
# "amber",
# "clay",
# "pink",
# "greyish",
# "light cyan",
# "steel blue",
# "forest green",
# "pastel purple",
# "mint",
# "salmon",
# "dark brown"]
# colors = sns.xkcd_palette(color_names)
# In[3]:
DATA_DIR = '../dat/raw/Webscope_R3'
# In[4]:
OUT_DATA_DIR = '../dat/proc/R3_wg'
# ## R3
# In[5]:
tr_vd_data = pd.read_csv(os.path.join(DATA_DIR, 'ydata-ymusic-rating-study-v1_0-train.txt'), sep="\t", header=None,
names=['userId', 'songId', 'rating'],engine="python")
test_data = pd.read_csv(os.path.join(DATA_DIR, 'ydata-ymusic-rating-study-v1_0-test.txt'), sep="\t", header=None,
names=['userId', 'songId', 'rating'],engine="python")
# In[6]:
tr_vd_data.head(), tr_vd_data.shape
# In[7]:
test_data.head(), test_data.shape
# In[8]:
def split_train_test_proportion(data, uid, test_prop=0.5, random_seed=0):
data_grouped_by_user = data.groupby(uid)
tr_list, te_list = list(), list()
np.random.seed(random_seed)
for u, (_, group) in enumerate(data_grouped_by_user):
n_items_u = len(group)
if n_items_u >= 5:
idx = np.zeros(n_items_u, dtype='bool')
idx[np.random.choice(n_items_u, size=int(test_prop * n_items_u), replace=False).astype('int64')] = True
tr_list.append(group[np.logical_not(idx)])
te_list.append(group[idx])
else:
tr_list.append(group)
if u % 5000 == 0:
print("%d users sampled" % u)
sys.stdout.flush()
data_tr = pd.concat(tr_list)
data_te = pd.concat(te_list)
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2019, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import functools
import operator
import math
import itertools
import warnings
import contextlib
import uuid
from operator import attrgetter
import numpy as np
import pandas as pd
import pandas.api.extensions
import scipy.integrate
import scipy.signal
from lisa.utils import TASK_COMM_MAX_LEN, groupby, deprecate
class DataAccessor:
"""
Proxy class that allows extending the :class:`pandas.DataFrame` API.
**Example**::
# Define and register a dataframe accessor
@DataFrameAccessor.register_accessor
def df_foobar(df, baz):
...
df = pandas.DataFrame()
# Use the accessor with the "lisa" proxy
df.lisa.foobar(baz=1)
"""
def __init__(self, data):
self.data = data
@classmethod
def register_accessor(cls, f):
"""
Decorator to register an accessor function.
The accessor name will be the name of the function, without the
``series_`` or ``df_`` prefix.
"""
name = re.sub(r'^(?:df|series)_(.*)', r'\1', f.__name__)
cls.FUNCTIONS[name] = f
return f
def __getattr__(self, attr):
try:
f = self.FUNCTIONS[attr]
except KeyError as e:
raise AttributeError(f'Unknown method name: {attr}') from e
meth = f.__get__(self.data, self.__class__)
return meth
def __dir__(self):
attrs = set(super().__dir__())
attrs |= self.FUNCTIONS.keys()
return sorted(attrs)
@pandas.api.extensions.register_dataframe_accessor('lisa')
class DataFrameAccessor(DataAccessor):
FUNCTIONS = {}
@pandas.api.extensions.register_series_accessor('lisa')
class SeriesAccessor(DataAccessor):
FUNCTIONS = {}
@SeriesAccessor.register_accessor
def series_refit_index(series, start=None, end=None, window=None, method='inclusive', clip_window=True):
"""
Slice a series using :func:`series_window` and ensure we have a value at
exactly the specified boundaries, unless the signal started after the
beginning of the required window.
:param df: Series to act on
:type df: pandas.Series
:param start: First index value to find in the returned series.
:type start: object
:param end: Last index value to find in the returned series.
:type end: object
:param window: ``window=(start, end)`` is the same as
``start=start, end=end``. These parameters styles are mutually
exclusive.
:type window: tuple(float or None, float or None) or None
:param method: Windowing method used to select the first and last values of
the series using :func:`series_window`. Defaults to ``inclusive``,
which is suitable for signals where all the value changes have a
corresponding row without any fixed sample-rate constraints. If they
have been downsampled, ``nearest`` might be a better choice.).
:type method: str
.. note:: If ``end`` is past the end of the data, the last row will
be duplicated so that we can have a start and end index at the right
location, without moving the point at which the transition to the last
value happened. This also allows plotting series with only one item
using matplotlib, which would otherwise be impossible.
:param clip_window: Passed down to :func:`series_refit_index`.
"""
window = _make_window(start, end, window)
return _data_refit_index(series, window, method=method, clip_window=clip_window)
@DataFrameAccessor.register_accessor
def df_refit_index(df, start=None, end=None, window=None, method='inclusive', clip_window=True):
"""
Same as :func:`series_refit_index` but acting on :class:`pandas.DataFrame`
"""
window = _make_window(start, end, window)
return _data_refit_index(df, window, method=method, clip_window=clip_window)
def _make_window(start, end, window):
uses_separated = (start, end) != (None, None)
if uses_separated:
warnings.warn('start and end df_refit_index() parameters are deprecated, please use window=', DeprecationWarning, stacklevel=3)
if window is not None and uses_separated:
raise ValueError('window != None cannot be used along with start and end parameters')
if window is None:
return (start, end)
else:
return window
@DataFrameAccessor.register_accessor
def df_split_signals(df, signal_cols, align_start=False, window=None):
"""
Yield subset of ``df`` that only contain one signal, along with the signal
identification values.
:param df: The dataframe to split.
:type df: pandas.DataFrame
:param signal_cols: Columns that uniquely identify a signal.
:type signal_cols: list(str)
:param window: Apply :func:`df_refit_index` on the yielded dataframes with
the given window.
:type window: tuple(float or None, float or None) or None
:param align_start: If ``True``, same as ``window=(df.index[0], None)``.
This makes sure all yielded signals start at the same index as the
original dataframe.
:type align_start: bool
"""
if not signal_cols:
yield ({}, df)
else:
if align_start:
if window is not None:
raise ValueError('align_start=True cannot be used with window != None')
window = (df.index[0], None)
# Pandas chokes on common iterables like dict key views, so spoon feed
# it a list
signal_cols = list(signal_cols)
for group, signal in df.groupby(signal_cols, observed=True, sort=False):
# When only one column is looked at, the group is the value instead of
# a tuple of values
if len(signal_cols) < 2:
cols_val = {signal_cols[0]: group}
else:
cols_val = dict(zip(signal_cols, group))
if window:
signal = df_refit_index(signal, window=window, method='inclusive')
yield (cols_val, signal)
def _data_refit_index(data, window, method, clip_window):
if data.empty:
raise ValueError('Cannot refit the index of an empty dataframe or series')
start, end = window
if end is None:
duplicate_last = False
else:
duplicate_last = end > data.index[-1]
data = _data_window(data, window, method=method, clip_window=clip_window)
if data.empty:
return data
# When the end is after the end of the data, duplicate the last row so we
# can push it to the right as much as we want without changing the point at
# which the transition to that value happened
if duplicate_last:
data = data.append(data.iloc[-1:])
else:
# Shallow copy is enough, we only want to replace the index and not the
# actual data
data = data.copy(deep=False)
index = data.index.to_series()
# Only advance the beginning of the data, never move it in the past.
# Otherwise, we "invent" a value for the signal that did not existed,
# leading to various wrong results.
if start is not None and index.iloc[0] < start:
index.iloc[0] = start
if end is not None:
index.iloc[-1] = end
data.index = index
return data
@DataFrameAccessor.register_accessor
def df_squash(df, start, end, column='delta'):
"""
Slice a dataframe of deltas in [start:end] and ensure we have
an event at exactly those boundaries.
The input dataframe is expected to have a "column" which reports
the time delta between consecutive rows, as for example dataframes
generated by :func:`df_add_delta`.
The returned dataframe is granted to have an initial and final
event at the specified "start" ("end") index values, which values
are the same of the last event before (first event after) the
specified "start" ("end") time.
Examples:
Slice a dataframe to [start:end], and work on the time data so that it
makes sense within the interval.
Examples to make it clearer::
df is:
Time len state
15 1 1
16 1 0
17 1 1
18 1 0
-------------
df_squash(df, 16.5, 17.5) =>
Time len state
16.5 .5 0
17 .5 1
df_squash(df, 16.2, 16.8) =>
Time len state
16.2 .6 0
:returns: a new df that fits the above description
"""
if df.empty:
return df
end = min(end, df.index[-1] + df[column].iloc[-1])
res_df = pd.DataFrame(data=[], columns=df.columns)
if start > end:
return res_df
# There's a few things to keep in mind here, and it gets confusing
# even for the people who wrote the code. Let's write it down.
#
# It's assumed that the data is continuous, i.e. for any row 'r' within
# the trace interval, we will find a new row at (r.index + r.len)
# For us this means we'll never end up with an empty dataframe
# (if we started with a non empty one)
#
# What's we're manipulating looks like this:
# (| = events; [ & ] = start,end slice)
#
# | [ | ] |
# e0 s0 e1 s1 e2
#
# We need to push e0 within the interval, and then tweak its duration
# (len column). The mathemagical incantation for that is:
# e0.len = min(e1.index - s0, s1 - s0)
#
# This takes care of the case where s1 isn't in the interval
# If s1 is in the interval, we just need to cap its len to
# s1 - e1.index
prev_df = df[:start]
middle_df = df[start:end]
# Tweak the closest previous event to include it in the slice
if not prev_df.empty and start not in middle_df.index:
res_df = res_df.append(prev_df.tail(1))
res_df.index = [start]
e1 = end
if not middle_df.empty:
e1 = middle_df.index[0]
res_df[column] = min(e1 - start, end - start)
if not middle_df.empty:
res_df = res_df.append(middle_df)
if end in res_df.index:
# e_last and s1 collide, ditch e_last
res_df = res_df.drop([end])
else:
# Fix the delta for the last row
delta = min(end - res_df.index[-1], res_df[column].iloc[-1])
res_df.at[res_df.index[-1], column] = delta
return res_df
@DataFrameAccessor.register_accessor
def df_filter(df, filter_columns, exclude=False):
"""
Filter the content of a dataframe.
:param df: DataFrame to filter
:type df: pandas.DataFrame
:param filter_columns: Dict of `{"column": value)` that rows has to match
to be selected.
:type filter_columns: dict(str, object)
:param exclude: If ``True``, the matching rows will be excluded rather than
selected.
:type exclude: bool
"""
if filter_columns:
key = functools.reduce(
operator.and_,
(
df[col] == val
for col, val in filter_columns.items()
)
)
return df[~key if exclude else key]
else:
if exclude:
return df
else:
return df_make_empty_clone(df)
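# Minimal sketch of df_filter() on a toy frame (illustrative helper only).
def _example_df_filter():
    df = pd.DataFrame({'cpu': [0, 1, 1], 'util': [10, 20, 30]})
    # Keeps the two rows where cpu == 1; exclude=True would drop them instead.
    return df_filter(df, {'cpu': 1})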
def df_merge(df_list, drop_columns=None, drop_inplace=False, filter_columns=None):
"""
Merge a list of :class:`pandas.DataFrame`, keeping the index sorted.
:param drop_columns: List of columns to drop prior to merging. This avoids
ending up with extra renamed columns if some dataframes have column
names in common.
:type drop_columns: list(str)
:param drop_inplace: Drop columns in the original dataframes instead of
creating copies.
:type drop_inplace: bool
:param filter_columns: Dict of `{"column": value)` used to filter each
dataframe prior to dropping columns. The columns are then dropped as
they have a constant value.
:type filter_columns: dict(str, object)
"""
df_list = list(df_list)
drop_columns = drop_columns if drop_columns else []
if filter_columns:
df_list = [
df_filter(df, filter_columns)
for df in df_list
]
# remove the column to avoid duplicated useless columns
drop_columns.extend(filter_columns.keys())
# Since we just created dataframe slices, drop_inplace would give a
# warning from pandas
drop_inplace = False
if drop_columns:
def drop(df):
filtered_df = df.drop(columns=drop_columns, inplace=drop_inplace)
# when inplace=True, df.drop() returns None
return df if drop_inplace else filtered_df
df_list = [
drop(df)
for df in df_list
]
if any(
not (df1.columns & df2.columns).empty
for (df1, df2) in itertools.combinations(df_list, 2)
):
df = pd.concat(df_list)
df.sort_index(inplace=True)
return df
else:
df1, *other_dfs = df_list
return df1.join(other_dfs, how='outer')
@DataFrameAccessor.register_accessor
def df_delta(pre_df, post_df, group_on=None):
"""
pre_df and post_df containing paired/consecutive events indexed by time,
df_delta() merges the two dataframes and adds a ``delta`` column
containing the time spent between the two events.
A typical usecase would be adding pre/post events at the entry/exit of a
function.
Rows from ``pre_df`` and ``post_df`` are grouped by the ``group_on``
columns.
E.g.: ``['pid', 'comm']`` to group by task.
Except columns listed in ``group_on``, ``pre_df`` and ``post_df`` must
have columns with different names.
Events that cannot be paired are ignored.
:param pre_df: Dataframe containing the events that start a record.
:type pre_df: pandas.DataFrame
:param post_df: Dataframe containing the events that end a record.
:type post_df: pandas.DataFrame
:param group_on: Columns used to group ``pre_df`` and ``post_df``.
E.g.: This would be ``['pid', 'comm']`` to group by task.
:type group_on: list(str)
:returns: a :class:`pandas.DataFrame` indexed by the ``pre_df`` dataframe
with:
* All the columns from the ``pre_df`` dataframe.
* All the columns from the ``post_df`` dataframe.
* A ``delta`` column (duration between the emission of a 'pre' event
and its consecutive 'post' event).
"""
pre_df = pre_df.copy(deep=False)
post_df = post_df.copy(deep=False)
# Tag the rows to remember from which df they are coming from.
pre_df["is_pre"] = True
post_df["is_pre"] = False
# Merge on columns common to the two dfs to avoid overlapping of names.
on_col = sorted(pre_df.columns & post_df.columns)
# Merging on nullable types converts columns to object.
# Merging on non-nullable types converts integer/boolean to float.
# Thus, leave the on_col columns non-nullable and convert the others to nullable.
pre_df_cols = sorted(set(pre_df) - set(on_col))
post_df_cols = sorted(set(post_df) - set(on_col))
pre_df[pre_df_cols] = df_convert_to_nullable(pre_df[pre_df_cols])
post_df[post_df_cols] = df_convert_to_nullable(post_df[post_df_cols])
# Merge. Don't allow column renaming.
df = pd.merge(pre_df, post_df, left_index=True, right_index=True, on=on_col,
how='outer', suffixes=(False, False))
# Save and replace the index name by a tmp name to avoid a clash
# with column names.
index_name = df.index.name
index_tmp_name = uuid.uuid4().hex
df.index.name = index_tmp_name
df.reset_index(inplace=True)
# In each group, search for a faulty sequence (where pre/post events are
# not interleaving, e.g. pre1->pre2->post1->post2).
if group_on:
grouped = df.groupby(group_on, observed=True, sort=False)
else:
grouped = df
if grouped['is_pre'].transform(lambda x: x == x.shift()).any():
raise ValueError('Unexpected sequence of pre and post event (more than one "pre" or "post" in a row)')
# Create the 'delta' column and add the columns from post_df
# in the rows coming from pre_df.
new_columns = dict(
delta=grouped[index_tmp_name].transform(lambda time: time.diff().shift(-1)),
)
new_columns.update({col: grouped[col].shift(-1) for col in post_df_cols})
df = df.assign(**new_columns)
df.set_index(index_tmp_name, inplace=True)
df.index.name = index_name
# Only keep the rows from the pre_df, they have all the necessary info.
df = df.loc[df["is_pre"]]
# Drop the rows from pre_df with not matching row from post_df.
df.dropna(inplace=True)
df.drop(columns=["is_pre"], inplace=True)
return df
def _resolve_x(y, x):
"""
Resolve the `x` series to use for derivative and integral operations
"""
if x is None:
x = pd.Series(y.index)
x.index = y.index
return x
@SeriesAccessor.register_accessor
def series_derivate(y, x=None, order=1):
"""
Compute a derivative of a :class:`pandas.Series` with respect to another
series.
:return: A series of `dy/dx`, where `x` is either the index of `y` or
another series.
:param y: Series with the data to derivate.
:type y: pandas.DataFrame
:param x: Series with the `x` data. If ``None``, the index of `y` will be
used. Note that `y` and `x` are expected to have the same index.
:type y: pandas.DataFrame or None
:param order: Order of the derivative (1 is speed, 2 is acceleration etc).
:type order: int
"""
x = _resolve_x(y, x)
for _ in range(order):
y = y.diff() / x.diff()
return y
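# Minimal sketch of series_derivate() on a toy signal (illustrative helper only).
def _example_series_derivate():
    y = pd.Series([0.0, 1.0, 4.0, 9.0], index=[0.0, 1.0, 2.0, 3.0])
    # Finite-difference slope with respect to the index: NaN, 1.0, 3.0, 5.0
    return series_derivate(y)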
@SeriesAccessor.register_accessor
def series_integrate(y, x=None, sign=None, method='rect', rect_step='post'):
"""
Compute the integral of `y` with respect to `x`.
:return: A scalar :math:`\\int_{x=A}^{x=B} y \\, dx`, where `x` is either the
index of `y` or another series.
:param y: Series with the data to integrate.
:type y: pandas.DataFrame
:param x: Series with the `x` data. If ``None``, the index of `y` will be
used. Note that `y` and `x` are expected to have the same index.
:type y: pandas.DataFrame or None
:param sign: Clip the data for the area in positive
or negative regions. Can be any of:
- ``+``: ignore negative data
- ``-``: ignore positive data
- ``None``: use all data
:type sign: str or None
:param method: The method for area calculation. This can
be any of the integration methods supported in :mod:`numpy`
or `rect`
:type param: str
:param rect_step: The step behaviour for `rect` method
:type rect_step: str
*Rectangular Method*
- Step: Post
Consider the following time series data::
2 *----*----*----+
| |
1 | *----*----+
|
0 *----*----+
0 1 2 3 4 5 6 7
import pandas as pd
a = [0, 0, 2, 2, 2, 1, 1]
s = pd.Series(a)
The area under the curve is:
.. math::
\\sum_{k=0}^{N-1} (x_{k+1} - {x_k}) \\times f(x_k) \\\\
(2 \\times 3) + (1 \\times 2) = 8
- Step: Pre
::
2 +----*----*----*
| |
1 | +----*----*----+
|
0 *----*
0 1 2 3 4 5 6 7
import pandas as pd
a = [0, 0, 2, 2, 2, 1, 1]
s = pd.Series(a)
The area under the curve is:
.. math::
\\sum_{k=1}^{N} (x_k - x_{k-1}) \\times f(x_k) \\\\
(2 \\times 3) + (1 \\times 3) = 9
"""
x = _resolve_x(y, x)
if sign == "+":
y = y.clip(lower=0)
elif sign == "-":
y = y.clip(upper=0)
elif sign is None:
pass
else:
raise ValueError(f'Unsupported "sign": {sign}')
if method == "rect":
if len(x) <= 1:
raise ValueError('Cannot integrate with less than 2 points')
else:
dx = x.diff()
if rect_step == "post":
dx = dx.shift(-1)
return (y * dx).sum()
# Make a DataFrame to make sure all rows stay aligned when we drop NaN,
# which is needed by all the below methods
df = pd.DataFrame({'x': x, 'y': y}).dropna()
x = df['x']
y = df['y']
if method == 'trapz':
return np.trapz(y, x)
elif method == 'simps':
return scipy.integrate.simps(y, x)
else:
raise ValueError(f'Unsupported integration method: {method}')
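# Minimal sketch of series_integrate() on the step signal illustrated in the
# docstring above (illustrative helper only).
def _example_series_integrate():
    y = pd.Series([0, 0, 2, 2, 2, 1, 1],
                  index=[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    area_post = series_integrate(y, method='rect', rect_step='post')
    area_pre = series_integrate(y, method='rect', rect_step='pre')
    return area_post, area_pre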
@SeriesAccessor.register_accessor
def series_mean(y, x=None, **kwargs):
r"""
Compute the average of `y` by integrating with respect to `x` and dividing
by the range of `x`.
:return: A scalar :math:`\int_{x=A}^{x=B} \frac{y}{| B - A |} \, dx`,
where `x` is either the index of `y` or another series.
:param y: Series with the data to integrate.
:type y: pandas.DataFrame
:param x: Series with the `x` data. If ``None``, the index of `y` will be
used. Note that `y` and `x` are expected to have the same index.
:type y: pandas.DataFrame or None
:Variable keyword arguments: Forwarded to :func:`series_integrate`.
"""
x = _resolve_x(y, x)
integral = series_integrate(y, x, **kwargs)
if len(y) > 1:
mean = integral / (x.max() - x.min())
# If there is only one data item, the mean is equal to it.
else:
mean = integral
return mean
@SeriesAccessor.register_accessor
def series_window(series, window, method='pre', clip_window=True):
"""
Select a portion of a :class:`pandas.Series`
:param series: series to slice
:type series: :class:`pandas.Series`
:param window: two-tuple of index values for the start and end of the
region to select.
:type window: tuple(object)
:param clip_window: Clip the requested window to the bounds of the index,
otherwise raise exceptions if the window is too large.
:type clip_window: bool
:param method: Choose how edges are handled:
* `inclusive`: When no exact match is found, include both the previous
and next values around the window.
* `exclusive`: When no exact match is found, only index values within
the range are selected. This is the default pandas float slicing
behavior.
* `nearest`: When no exact match is found, take the nearest index value.
* `pre`: When no exact match is found, take the previous index value.
* `post`: When no exact match is found, take the next index value.
.. note:: The index of `series` must be monotonic and without duplicates.
"""
return _data_window(series, window, method, clip_window)
def _data_window(data, window, method, clip_window):
"""
``data`` can either be a :class:`pandas.DataFrame` or :class:`pandas.Series`
.. warning:: This function assumes ``data`` has a sorted index.
"""
index = data.index
if clip_window:
if data.empty:
return data
start, end = window
first = index[0]
last = index[-1]
# Fill placeholders
if start is None:
start = first
if end is None:
end = last
# Window is on the left
if start <= first and end <= first:
start = first
end = first
# Window is on the right
elif start >= last and end >= last:
start = last
end = last
# Overlapping window
else:
if start <= first:
start = first
if end >= last:
end = last
window = (start, end)
if window[0] > window[1]:
raise KeyError(f'The window starts after its end: {window}')
if method == 'inclusive':
method = ('ffill', 'bfill')
elif method == 'exclusive':
# Default slicing behaviour of pandas' Float64Index is to be exclusive,
# so we can use that knowledge to enable a fast path.
if isinstance(data.index, pd.Float64Index):
return data[slice(*window)]
method = ('bfill', 'ffill')
elif method == 'nearest':
method = ('nearest', 'nearest')
elif method == 'pre':
method = ('ffill', 'ffill')
elif method == 'post':
method = ('bfill', 'bfill')
else:
raise ValueError(f'Slicing method not supported: {method}')
window = [
_get_loc(index, x, method=method) if x is not None else None
for x, method in zip(window, method)
]
window = window[0], (window[1] + 1)
return data.iloc[slice(*window)]
def _get_loc(index, x, method):
"""
Emulate :func:`pandas.Index.get_loc` behavior with the much faster
:func:`pandas.Index.searchsorted`.
.. warning:: Passing a non-sorted index will destroy performance.
"""
# Not a lot of use for nearest, so fall back on the slow but easy to use get_loc()
#
# Also, if the index is not sorted, we need to fall back on the slow path
# as well. Checking is_monotonic is cheap so it's ok to do it here.
if method == 'nearest' or not index.is_monotonic:
return index.get_loc(x, method=method)
else:
if index.empty:
raise KeyError(x)
# get_loc() also raises an exception in these case
elif method == 'ffill' and x < index[0]:
raise KeyError(x)
elif method == 'bfill' and x > index[-1]:
raise KeyError(x)
loc = index.searchsorted(x)
try:
val_at_loc = index[loc]
# We are getting an index past the end. This is fine since we already
# checked correct bounds before
except IndexError:
return loc - 1
if val_at_loc == x:
return loc
elif val_at_loc < x:
return loc if method == 'ffill' else loc + 1
else:
return loc - 1 if method == 'ffill' else loc
@DataFrameAccessor.register_accessor
def df_window(df, window, method='pre', clip_window=True):
"""
Same as :func:`series_window` but acting on a :class:`pandas.DataFrame`
"""
return _data_window(df, window, method, clip_window)
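# Minimal sketch of df_window() with the 'pre' method (illustrative helper
# only): the row at index 1.0 is kept because it is the last value at or
# before the window start of 1.5.
def _example_df_window():
    df = pd.DataFrame({'value': [0, 1, 2, 3]}, index=[0.0, 1.0, 2.0, 3.0])
    return df_window(df, window=(1.5, 3.0), method='pre')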
@DataFrameAccessor.register_accessor
def df_make_empty_clone(df):
"""
Make an empty clone of the given dataframe.
:param df: The template dataframe.
:type df: pandas.DataFrame
More specifically, the following aspects are cloned:
* Column names
* Column dtypes
"""
return df.iloc[0:0].copy(deep=True)
@DataFrameAccessor.register_accessor
def df_window_signals(df, window, signals, compress_init=False, clip_window=True):
"""
Similar to :func:`df_window` with ``method='pre'`` but guarantees that each
signal will have a values at the beginning of the window.
:param window: two-tuple of index values for the start and end of the
region to select.
:type window: tuple(object)
:param signals: List of :class:`SignalDesc` describing the signals to
fixup.
:type signals: list(SignalDesc)
:param compress_init: When ``False``, the timestamps of the init value of
signals (right before the window) are preserved. If ``True``, they are
changed into values as close as possible to the beginning of the window.
:type compress_init: bool
:param clip_window: See :func:`df_window`
.. seealso:: :func:`df_split_signals`
"""
def before(x):
return np.nextafter(x, -math.inf)
windowed_df = df_window(df, window, method='pre', clip_window=clip_window)
# Split the extra rows that the method='pre' gave in a separate dataframe,
# so we make sure we don't end up with duplication in init_df
extra_window = (
windowed_df.index[0],
window[0],
)
if extra_window[0] >= extra_window[1]:
extra_df = df_make_empty_clone(df)
else:
extra_df = df_window(windowed_df, extra_window, method='pre')
# This time around, exclude anything before extra_window[1] since it will be provided by extra_df
try:
# Right boundary is exact, so failure can only happen if left boundary
# is after the start of the dataframe, or if the window starts after its end.
_window = (extra_window[1], windowed_df.index[-1])
windowed_df = df_window(windowed_df, _window, method='post', clip_window=False)
# The windowed_df did not contain any row in the given window, all the
# actual data are in extra_df
except KeyError:
windowed_df = df_make_empty_clone(df)
else:
# Make sure we don't include the left boundary
if windowed_df.index[0] == _window[0]:
windowed_df = windowed_df.iloc[1:]
def window_signal(signal_df):
# Get the row immediately preceding the window start
loc = _get_loc(signal_df.index, window[0], method='ffill')
return signal_df.iloc[loc:loc + 1]
# Get the value of each signal at the beginning of the window
signal_df_list = [
window_signal(signal_df)
for signal, signal_df in itertools.chain.from_iterable(
df_split_signals(df, signal.fields, align_start=False)
for signal in signals
)
# Only consider the signal that are in the window. Signals that started
# after the window are irrelevant.
if not signal_df.empty and signal_df.index[0] <= window[0]
]
if compress_init:
def make_init_df_index(init_df):
# Yield a sequence of numbers incrementing by the smallest amount
# possible
def smallest_increment(start, length):
curr = start
for _ in range(length):
curr = before(curr)
yield curr
# If windowed_df is empty, we take the last bit right before the
# beginning of the window
try:
start = windowed_df.index[0]
except IndexError:
start = extra_df.index[-1]
index = list(smallest_increment(start, len(init_df)))
index = pd.Float64Index(reversed(index))
return index
else:
def make_init_df_index(init_df):
return init_df.index
# Get the last row before the beginning the window for each signal, in
# timestamp order
init_df = pd.concat([extra_df] + signal_df_list)
init_df.sort_index(inplace=True)
# Remove duplicated indices, meaning we selected the same row multiple
# times because it's part of multiple signals
init_df = init_df.loc[~init_df.index.duplicated(keep='first')]
init_df.index = make_init_df_index(init_df)
return pd.concat([init_df, windowed_df])
@SeriesAccessor.register_accessor
def series_align_signal(ref, to_align, max_shift=None):
"""
Align a signal to an expected reference signal using their
cross-correlation.
:returns: `(ref, to_align)` tuple, with `to_align` shifted by an amount
computed to align as well as possible with `ref`. Both `ref` and
`to_align` are resampled to have a fixed sample rate.
:param ref: reference signal.
:type ref: pandas.Series
:param to_align: signal to align
:type to_align: pandas.Series
:param max_shift: Maximum shift allowed to align signals, in index units.
:type max_shift: object or None
"""
if ref.isnull().any() or to_align.isnull().any():
raise ValueError('NaN needs to be dropped prior to alignment')
# Select the overlapping part of the signals
start = max(ref.index.min(), to_align.index.min())
end = min(ref.index.max(), to_align.index.max())
# Resample so that we operate on a fixed sampled rate signal, which is
# necessary in order to be able to do a meaningful interpretation of
# correlation argmax
def get_period(series):
return pd.Series(series.index).diff().min()
period = min(get_period(ref), get_period(to_align))
num = math.ceil((end - start) / period)
new_index = pd.Float64Index(np.linspace(start, end, num))
to_align = to_align.reindex(new_index, method='ffill')
ref = ref.reindex(new_index, method='ffill')
# Compute the correlation between the two signals
correlation = scipy.signal.signaltools.correlate(to_align, ref)
# The most likely shift is the index at which the correlation is
# maximum. correlation.argmax() can vary from 0 to 2*len(to_align), so we
# re-center it.
shift = correlation.argmax() - len(to_align)
# Cap the shift value
if max_shift is not None:
assert max_shift >= 0
# Turn max_shift into a number of samples in the resampled signal
max_shift = int(max_shift / period)
# Adjust the sign of max_shift to match shift
max_shift *= -1 if shift < 0 else 1
if abs(shift) > abs(max_shift):
shift = max_shift
# Compensate the shift
return ref, to_align.shift(-shift)
@DataFrameAccessor.register_accessor
def df_filter_task_ids(df, task_ids, pid_col='pid', comm_col='comm', invert=False, comm_max_len=TASK_COMM_MAX_LEN):
"""
Filter a dataframe using a list of :class:`lisa.trace.TaskID`
:param task_ids: List of task IDs to filter
:type task_ids: list(lisa.trace.TaskID)
:param df: Dataframe to act on.
:type df: pandas.DataFrame
:param pid_col: Column name in the dataframe with PIDs.
:type pid_col: str or None
:param comm_col: Column name in the dataframe with comm.
:type comm_col: str or None
:param comm_max_len: Maximum expected length of the strings in
``comm_col``. The ``task_ids`` `comm` field will be truncated at that
length before being matched.
:param invert: Invert selection
:type invert: bool
"""
def make_filter(task_id):
if pid_col and task_id.pid is not None:
pid = (df[pid_col] == task_id.pid)
else:
pid = True
if comm_col and task_id.comm is not None:
comm = (df[comm_col] == task_id.comm[:comm_max_len])
else:
comm = True
return pid & comm
tasks_filters = list(map(make_filter, task_ids))
if tasks_filters:
# Combine all the task filters with OR
tasks_filter = functools.reduce(operator.or_, tasks_filters)
if invert:
tasks_filter = ~tasks_filter
return df[tasks_filter]
else:
return df if invert else df.iloc[0:0]
@SeriesAccessor.register_accessor
def series_local_extremum(series, kind):
"""
Returns a series of local extremum.
:param series: Series to look at.
:type series: pandas.Series
:param kind: Kind of extremum: ``min`` or ``max``.
:type kind: str
"""
if kind == 'min':
comparator = np.less_equal
elif kind == 'max':
comparator = np.greater_equal
else:
raise ValueError(f'Unsupported kind: {kind}')
ilocs = scipy.signal.argrelextrema(series.to_numpy(), comparator=comparator)
return series.iloc[ilocs]
@SeriesAccessor.register_accessor
def series_envelope_mean(series):
"""
Compute the average between the mean of local maximums and local minimums
of the series.
Assuming that the values are ranging inside a tunnel, this will give the
average center of that tunnel.
"""
first_val = series.iat[0]
# Remove constant values, otherwise they would be accounted in both max and
# min, which can bias the result
series = series_deduplicate(series, keep='first', consecutives=True)
# If the series was constant, just return that constant
if series.empty:
return first_val
else:
maxs = series_local_extremum(series, kind='max')
mins = series_local_extremum(series, kind='min')
maxs_mean = series_mean(maxs)
mins_mean = series_mean(mins)
return (maxs_mean - mins_mean) / 2 + mins_mean
# Keep an alias in place for compatibility
@deprecate(replaced_by=series_envelope_mean, deprecated_in='2.0', removed_in='3.0')
def series_tunnel_mean(*args, **kwargs):
return series_envelope_mean(*args, **kwargs)
@SeriesAccessor.register_accessor
def series_rolling_apply(series, func, window, window_float_index=True, center=False):
"""
Apply a function on a rolling window of a series.
:returns: The series of results of the function.
:param series: Series to act on.
:type series: pandas.Series
:param func: Function to apply on each window. It must take a
:class:`pandas.Series` as only parameter and return one value.
:type func: collections.abc.Callable
:param window: Rolling window width in seconds.
:type window: float
:param center: Label values generated by ``func`` with the center of the
window, rather than the highest index in it.
:type center: bool
:param window_float_index: If ``True``, the series passed to ``func`` will
be of type :class:`pandas.Float64Index`, in nanoseconds. Disabling is
recommended if the index is not used by ``func`` since it will remove
the need for a conversion.
:type window_float_index: bool
"""
orig_index = series.index
# Wrap the func to turn the index into nanosecond Float64Index
if window_float_index:
def func(s, func=func):
# pylint: disable=function-redefined
s.index = s.index.astype('int64') * 1e-9
return func(s)
# Use a timedelta index so that rolling gives time-based results
index = pd.to_timedelta(orig_index, unit='s')
series = pd.Series(series.array, index=index)
window_ns = int(window * 1e9)
rolling_window = f'{window_ns}ns'
values = series.rolling(rolling_window).apply(func, raw=False).values
if center:
new_index = orig_index - (window / 2)
else:
new_index = orig_index
return pd.Series(values, index=new_index)
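# Minimal sketch of series_rolling_apply() (illustrative helper only): a 0.5 s
# rolling mean over a signal sampled every 0.25 s.
def _example_series_rolling_apply():
    y = pd.Series([1.0, 2.0, 3.0, 4.0], index=[0.0, 0.25, 0.5, 0.75])
    return series_rolling_apply(y, lambda s: s.mean(), window=0.5)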
# -*- coding: utf-8 -*-
"""
analyze and plot results of experiments
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sb
import yaml
#E2: How large can I make my output domain without losing skill?
E2_results = pd.read_csv('param_optimization/E2_results_t2m_34_t2m.csv',sep =';')
#E1:
E1_results = pd.read_csv('param_optimization/E1_results_t2m_34_t2m.csv',sep =';')
#E1 label smoothing
E1_smooth_results = pd.read_csv('param_optimization/E1_label_smoothing_results_t2m_34_t2m_14_10_2021.csv',sep =';')
#E1 refined
E1_ref_results= pd.read_csv('param_optimization/E1_refined_results_t2m_34_t2m_ls0.4.csv',sep =';')
E1_ref_add = pd.read_csv('param_optimization/E1_label_smoothing_results_t2m_34_t2m_14_10_2021.csv',sep =';')
E1_ref_add = E1_ref_add.where(E1_ref_add.label_smoothing == 0.4).dropna()
E1_ref_results = pd.concat([E1_ref_results, E1_ref_add])
E1_ref_results.reset_index(inplace = True)
E1_ref_results_06 = pd.read_csv('param_optimization/E1_refined_results_t2m_34_t2m_0.6.csv',sep =';')
E1_ref_results_06 = pd.concat([E1_ref_results_06, E1_ref_add])
E1_ref_results_06.reset_index(inplace = True)
#E4
E4_results = pd.read_csv('param_optimization/E4_results_t2m_34_t2m_all.csv',sep =';')
#E3
E3_results01 = pd.read_csv('param_optimization/E3_results_t2m_34_t2m_folds_0_1.csv',sep =';')
E3_results25 = pd.read_csv('param_optimization/E3_results_t2m_34_t2m_folds_2_5.csv',sep =';')
E3_results= pd.concat([E3_results01, E3_results25])
E3_results.reset_index(inplace = True)
#%%
###############################################################################
###################################E2
#E2 sharpness of forecasts
E2_sharpness = E2_results[['fold_no','radius_basis_func','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage']]
E2_sharpness = E2_sharpness.melt(id_vars = ['fold_no','radius_basis_func','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2'],
value_vars = ['min_percentage', 'max_percentage'], var_name = 'minmax', value_name = 'probability')
plt.figure()
sb.lineplot(x="epochs", y="probability",hue="radius_basis_func", style="minmax", data=E2_sharpness)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)#ncol = 2)
plt.xticks([5,10])
plt.tight_layout()
plt.savefig('plots/E2_sharpness.png')
#%%
#E2 achieved RPSS
results_epoch_end = E2_results[['fold_no','radius_basis_func','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage']]
results_epoch_end = results_epoch_end.melt(id_vars = ['fold_no','radius_basis_func','epochs', 'accuracy', 'loss', 'min_percentage', 'max_percentage'],
value_vars = ['RPSS_year1', 'RPSS_year2'], var_name = 'year', value_name = 'RPSS')
plt.figure()
sb.lineplot( data = results_epoch_end, x = 'epochs', y = 'RPSS', hue = 'radius_basis_func')#, style = 'year')
plt.xticks([5,10])
plt.hlines(y = 0, xmin= results_epoch_end.epochs.min(), xmax = results_epoch_end.epochs.max(), color = 'black')
plt.tight_layout()
plt.savefig('plots/E2_RPSS.png')
#%%
#E2 history
def extract_history(results):
df_list = []
for i in range(0,results.shape[0],2):
accuracy = yaml.safe_load(results.history[i])['accuracy'] + yaml.safe_load(results.history[i + 1])['accuracy']
val_accuracy = yaml.safe_load(results.history[i])['val_accuracy'] + yaml.safe_load(results.history[i + 1])['val_accuracy']
loss = yaml.safe_load(results.history[i])['loss'] + yaml.safe_load(results.history[i + 1])['loss']
val_loss = yaml.safe_load(results.history[i])['val_loss'] + yaml.safe_load(results.history[i + 1])['val_loss']
df_tr = pd.DataFrame(accuracy, columns = ['accuracy'])
df_tr['loss'] = loss
df_tr['dataset'] = 'train'
df_tr['epochs'] = np.array([1,2,3,4,5,6,7,8,9,10])
df_val = pd.DataFrame(val_accuracy, columns = ['accuracy'])
df_val['loss'] = val_loss
df_val['dataset'] = 'validation'
df_val['epochs'] = np.array([1,2,3,4,5,6,7,8,9,10])
df = pd.concat([df_tr, df_val])
df['radius'] = results.radius_basis_func[i]
df['fold'] = results.fold_no[i]
df_list.append(df)
history = pd.concat(df_list)
from pydap.client import open_url
from datetime import datetime
from calendar import monthrange, month_name
import os
import numpy as np
import pandas as pd
import netCDF4 as nc
import xarray as xr
import time
import pickle
import cdsapi
import math
from Plot import incidence_and_ml_plot
# Interpolation
from scipy.interpolate import RegularGridInterpolator, LinearNDInterpolator
import scipy.interpolate.interpnd
# Plotting
from matplotlib import pyplot as plt, figure
from sklearn.metrics import mean_squared_error
from matplotlib.colors import LogNorm
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
from plotly import express as px
import cartopy.crs as ccrs
# Set data frame options
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
#################################
### DATA COLLECTION FUNCTIONS ###
#################################
def generate_url(year, month, day, satellite_number):
day_of_year = datetime(year, month, day).timetuple().tm_yday
date_string = str(year) + str(month).zfill(2) + str(day).zfill(2)
base_url = 'https://podaac-opendap.jpl.nasa.gov/opendap/hyrax/allData/cygnss/L1/v3.0/'
specific_url = str(year) + '/' + str(day_of_year).zfill(3) + '/cyg0' + str(satellite_number) + '.ddmi.s' + \
date_string + '-000000-e' + date_string + '-235959.l1.power-brcs.a30.d31.nc'
data_url = base_url + specific_url
return data_url + '?sp_lat,sp_lon,ddm_timestamp_utc,ddm_snr,gps_tx_power_db_w,gps_ant_gain_db_i,rx_to_sp_range,' \
'tx_to_sp_range,sp_rx_gain,spacecraft_num,prn_code,track_id,quality_flags,quality_flags_2,sp_inc_angle', day_of_year
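# Minimal sketch of generate_url() (illustrative helper only): builds the
# OPeNDAP URL for CYGNSS satellite 1 on 1 August 2020 (day of year 214).
def _example_generate_url():
    url, day_of_year = generate_url(2020, 8, 1, satellite_number=1)
    return url, day_of_year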
def collect_dataset(day_of_year, url, satellite_nr):
dataset = open_url(url, output_grid=False)
df = pd.DataFrame()
track_list = []
for ddm in range(4):
ddm_df = pd.DataFrame()
print("ddm: " + str(ddm))
sp_lat = np.array(dataset.sp_lat[:, ddm])
sp_lon = np.array(dataset.sp_lon[:, ddm])
a, b = (np.where(sp_lon > 180))
sp_lon[a] -= 360
ddm_timestamp_utc = np.array(dataset.ddm_timestamp_utc[:, ddm])
ddm_snr = np.array(dataset.ddm_snr[:, ddm])
gps_tx_power_db_w = np.array(dataset.gps_tx_power_db_w[:, ddm])
gps_ant_gain_db_i = np.array(dataset.gps_ant_gain_db_i[:, ddm])
rx_to_sp_range = np.array(dataset.rx_to_sp_range[:, ddm])
tx_to_sp_range = np.array(dataset.tx_to_sp_range[:, ddm])
sp_rx_gain = np.array(dataset.sp_rx_gain[:, ddm])
track_id = np.array(dataset.track_id[:, ddm])
prn_code = np.array(dataset.prn_code[:, ddm])
quality_flags = np.array(dataset.quality_flags[:, ddm])
quality_flags_2 = np.array(dataset.quality_flags_2[:, ddm])
sp_inc_angle = np.array(dataset.sp_inc_angle[:, ddm])
ddm_df['ddm_channel'] = np.zeros(len(sp_lon))
ddm_df['spacecraft_num'] = np.zeros(len(sp_lon))
ddm_df['day_of_year'] = np.zeros(len(sp_lon))
ddm_df['sp_lat'] = sp_lat.tolist()
ddm_df['sp_lon'] = sp_lon.tolist()
ddm_df = ddm_df.assign(ddm_channel=ddm)
ddm_df = ddm_df.assign(spacecraft_num=satellite_nr)
ddm_df = ddm_df.assign(day_of_year=day_of_year)
ddm_df['ddm_timestamp_utc'] = ddm_timestamp_utc.tolist()
ddm_df['ddm_snr'] = ddm_snr.tolist()
ddm_df['gps_tx_power_db_w'] = gps_tx_power_db_w.tolist()
ddm_df['gps_ant_gain_db_i'] = gps_ant_gain_db_i.tolist()
ddm_df['rx_to_sp_range'] = rx_to_sp_range.tolist()
ddm_df['tx_to_sp_range'] = tx_to_sp_range.tolist()
ddm_df['sp_rx_gain'] = sp_rx_gain.tolist()
ddm_df['track_id'] = track_id.tolist()
ddm_df['prn_code'] = prn_code.tolist()
ddm_df['sp_inc_angle'] = sp_inc_angle.tolist()
ddm_df['quality_flags'] = quality_flags.tolist()
ddm_df['quality_flags_2'] = quality_flags_2.tolist()
for col in ddm_df.columns:
if col != 'ddm_channel' and col != 'ddm_timestamp_utc' and col != 'spacecraft_num' and col != 'day_of_year':
ddm_df[col] = ddm_df[col].apply(lambda x: x[0])
df = df.append(ddm_df, ignore_index=True)
return df
def collect_data(url):
data = open_url(url, output_grid=False)
return data
def create_cygnss_df(year, month, day):
df = pd.DataFrame()
raw_data_list = []
failed_satellites = []
sat_counter = 0
failed_attempts = 0
while sat_counter < 8:
try:
satellite_start = time.time()
print('Starting computations for satellite number ' + str(sat_counter+1) + '...')
print('------------------------------------------------------------')
print('Generating url...')
data_url, day_of_year = generate_url(year, month, day, sat_counter+1)
print('Collecting data as a DataFrame...')
satellite_df = collect_dataset(day_of_year, data_url, sat_counter+1)
print('Collecting raw data...')
raw_data = collect_data(data_url)
raw_data_list.append(raw_data)
seconds = time.time()-satellite_start
print('Collected data for satellite ' + str(sat_counter+1) + ' in ' + str(round(seconds/60)) + ' minutes and ' +
str(seconds % 60) + ' seconds.')
print('#####################################################')
print('#####################################################\n\n')
df = df.append(satellite_df)
sat_counter += 1
except:
print('Data collection failed. Trying again...')
failed_attempts += 1
if failed_attempts == 50:
failed_satellites.append(sat_counter+1)
sat_counter += 1
failed_attempts = 0
print('Data collection aborted. Trying the next satellite!')
return df, raw_data_list, failed_satellites
############################################
### DATA PROCESSING INDIVIDUAL FUNCTIONS ###
############################################
def calculate_sr_value(snr, p_r, g_t, g_r, d_ts, d_sr):
# snr(dB), p_r(dBW), g_t(dBi), g_r(dBi), d_ts(meter), d_sr(meter)
return snr - p_r - g_t - g_r - (20 * np.log10(0.19)) + (20 * np.log10(d_ts + d_sr)) + (20 * np.log10(4 * np.pi))
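# Quick sanity check of the expression above (illustrative numbers only; the 0.19 m constant
# corresponds to the GPS L1 wavelength assumed by the formula):
#   calculate_sr_value(5, 14, 15, 10, 2.3e7, 6.7e5)
#   = 5 - 14 - 15 - 10 + 14.4 + 147.5 + 22.0 ≈ 149.9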
def compute_surface_reflectivity(df):
df['sr'] = df.apply(
lambda row: calculate_sr_value(row.ddm_snr, row.gps_tx_power_db_w, row.gps_ant_gain_db_i, row.sp_rx_gain,
row.tx_to_sp_range, row.rx_to_sp_range), axis=1)
return df
def calculate_hours_after_jan_value(day_of_year, ddm_timestamp):
return (day_of_year - 1) * 24 + round(ddm_timestamp / (60 * 60))
def compute_hours_after_jan(df):
df['hours_after_jan_2020'] = df.apply(
lambda row: calculate_hours_after_jan_value(row.day_of_year, row.ddm_timestamp_utc), axis=1)
return df
def generate_unique_track_id_value(track_id, day_of_year, prn_nr, sat_nr):
return track_id * 10000 + prn_nr * 10 + sat_nr + day_of_year/1000
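# Illustrative layout of the composite ID above (hypothetical values): track_id=37,
# prn_code=12, sat_nr=3, day_of_year=145 gives 37*10000 + 12*10 + 3 + 145/1000 = 370123.145,
# so the integer part encodes track/PRN/satellite and the decimals encode the day of year.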
def compute_unique_track_ids(df):
df['unique_track_id'] = df.apply(
lambda row: generate_unique_track_id_value(row.track_id, row.day_of_year, row.prn_code, row.spacecraft_num), axis=1)
return df
def generate_qf_list(qf_number):
    # Decompose a quality-flag integer into the power-of-two flag values that are set
    # (format(..., 'b') is most-significant bit first, hence the reversed exponent)
    qf_list = []
    binary = format(qf_number, 'b')
    for i in range(len(binary)):
        if binary[i] == '1':
            qf_list.append(2 ** (len(binary) - 1 - i))
    return qf_list
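# Example of the decomposition above: a quality_flags value of 22 (binary 10110) yields
#   generate_qf_list(22) -> [16, 4, 2]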
def compute_prn_to_block_value(prn_code):
iir_list = [2, 13, 16, 19, 20, 21, 22]
iif_list = [1, 3, 6, 8, 9, 10, 25, 26, 27, 30, 32]
iir_m_list = [5, 7, 12, 15, 17, 29, 31]
iii_list = [4, 11, 14, 18, 23, 24]
if prn_code in iir_list:
return 'IIR'
elif prn_code in iif_list:
return 'IIF'
elif prn_code in iir_m_list:
return 'IIR-M'
elif prn_code in iii_list:
return 'III'
else:
return 'UNKNOWN'
def compute_block_code(df):
df['block_code'] = df.apply(lambda row: compute_prn_to_block_value(row.prn_code), axis=1)
return df
def compute_daily_hour_column(df):
df['daily_hour'] = df.apply(lambda row: round(row.ddm_timestamp_utc / (60*60)), axis=1)
return df
def compute_time_of_day_value(time):
if time >= 22:
return 'N'
elif time >= 16:
return 'A'
elif time >= 10:
return 'D'
elif time >= 4:
return 'M'
else:
return 'N'
def compute_time_of_day(df):
df['time_of_day'] = df.apply(lambda row: compute_time_of_day_value(row.daily_hour), axis=1)
return df
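# Illustrative mapping of the buckets above (labels presumably night/morning/day/afternoon):
# daily_hour 2 -> 'N', 5 -> 'M', 14 -> 'D', 18 -> 'A', 23 -> 'N'.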
def scale_sr_values(df):
min_sr = df['sr'].min()
df['sr'] = df['sr'].apply(lambda x: x - min_sr)
return df
def filter_location(df, location):
filtered_df = df[df.sp_lat < location[0]]
filtered_df = filtered_df[filtered_df.sp_lat > location[2]]
filtered_df = filtered_df[filtered_df.sp_lon < location[3]]
filtered_df = filtered_df[filtered_df.sp_lon > location[1]]
return filtered_df
def filter_quality_flags_1(df):
    # Keep only rows where none of the flagged overall-quality bits are present
    df['qf_ok'] = df.apply(
        lambda row: not any(flag in generate_qf_list(int(row.quality_flags))
                            for flag in (2, 4, 5, 8, 16, 17)), axis=1)
    df = df[df['qf_ok']]
    return df
def filter_quality_flags_2(df):
    res_df = df
    res_df['qf2_ok'] = res_df.apply(
        lambda row: not any(flag in generate_qf_list(int(row.quality_flags_2))
                            for flag in (1, 2)), axis=1)  # Remember to check which qfs
    res_df = res_df[res_df['qf2_ok']]
    return res_df
def remove_fill_values(df, raw_data):
keys = list(raw_data.keys())
keys.remove('ddm_timestamp_utc')
keys.remove('spacecraft_num')
filtered_df = df
# Remove rows containing fill values
for k in keys:
key = raw_data[k]
fv = key._FillValue
filtered_df = filtered_df[filtered_df[k] != fv]
return filtered_df
#################################
### DATA PROCESSING FUNCTIONS ###
#################################
def raw_df_processing(df, location, qf1_removal=False, qf2_removal=False):
res_df = df
print('Filtering the DataFrame based on provided location...')
res_df = filter_location(res_df, location)
if qf1_removal:
print('Removing bad quality values...')
rows_before_removal = res_df.shape[0]
res_df = filter_quality_flags_1(res_df)
rows_after_removal = res_df.shape[0]
print('Removed ' + str(rows_before_removal - rows_after_removal) + ' rows of bad overall quality...')
if qf2_removal:
print('Removing more bad quality values...')
rows_before_removal = res_df.shape[0]
res_df = filter_quality_flags_2(res_df)
rows_after_removal = res_df.shape[0]
print('Removed ' + str(rows_before_removal - rows_after_removal) + ' rows of bad overall quality...')
print('Computing surface reflectivity values for all rows...')
res_df = compute_surface_reflectivity(res_df)
print('Adding column displaying hours after January 1st 2020...')
res_df = compute_hours_after_jan(res_df)
print('Computing unique track ids for all rows...')
res_df = compute_unique_track_ids(res_df)
return res_df
def process_monthly_df(retrieval_folder, year, month, location, qf1_removal=True, qf2_removal=False):
monthly_df = pd.DataFrame()
num_of_days = monthrange(year, month)[1]
for i in range(num_of_days):
csv_path = retrieval_folder + str(month).zfill(2) + '/raw_main_df_' + str(year) + '_' + str(month).zfill(2) + '_' + \
str(i + 1) + 'of' + str(num_of_days) + '.csv'
print('#######################################')
print('Collecting csv file number ' + str(i + 1) + ' of ' + str(num_of_days) + '...')
daily_df = pd.read_csv(csv_path)
print('***Processing the data***')
daily_df = raw_df_processing(daily_df, location, qf1_removal, qf2_removal)
monthly_df = monthly_df.append(daily_df, ignore_index=True)
print('#######################################\n')
return monthly_df
# Returning the same df if no specific features are selected
def select_df_features(df, feature_list):
if len(feature_list) > 0:
return df[feature_list]
else:
return df
def store_df_as_csv(df, storage_path):
df.to_csv(storage_path, index=False)
###############################
### INTERPOLATION FUNCTIONS ###
###############################
def interpolate_ml(df: pd.DataFrame, target_value='swvl1') -> LinearNDInterpolator:
coordinates = list(zip(list(df['time']), list(df['lat']), list(df['long'])))
target = df[target_value]
interpolation_function = LinearNDInterpolator(coordinates, target)
return interpolation_function
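# Minimal usage sketch for interpolate_ml (toy values, not part of the pipeline):
#   toy = pd.DataFrame({'time': [0, 0, 0, 24], 'lat': [60.0, 61.0, 60.0, 60.5],
#                       'long': [10.0, 10.0, 11.0, 10.5], 'swvl1': [0.20, 0.25, 0.22, 0.30]})
#   f = interpolate_ml(toy)
#   f(6.0, 60.3, 10.3)  # linear interpolation; returns NaN outside the convex hull of the samples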
def remove_lakes(path, era5_df):
try:
ds = xr.open_dataset(path)
except:
ds = xr.open_dataset('../' + path)
df = ds.to_dataframe()
df = df.reset_index()
if 'time' in df.columns:
df = df.drop(['time'], axis=1)
df['lat'] = df['latitude'].apply(lambda x: round(x, 2))
df['long'] = df['longitude'].apply(lambda x: round(x, 2))
df = df.drop(['latitude', 'longitude'], axis=1)
df = df[df['lsm'] > 0.5]
len_before = len(era5_df)
filtered_df = pd.merge(era5_df, df, how='left', left_on=['lat', 'long'], right_on=['lat', 'long'])
filtered_df = filtered_df.dropna()
print('Removed lake items:', len_before - len(filtered_df))
return filtered_df
def read_era5_data(path, year=2020, target_value='swvl1', remove_lake=True):
try:
ds = xr.open_dataset(path)
except:
ds = xr.open_dataset('../' + path)
df = ds.to_dataframe()
if 'src' in df.columns:
df = df.drop(['src'], axis=1)
if 'swvl2' in df.columns:
df = df.drop(['swvl2'], axis=1)
longitude = df.index.levels[0]
latitude = df.index.levels[1]
time = df.index.levels[2]
start = pd.Timestamp(year, 1, 1)
time_converted = []
for t in time:
diff = t - start
time_converted.append(diff.days * 24 + diff.seconds / 3600)
long, lat, time_converted = np.meshgrid(longitude, latitude, time_converted, indexing='ij')
long = np.array(long).flatten()
lat = np.array(lat).flatten()
time_converted = np.array(time_converted).flatten()
df = pd.DataFrame({'long': long, 'lat': lat, 'time': time_converted, target_value: df[target_value]})
df['lat'] = df['lat'].apply(lambda lat: round(lat, 1))
df['long'] = df['long'].apply(lambda long: round(long, 1))
if target_value == 'swvl1' and remove_lake:
df = remove_lakes('/Volumes/DACOTA HDD/Semester Project CSV/ERA5/lsm.nc', df)
return df
def get_era5_monthly_df(era5_df, month, year):
start_day_of_month = datetime(year, month, 1).timetuple().tm_yday
if month != 12:
end_day_of_month = datetime(year, month + 1, 1).timetuple().tm_yday
else:
end_day_of_month = datetime(year, 12, 31).timetuple().tm_yday + 1
res_df = era5_df[era5_df.time >= (start_day_of_month-1)*24]
return res_df[res_df.time < (end_day_of_month-1)*24]
def generate_monthly_interpolation_function(era5, location, month, year, storage_folder):
location_string = str(location[0]) + '-' + str(location[1]) + '-' + str(location[2]) + '-' + str(location[3])
monthly_era5_df = get_era5_monthly_df(era5, month, year)
interpolation_start = time.time()
print('Generating an interpolation function for the month of ' + month_name[month] + '...')
monthly_interpolation_function = interpolate_ml(monthly_era5_df)
print('Seconds used to create the interpolation function: ', (time.time() - interpolation_start))
return monthly_interpolation_function
def load_monthly_interpolation_function(location, month, year, storage_folder):
location_string = str(location[0]) + '-' + str(location[1]) + '-' + str(location[2]) + '-' + str(location[3])
with open(storage_folder + '/interpolation_function_' + str(year) + '_' + str(month).zfill(2) + '_' + location_string + '.pickle', 'rb') as input_interpolation_file:
monthly_interpolation_function = pickle.load(input_interpolation_file)
return monthly_interpolation_function
def interpolate_df(df, interpolation_function):
df['sm'] = df.apply(lambda row: interpolation_function(row.hours_after_jan_2020, row.sp_lat, row.sp_lon), axis=1)
return df
def generate_monthly_interpolated_cygnss_df(location, month, year, cygnss_storage_folder, interpolation_function):
location_string = str(location[0]) + '-' + str(location[1]) + '-' + str(location[2]) + '-' + str(location[3])
cygnss_df = pd.read_csv(cygnss_storage_folder + '/Processed' + str(year) + '-' + str(month).zfill(2) + '-withQFs-' +
location_string + '.csv')
interpolation_start = time.time()
print('Interpolating the df for the month of ' + month_name[month] + '...')
interpolated_df = interpolate_df(cygnss_df, interpolation_function)
print('Seconds used to interpolate values for soil moisture: ', (time.time() - interpolation_start))
return interpolated_df
def load_monthly_interpolated_df(location, month, year, interpolated_df_storage_folder):
location_string = str(location[0]) + '-' + str(location[1]) + '-' + str(location[2]) + '-' + str(location[3])
    return pd.read_csv(interpolated_df_storage_folder + '/df_with_interpolated_sm_' + str(year) + '_' + str(month).zfill(2) + '_' + location_string + '.csv')
def load_periodic_interpolated_df(location, start_month, end_month, year, interpolated_df_storage_folder):
location_string = str(location[0]) + '-' + str(location[1]) + '-' + str(location[2]) + '-' + str(location[3])
res_df = pd.DataFrame()
for i in range(start_month, end_month + 1):
tmp_df = pd.read_csv(interpolated_df_storage_folder + '/df_with_interpolated_sm_' + str(year) +
'_' + str(i).zfill(2) + '_' + location_string + '.csv')
res_df = res_df.append(tmp_df, ignore_index=True)
return res_df
def filter_nan_era5(df):
try:
df['sm'] = df['sm'].apply(lambda x: x.item(0))
except:
print('SM value was already of type: float')
df = df.dropna()
return df
def filter_nan_smap(df):
try:
df['smap_sm'] = df['smap_sm'].apply(lambda x: x.item(0))
except:
print('SMAP_SM value was already of type: float')
df = df.dropna()
return df
def get_smap(path: str, printing=False):
ds = nc.Dataset(path)
sm = ds['Soil_Moisture_Retrieval_Data_AM']
latitudes = []
longitudes = []
moistures = []
times = []
for lat in range(len(sm['latitude'])):
for long in range(len(sm['longitude'][lat])):
latitudes.append(sm['latitude'][lat][long])
longitudes.append(sm['longitude'][lat][long])
moistures.append(sm['soil_moisture'][lat][long])
times.append(sm['tb_time_utc'][lat][long])
df = pd.DataFrame.from_dict({'lat': latitudes, 'long': longitudes, 'time': times, 'smap_sm': moistures})
# Filter out missing values
smap_df = df[df['smap_sm'] != -9999.0]
if len(smap_df) > 0 and printing:
print('Number of missing values:', len(df) - len(smap_df))
print('Number of data points with value:', len(smap_df))
index = list(smap_df['smap_sm']).index(max(list(smap_df['smap_sm'])))
print("Peak SM value:", list(smap_df['smap_sm'])[index])
print("Peak SM value at: (" + str(list(smap_df['lat'])[index]) + ", " + str(list(smap_df['long'])[index]) + ")")
return smap_df
def conv(t):
try:
return pd.Timestamp(t)
except:
return pd.Timestamp(t.split('.')[0] + '.000Z')
def convert_time(df: pd.DataFrame) -> pd.DataFrame:
ref_date = pd.Timestamp('2020-01-01T00:00:00.000Z')
df['time'] = df['time'].apply(lambda t: conv(t))
df['time'] = df['time'].apply(lambda t: (t - ref_date).days * 24 + (t - ref_date).seconds / 3600)
return df
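# Example of the conversion above: a timestamp of '2020-01-02T06:00:00.000Z' is 1 day and
# 6 hours after the 2020-01-01 reference, so its 'time' entry becomes 24 + 6 = 30 hours.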
def get_smap_df_year(root_dir: str, year: int, convert_time_hours=True) -> pd.DataFrame:
first = True
all_paths = []
for subdir, dirs, files in os.walk(root_dir):
for file in files:
if not first:
all_paths.append(os.path.join(subdir, file))
else:
first = False
    smap_df = pd.DataFrame()
import os
import pandas as pd
import numpy as np
def read_data():
# Define raw data path
raw_data_path = os.path.join('data', 'raw')
train_file_path = os.path.join(raw_data_path, 'train.csv')
test_file_path = os.path.join(raw_data_path, 'test.csv')
    # read data from csv file
    train_df = pd.read_csv(train_file_path, index_col='PassengerId')
import pandas as pd
import numpy as np
import requests as rq
import datetime as dt
from .constants import CAPS_INFO
def expand(df):
'''Fill missing dates in an irregular timeline'''
min_date = df['date'].min()
max_date = df['date'].max()
idx = pd.date_range(min_date, max_date)
df.index = pd.DatetimeIndex(df.date)
df = df.drop(columns=['date'])
return df.reindex(idx, method='pad').reset_index().rename(columns={'index':'date'})
def prefill(df, min_date):
'''Fill zeros from first_case_date to df.date.min()'''
assert(len(df.name.unique()) == 1)
s = df.name.unique().item()
min_date = min_date
max_date = df['date'].max()
idx = pd.date_range(min_date, max_date)
    df.index = pd.DatetimeIndex(df.date)
#################################################
#created the 04/05/2018 09:52 by <NAME>#
#################################################
#-*- coding: utf-8 -*-
'''
Possible improvements:
'''
import warnings
warnings.filterwarnings('ignore')
#################################################
########### Imports #################
#################################################
import sys
import numpy as np
import pandas as pd
import scipy.stats
import plotly
import plotly.graph_objs as go
import plotly.offline as offline
from plotly import tools
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import train_test_split
import lightgbm as lgb
from sklearn.metrics import log_loss
from sklearn.externals import joblib
from ast import literal_eval
#################################################
########### Global variables ####################
#################################################
### LGB modeling
params = {'learning_rate': 0.015,
'subsample': 0.9,
#'subsample_freq': 1,
'colsample_bytree': 0.9,
'colsample_bylevel':0.9,
'reg_alpha': 1,
'reg_lambda': 1,
'max_depth' : 10,
'min_data_in_leaf': 1,
'boosting': 'dart',#'rf','dart','goss','gbdt'
'objective': 'binary',
'metric': 'binary_logloss',
'is_training_metric': True,
'seed': 99,'silent' : True,"verbose":-1}
params1 = {'learning_rate': 0.015,
'subsample': 0.9,
#'subsample_freq': 1,
'colsample_bytree': 0.9,
'colsample_bylevel':0.9,
'reg_alpha': 1,
'reg_lambda': 1,
'max_depth' : 8,
'num_leaves': 15,
'min_data_in_leaf': 1,
'boosting': 'dart',#'rf','dart','goss','gbdt'
'objective': 'binary',
'metric': 'binary_logloss',
'is_training_metric': True,
'seed': 99,
'silent' : True,"verbose":-1}
MAX_TREES = 5000
######################################################
class Classifier(BaseEstimator):
def __init__(self):
pass
def fit(self, x1, y1,x2,y2):
watchlist = [(lgb.Dataset(x1, label=y1), 'train'), (lgb.Dataset(x2, label=y2), 'valid')]
self.clf2 = lgb.train(params, lgb.Dataset(x1, label=y1), MAX_TREES, lgb.Dataset(x2, label=y2),verbose_eval=200, feval=logloss_lgbm, early_stopping_rounds=300)
self.clf1 = lgb.train(params1, lgb.Dataset(x1, label=y1), MAX_TREES, lgb.Dataset(x2, label=y2),verbose_eval=200, feval=logloss_lgbm, early_stopping_rounds=300)
def predict(self, X):
return self.clf1.predict(X)
def predict_proba(self, X):
res1 = self.clf1.predict(X, num_iteration = self.clf1.best_iteration)
res2 = self.clf2.predict(X,num_iteration = self.clf2.best_iteration)
return np.array([[1-0.5*(a+b),0.5*(a+b)] for a,b in zip(res1,res2)])
fileX_train ='/home/alexis/Bureau/Stage/Time-series/data/processed/sfrdaily_20180430_0_192_0_cleandata-processed.csv'
fileY_train = '/home/alexis/Bureau/historique/label-30-04.csv'
fileX_valid ='/home/alexis/Bureau/Stage/Time-series/data/processed/sfrdaily_20180507_0_192_0_cleandata-processed.csv'
fileY_valid = '/home/alexis/Bureau/historique/label-07-05.csv'
fileX_test ='/home/alexis/Bureau/Stage/Time-series/data/processed/sfrdaily_20180509_0_192_0_cleandata-processed.csv'
fileY_test = '/home/alexis/Bureau/historique/label-09-05.csv'
#################################################
########### Important functions #################
#################################################
def load(fileX,fileY):
X = pd.DataFrame()
y = pd.DataFrame()
for filex,filey in zip(fileX,fileY):
df = pd.read_csv(filex)
y_ = pd.read_csv(filey)
df = df.replace([np.inf, -np.inf], np.nan)
df = df.fillna(1)
X_train = df
y_train = y_['label'][3:]
X = pd.concat([X,X_train])
y = pd.concat([y,y_train])
t = X['t']
scaler = StandardScaler()
X = scaler.fit_transform(X.values)
return X,y.values.reshape(-1, 1),t
def model_fit(X1,y1,X2,y2):
clf = Classifier()
clf.fit(X1,[Y[0] for Y in y1],X2,[Y[0] for Y in y2])
return clf
def find_index(l,v):
res = []
for i, j in enumerate(l):
if(j == v):
res.append(i)
return res
def mesure(y_pred,y_true):
TP = 0
FP = 0
FN = 0
for i in range(len(y_pred)-1):
i = i+1
if(y_pred[i] == 1):
if(sum(y_true[i-1:i+1])>0):
TP += 1
else:
FP += 1
for i in range(len(y_true)-1):
i = i+1
if(y_true[i] == 1):
if(sum(y_pred[i-1:i+1])>0):
pass
else:
FN += 1
return TP,FP,FN
def plot_res(df,pred,y):
x = df
t= [i/60 +3 for i in range(len(x))]
tp = np.sum([z*x for z,x in zip(pred,y)])
fp = np.sum([np.clip(z-x,0,1) for z,x in zip(pred,y)])
fn = np.sum([np.clip(z-x,0,1) for z,x in zip(y,pred)])
beta = 2
p = tp/np.sum(pred)
r = tp/np.sum(y)
beta_squared = beta ** 2
f = (beta_squared + 1) * (p * r) / (beta_squared * p + r)
print('--------------------------------------------------')
print("|| precison: "+str(p)+"|| recall: "+str(r)+"|| fbeta: "+str(f))
tp,fp,fn = mesure(pred,y)
beta = 2
p = tp/(tp+fp)
r = tp/(tp+fn)
beta_squared = beta ** 2
f = (beta_squared + 1) * (p * r) / (beta_squared * p + r)
print("|| precison: "+str(p)+"|| recall: "+str(r)+"|| fbeta: "+str(f))
print('--------------------------------------------------')
l1 = find_index(pred,1)
x1 = [t[i] for i in l1]
y1 = [x[i] for i in l1]
l3 = find_index(y,1)
x3 = [t[i] for i in l3]
y3 = [x[i] for i in l3]
trace1 = go.Scatter(
x= t,
y= x,
name = 'true',
)
trace2 = go.Scatter(
x =x1,
y=y1,
mode = 'markers',
name ='train',
)
trace3 = go.Scatter(
x=0,
y= 0,
mode = 'markers',
name = 'test',
)
trace4 = go.Scatter(
x=x3,
y=y3,
mode = 'markers',
name = 'true markers'
)
fig = tools.make_subplots(rows=4, cols=1, specs=[[{}], [{}], [{}], [{}]],
shared_xaxes=True, shared_yaxes=True,
vertical_spacing=0.001)
fig.append_trace(trace1, 1, 1)
fig.append_trace(trace2, 1, 1)
fig.append_trace(trace3, 1, 1)
fig.append_trace(trace4, 1, 1)
    fig['layout'].update(height=3000, width=2000, title='Anomaly detection')
#plot(fig, filename='LGBM.html')
return 0
def save_model(model):
joblib.dump(model.clf1, 'model/LGBM1.pkl')
joblib.dump(model.clf2, 'model/LGBM2.pkl')
model.clf1.save_model('model/LGBM1.txt')
model.clf2.save_model('model/LGBM2.txt')
def logloss_lgbm(preds, dtrain):
labels = dtrain.get_label()
score = 1-log_loss(labels, preds)
return 'logloss', score,True
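# Note: LightGBM custom eval functions return (name, value, is_higher_better); the score is
# reported as 1 - logloss so that larger values are better for early stopping.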
#################################################
########### main with options ###################
#################################################
def main(argv):
if(len(argv)==0):
argv = [0.16]
THRESHOLD = float(argv[0])
#### get files names ###
names = pd.read_csv('files.csv')
fileX_train = literal_eval(names['fileX_train'][0])
fileY_train = literal_eval(names['fileY_train'][0])
fileX_valid =literal_eval(names['fileX_valid'][0])
fileY_valid = literal_eval(names['fileY_valid'][0])
fileX_test =literal_eval(names['fileX_test'][0])
fileY_test = literal_eval(names['fileY_test'][0])
X_train,Y_train,_ = load(fileX_train,fileY_train)
X_valid,Y_valid,_ = load(fileX_valid,fileY_valid)
X_test,Y_test,t = load(fileX_test,fileY_test)
model = model_fit(X_train,Y_train,X_valid,Y_valid)
pred = model.predict_proba(X_test)
testPredict = list([1 if i[1]>THRESHOLD else 0 for i in pred])
print('Plot feature importances...')
ax = lgb.plot_importance(model.clf1, max_num_features=30)
#plt.show()
# plot results
plot_res(t,testPredict,Y_test)
pred_valid = model.predict_proba(X_valid)
res_valid = pd.DataFrame(pred_valid)
res_valid.to_csv('lightGBM_valid.csv',index=False)
    res = pd.DataFrame(pred)
import json
import datetime
from datetime import time, timedelta
import pandas as pd
import numpy as np
import plotly.graph_objs as go
from plotly.offline import plot
import pytz
import os
from pyloopkit.dose import DoseType
from pyloopkit.generate_graphs import plot_graph, plot_loop_inspired_glucose_graph
from pyloopkit.loop_math import predict_glucose
from pyloopkit.loop_data_manager import update
from pyloopkit.pyloop_parser import (
parse_report_and_run, parse_dictionary_from_previous_run
)
import matplotlib.pyplot as plt
import simglucose
from simglucose.simulation.user_interface import simulate
from simglucose.controller.base import Controller, Action
from simglucose.simulation.env import T1DSimEnv
from simglucose.controller.basal_bolus_ctrller import BBController
from simglucose.sensor.cgm import CGMSensor
from simglucose.actuator.pump import InsulinPump
from simglucose.patient.t1dpatient import T1DPatient
from simglucose.simulation.scenario_gen import RandomScenario
from simglucose.simulation.scenario import CustomScenario
from simglucose.simulation.sim_engine import SimObj, sim, batch_sim
def parse_json(path, name):
""" Get a dictionary output from a previous run of PyLoopKit
and convert the ISO strings to datetime or time objects, and
dose types to enums
"""
data_path_and_name = os.path.join(path, name)
with open(data_path_and_name, "r") as file:
dictionary = json.load(file)
keys_with_times = [
"basal_rate_start_times",
"carb_ratio_start_times",
"sensitivity_ratio_start_times",
"sensitivity_ratio_end_times",
"target_range_start_times",
"target_range_end_times"
]
for key in keys_with_times:
new_list = []
for string in dictionary.get(key):
new_list.append(time.fromisoformat(string))
dictionary[key] = new_list
keys_with_datetimes = [
"dose_start_times",
"dose_end_times",
"glucose_dates",
"carb_dates"
]
for key in keys_with_datetimes:
new_list = []
for string in dictionary.get(key):
new_list.append(datetime.datetime.fromisoformat(string))
dictionary[key] = new_list
dictionary["time_to_calculate_at"] = datetime.datetime.fromisoformat(
dictionary["time_to_calculate_at"]
)
last_temp = dictionary.get("last_temporary_basal")
dictionary["last_temporary_basal"] = [
DoseType.from_str(last_temp[0]),
datetime.datetime.fromisoformat(last_temp[1]),
datetime.datetime.fromisoformat(last_temp[2]),
last_temp[3]
]
dictionary["dose_types"] = [
DoseType.from_str(value) for value in dictionary.get("dose_types")
]
return dictionary
# save dictionary as json file
def convert_times_and_types(obj):
""" Convert dates and dose types into strings when saving as a json """
if isinstance(obj, datetime.datetime):
return obj.isoformat()
if isinstance(obj, datetime.time):
return obj.isoformat()
if isinstance(obj, DoseType):
return str(obj.name)
def strip_tz(date):
return date.replace(tzinfo=None)
def str_to_time(s, fmt='%H:%M:%S'):
return datetime.datetime.strptime(s, fmt).time()
def parse_input_string(s):
try:
return str_to_time(s)
except:
return s
def parse_json_settings(d):
"""
Convert all '%H:%M:%S values in a json to datetime.time objects.
Looks recusively through dictionarys and lists.
"""
for k,v in d.items():
if isinstance(v, str):
d[k] = parse_input_string(v)
elif isinstance(v, list):
d[k] = list(map(
parse_input_string,
v
))
elif isinstance(v, dict):
d[k] = parse_json_settings(v)
return d
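# Illustrative behaviour of parse_json_settings (hypothetical input): passing
#   {'basal_rate_start_times': ['00:00:00', '12:00:00'], 'suspend_threshold': 70}
# returns the same dict with the two strings replaced by datetime.time(0, 0) and
# datetime.time(12, 0); values that do not parse as '%H:%M:%S' are left unchanged.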
def make_loop_plot(recommendations):
inputs = recommendations.get('input_data')
current_time = inputs.get("time_to_calculate_at")
# blood glucose data
glucose_dates = pd.DataFrame(inputs.get("glucose_dates"), columns=["time"])
glucose_values = pd.DataFrame(inputs.get("glucose_values"), columns=["mg_dL"])
bg = pd.concat([glucose_dates, glucose_values], axis=1)
# Set bg color values
bg['bg_colors'] = 'mediumaquamarine'
bg.loc[bg['mg_dL'] < 54, 'bg_colors'] = 'indianred'
low_location = (bg['mg_dL'] > 54) & (bg['mg_dL'] < 70)
bg.loc[low_location, 'bg_colors'] = 'lightcoral'
high_location = (bg['mg_dL'] > 180) & (bg['mg_dL'] <= 250)
bg.loc[high_location, 'bg_colors'] = 'mediumpurple'
bg.loc[(bg['mg_dL'] > 250), 'bg_colors'] = 'slateblue'
bg_trace = go.Scattergl(
name="bg",
x=bg["time"],
y=bg["mg_dL"],
hoverinfo="y+name",
mode='markers',
marker=dict(
size=6,
line=dict(width=0),
color=bg["bg_colors"]
)
)
# bolus data
dose_start_times = (
pd.DataFrame(inputs.get("dose_start_times"), columns=["startTime"])
)
dose_end_times = (
pd.DataFrame(inputs.get("dose_end_times"), columns=["endTime"])
)
dose_values = (
pd.DataFrame(inputs.get("dose_values"), columns=["dose"])
)
dose_types = (
pd.DataFrame(inputs.get("dose_types"), columns=["type"])
)
dose_types["type"] = dose_types["type"].apply(convert_times_and_types)
dose = pd.concat(
[dose_start_times, dose_end_times, dose_values, dose_types],
axis=1
)
unique_dose_types = dose["type"].unique()
# bolus data
if "bolus" in unique_dose_types:
bolus = dose[dose["type"] == "bolus"]
bolus_trace = go.Bar(
name="bolus",
x=bolus["startTime"],
y=bolus["dose"],
hoverinfo="y+name",
width=999999,
marker=dict(color='lightskyblue')
)
# basals rates
# scheduled basal rate
basal_rate_start_times = (
pd.DataFrame(inputs.get("basal_rate_start_times"), columns=["time"])
)
basal_rate_minutes = (
pd.DataFrame(inputs.get("basal_rate_minutes"), columns=["duration"])
)
basal_rate_values = (
pd.DataFrame(inputs.get("basal_rate_values"), columns=["sbr"])
)
sbr = pd.concat(
[basal_rate_start_times, basal_rate_minutes, basal_rate_values],
axis=1
)
# create a contiguous basal time series
bg_range = pd.date_range(
bg["time"].min() - datetime.timedelta(days=1),
current_time,
freq="1s"
)
contig_ts = pd.DataFrame(bg_range, columns=["datetime"])
contig_ts["time"] = contig_ts["datetime"].dt.time
basal = pd.merge(contig_ts, sbr, on="time", how="left")
basal["sbr"].fillna(method='ffill', inplace=True)
basal.dropna(subset=['sbr'], inplace=True)
# temp basal data
if ("basal" in unique_dose_types) | ("suspend" in unique_dose_types):
temp_basal = (
dose[((dose["type"] == "basal") | (dose["type"] == "suspend"))]
)
temp_basal["type"].replace("basal", "temp", inplace=True)
        all_temps = pd.DataFrame()
from collections.abc import MutableSequence
import warnings
import io
import copy
import numpy as np
import pandas as pd
from . import endf
import openmc.checkvalue as cv
from .resonance import Resonances
def _add_file2_contributions(file32params, file2params):
"""Function for aiding in adding resonance parameters from File 2 that are
not always present in File 32. Uses already imported resonance data.
    Parameters
    ----------
    file32params : pandas.DataFrame
        Incomplete set of resonance parameters contained in File 32.
    file2params : pandas.DataFrame
        Resonance parameters from File 2. Ordered by energy.
    Returns
    -------
    parameters : pandas.DataFrame
        Complete set of parameters ordered by L-values and then energy
"""
# Use l-values and competitiveWidth from File 2 data
# Re-sort File 2 by energy to match File 32
file2params = file2params.sort_values(by=['energy'])
file2params.reset_index(drop=True, inplace=True)
# Sort File 32 parameters by energy as well (maintaining index)
file32params.sort_values(by=['energy'], inplace=True)
# Add in values (.values converts to array first to ignore index)
file32params['L'] = file2params['L'].values
if 'competitiveWidth' in file2params.columns:
file32params['competitiveWidth'] = file2params['competitiveWidth'].values
# Resort to File 32 order (by L then by E) for use with covariance
file32params.sort_index(inplace=True)
return file32params
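# Minimal sketch of what _add_file2_contributions does (toy frames, not real ENDF data):
#   f32 = pd.DataFrame({'energy': [10.0, 5.0], 'neutronWidth': [0.1, 0.2]})
#   f2 = pd.DataFrame({'energy': [5.0, 10.0], 'L': [0, 1]})
#   _add_file2_contributions(f32, f2)
# attaches L=1 to the 10 eV resonance and L=0 to the 5 eV one, then restores the original
# File 32 (index) ordering.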
class ResonanceCovariances(Resonances):
"""Resolved resonance covariance data
Parameters
----------
ranges : list of openmc.data.ResonanceCovarianceRange
Distinct energy ranges for resonance data
Attributes
----------
ranges : list of openmc.data.ResonanceCovarianceRange
Distinct energy ranges for resonance data
"""
@property
def ranges(self):
return self._ranges
@ranges.setter
def ranges(self, ranges):
cv.check_type('resonance ranges', ranges, MutableSequence)
self._ranges = cv.CheckedList(ResonanceCovarianceRange,
'resonance range', ranges)
@classmethod
def from_endf(cls, ev, resonances):
"""Generate resonance covariance data from an ENDF evaluation.
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
resonances : openmc.data.Resonance object
openmc.data.Resonanance object generated from the same evaluation
used to import values not contained in File 32
Returns
-------
openmc.data.ResonanceCovariances
Resonance covariance data
"""
file_obj = io.StringIO(ev.section[32, 151])
# Determine whether discrete or continuous representation
items = endf.get_head_record(file_obj)
n_isotope = items[4] # Number of isotopes
ranges = []
for iso in range(n_isotope):
items = endf.get_cont_record(file_obj)
abundance = items[1]
fission_widths = (items[3] == 1) # Flag for fission widths
n_ranges = items[4] # Number of resonance energy ranges
for j in range(n_ranges):
items = endf.get_cont_record(file_obj)
# Unresolved flags - 0: only scattering radius given
# 1: resolved parameters given
# 2: unresolved parameters given
unresolved_flag = items[2]
formalism = items[3] # resonance formalism
# Throw error for unsupported formalisms
if formalism in [0, 7]:
error = 'LRF='+str(formalism)+' covariance not supported '\
'for this formalism'
raise NotImplementedError(error)
if unresolved_flag in (0, 1):
# Resolved resonance region
resonance = resonances.ranges[j]
erange = _FORMALISMS[formalism].from_endf(ev, file_obj,
items, resonance)
ranges.append(erange)
elif unresolved_flag == 2:
warn = 'Unresolved resonance not supported. Covariance '\
'values for the unresolved region not imported.'
warnings.warn(warn)
return cls(ranges)
class ResonanceCovarianceRange:
"""Resonace covariance range. Base class for different formalisms.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
mpar : int
Number of parameters in covariance matrix for each individual resonance
formalism : str
String descriptor of formalism
"""
def __init__(self, energy_min, energy_max):
self.energy_min = energy_min
self.energy_max = energy_max
def subset(self, parameter_str, bounds):
"""Produce a subset of resonance parameters and the corresponding
covariance matrix to an IncidentNeutron object.
Parameters
----------
parameter_str : str
parameter to be discriminated
(i.e. 'energy', 'captureWidth', 'fissionWidthA'...)
bounds : np.array
[low numerical bound, high numerical bound]
Returns
-------
res_cov_range : openmc.data.ResonanceCovarianceRange
ResonanceCovarianceRange object that contains a subset of the
covariance matrix (upper triangular) as well as a subset parameters
within self.file2params
"""
# Copy range and prevent change of original
res_cov_range = copy.deepcopy(self)
parameters = self.file2res.parameters
cov = res_cov_range.covariance
mpar = res_cov_range.mpar
# Create mask
mask1 = parameters[parameter_str] >= bounds[0]
mask2 = parameters[parameter_str] <= bounds[1]
mask = mask1 & mask2
res_cov_range.parameters = parameters[mask]
indices = res_cov_range.parameters.index.values
# Build subset of covariance
sub_cov_dim = len(indices)*mpar
cov_subset_vals = []
for index1 in indices:
for i in range(mpar):
for index2 in indices:
for j in range(mpar):
if index2*mpar+j >= index1*mpar+i:
cov_subset_vals.append(cov[index1*mpar+i,
index2*mpar+j])
cov_subset = np.zeros([sub_cov_dim, sub_cov_dim])
tri_indices = np.triu_indices(sub_cov_dim)
cov_subset[tri_indices] = cov_subset_vals
res_cov_range.file2res.parameters = parameters[mask]
res_cov_range.covariance = cov_subset
return res_cov_range
def sample(self, n_samples):
"""Sample resonance parameters based on the covariances provided
within an ENDF evaluation.
Parameters
----------
n_samples : int
The number of samples to produce
Returns
-------
samples : list of openmc.data.ResonanceCovarianceRange objects
List of samples size `n_samples`
"""
warn_str = 'Sampling routine does not guarantee positive values for '\
'parameters. This can lead to undefined behavior in the '\
'reconstruction routine.'
warnings.warn(warn_str)
parameters = self.parameters
cov = self.covariance
# Symmetrizing covariance matrix
cov = cov + cov.T - np.diag(cov.diagonal())
formalism = self.formalism
mpar = self.mpar
samples = []
# Handling MLBW/SLBW sampling
if formalism == 'mlbw' or formalism == 'slbw':
params = ['energy', 'neutronWidth', 'captureWidth', 'fissionWidth',
'competitiveWidth']
param_list = params[:mpar]
mean_array = parameters[param_list].values
mean = mean_array.flatten()
par_samples = np.random.multivariate_normal(mean, cov,
size=n_samples)
spin = parameters['J'].values
l_value = parameters['L'].values
for sample in par_samples:
energy = sample[0::mpar]
gn = sample[1::mpar]
gg = sample[2::mpar]
gf = sample[3::mpar] if mpar > 3 else parameters['fissionWidth'].values
gx = sample[4::mpar] if mpar > 4 else parameters['competitiveWidth'].values
gt = gn + gg + gf + gx
records = []
for j, E in enumerate(energy):
records.append([energy[j], l_value[j], spin[j], gt[j],
gn[j], gg[j], gf[j], gx[j]])
columns = ['energy', 'L', 'J', 'totalWidth', 'neutronWidth',
'captureWidth', 'fissionWidth', 'competitiveWidth']
sample_params = pd.DataFrame.from_records(records,
columns=columns)
# Copy ResonanceRange object
res_range = copy.copy(self.file2res)
res_range.parameters = sample_params
samples.append(res_range)
# Handling RM sampling
elif formalism == 'rm':
params = ['energy', 'neutronWidth', 'captureWidth',
'fissionWidthA', 'fissionWidthB']
param_list = params[:mpar]
mean_array = parameters[param_list].values
mean = mean_array.flatten()
par_samples = np.random.multivariate_normal(mean, cov,
size=n_samples)
spin = parameters['J'].values
l_value = parameters['L'].values
for sample in par_samples:
energy = sample[0::mpar]
gn = sample[1::mpar]
gg = sample[2::mpar]
gfa = sample[3::mpar] if mpar > 3 else parameters['fissionWidthA'].values
gfb = sample[4::mpar] if mpar > 3 else parameters['fissionWidthB'].values
records = []
for j, E in enumerate(energy):
records.append([energy[j], l_value[j], spin[j], gn[j],
gg[j], gfa[j], gfb[j]])
columns = ['energy', 'L', 'J', 'neutronWidth',
'captureWidth', 'fissionWidthA', 'fissionWidthB']
sample_params = pd.DataFrame.from_records(records,
columns=columns)
# Copy ResonanceRange object
res_range = copy.copy(self.file2res)
res_range.parameters = sample_params
samples.append(res_range)
return samples
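# Rough usage sketch for the sampling above (assumes `cov_range` is a populated
# ResonanceCovarianceRange, e.g. one element of ResonanceCovariances.ranges):
#   realizations = cov_range.sample(10)
#   realizations[0].parameters   # one perturbed resonance-parameter table
# Each element is a copy of the File 2 ResonanceRange whose parameters were drawn from a
# multivariate normal built from the mean parameters and the symmetrized covariance matrix.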
class MultiLevelBreitWignerCovariance(ResonanceCovarianceRange):
"""Multi-level Breit-Wigner resolved resonance formalism covariance data.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
mpar : int
Number of parameters in covariance matrix for each individual resonance
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
formalism : str
String descriptor of formalism
"""
def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res):
super().__init__(energy_min, energy_max)
self.parameters = parameters
self.covariance = covariance
self.mpar = mpar
self.lcomp = lcomp
self.file2res = copy.copy(file2res)
self.formalism = 'mlbw'
@classmethod
def from_endf(cls, ev, file_obj, items, resonance):
"""Create MLBW covariance data from an ENDF evaluation.
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
file_obj : file-like object
ENDF file positioned at the second record of a resonance range
subsection in MF=32, MT=151
items : list
Items from the CONT record at the start of the resonance range
subsection
resonance : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
Returns
-------
openmc.data.MultiLevelBreitWignerCovariance
Multi-level Breit-Wigner resonance covariance parameters
"""
# Read energy-dependent scattering radius if present
energy_min, energy_max = items[0:2]
nro, naps = items[4:6]
if nro != 0:
params, ape = endf.get_tab1_record(file_obj)
# Other scatter radius parameters
items = endf.get_cont_record(file_obj)
target_spin = items[0]
lcomp = items[3] # Flag for compatibility 0, 1, 2 - 2 is compact form
nls = items[4] # number of l-values
# Build covariance matrix for General Resolved Resonance Formats
if lcomp == 1:
items = endf.get_cont_record(file_obj)
# Number of short range type resonance covariances
num_short_range = items[4]
# Number of long range type resonance covariances
num_long_range = items[5]
# Read resonance widths, J values, etc
records = []
for i in range(num_short_range):
items, values = endf.get_list_record(file_obj)
mpar = items[2]
num_res = items[5]
num_par_vals = num_res*6
res_values = values[:num_par_vals]
cov_values = values[num_par_vals:]
energy = res_values[0::6]
spin = res_values[1::6]
gt = res_values[2::6]
gn = res_values[3::6]
gg = res_values[4::6]
gf = res_values[5::6]
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gt[i], gn[i],
gg[i], gf[i]])
# Build the upper-triangular covariance matrix
cov_dim = mpar*num_res
cov = np.zeros([cov_dim, cov_dim])
indices = np.triu_indices(cov_dim)
cov[indices] = cov_values
# Compact format - Resonances and individual uncertainties followed by
# compact correlations
elif lcomp == 2:
items, values = endf.get_list_record(file_obj)
mean = items
num_res = items[5]
energy = values[0::12]
spin = values[1::12]
gt = values[2::12]
gn = values[3::12]
gg = values[4::12]
gf = values[5::12]
par_unc = []
for i in range(num_res):
res_unc = values[i*12+6 : i*12+12]
# Delete 0 values (not provided, no fission width)
# DAJ/DGT always zero, DGF sometimes nonzero [1, 2, 5]
res_unc_nonzero = []
for j in range(6):
if j in [1, 2, 5] and res_unc[j] != 0.0:
res_unc_nonzero.append(res_unc[j])
elif j in [0, 3, 4]:
res_unc_nonzero.append(res_unc[j])
par_unc.extend(res_unc_nonzero)
records = []
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gt[i], gn[i],
gg[i], gf[i]])
corr = endf.get_intg_record(file_obj)
cov = np.diag(par_unc).dot(corr).dot(np.diag(par_unc))
# Compatible resolved resonance format
elif lcomp == 0:
cov = np.zeros([4, 4])
records = []
cov_index = 0
for i in range(nls):
items, values = endf.get_list_record(file_obj)
num_res = items[5]
for j in range(num_res):
one_res = values[18*j:18*(j+1)]
res_values = one_res[:6]
cov_values = one_res[6:]
records.append(list(res_values))
                    # Populate the covariance matrix for this resonance
# There are no covariances between resonances in lcomp=0
cov[cov_index, cov_index] = cov_values[0]
cov[cov_index+1, cov_index+1 : cov_index+2] = cov_values[1:2]
cov[cov_index+1, cov_index+3] = cov_values[4]
cov[cov_index+2, cov_index+2] = cov_values[3]
cov[cov_index+2, cov_index+3] = cov_values[5]
cov[cov_index+3, cov_index+3] = cov_values[6]
cov_index += 4
if j < num_res-1: # Pad matrix for additional values
cov = np.pad(cov, ((0, 4), (0, 4)), 'constant',
constant_values=0)
# Create pandas DataFrame with resonance data, currently
# redundant with data.IncidentNeutron.resonance
columns = ['energy', 'J', 'totalWidth', 'neutronWidth',
'captureWidth', 'fissionWidth']
parameters = pd.DataFrame.from_records(records, columns=columns)
# Determine mpar (number of parameters for each resonance in
# covariance matrix)
nparams, params = parameters.shape
covsize = cov.shape[0]
mpar = int(covsize/nparams)
# Add parameters from File 2
parameters = _add_file2_contributions(parameters,
resonance.parameters)
# Create instance of class
mlbw = cls(energy_min, energy_max, parameters, cov, mpar, lcomp,
resonance)
return mlbw
class SingleLevelBreitWignerCovariance(MultiLevelBreitWignerCovariance):
"""Single-level Breit-Wigner resolved resonance formalism covariance data.
Single-level Breit-Wigner resolved resonance data is is identified by LRF=1
in the ENDF-6 format.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
mpar : int
Number of parameters in covariance matrix for each individual resonance
formalism : str
String descriptor of formalism
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
"""
def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res):
super().__init__(energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res)
self.formalism = 'slbw'
class ReichMooreCovariance(ResonanceCovarianceRange):
"""Reich-Moore resolved resonance formalism covariance data.
Reich-Moore resolved resonance data is identified by LRF=3 in the ENDF-6
format.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
mpar : int
Number of parameters in covariance matrix for each individual resonance
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
formalism : str
String descriptor of formalism
"""
def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res):
super().__init__(energy_min, energy_max)
self.parameters = parameters
self.covariance = covariance
self.mpar = mpar
self.lcomp = lcomp
self.file2res = copy.copy(file2res)
self.formalism = 'rm'
@classmethod
def from_endf(cls, ev, file_obj, items, resonance):
"""Create Reich-Moore resonance covariance data from an ENDF
evaluation. Includes the resonance parameters contained separately in
File 32.
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
file_obj : file-like object
ENDF file positioned at the second record of a resonance range
subsection in MF=2, MT=151
items : list
Items from the CONT record at the start of the resonance range
subsection
resonance : openmc.data.Resonance object
openmc.data.Resonanance object generated from the same evaluation
used to import values not contained in File 32
Returns
-------
openmc.data.ReichMooreCovariance
Reich-Moore resonance covariance parameters
"""
# Read energy-dependent scattering radius if present
energy_min, energy_max = items[0:2]
nro, naps = items[4:6]
if nro != 0:
params, ape = endf.get_tab1_record(file_obj)
# Other scatter radius parameters
items = endf.get_cont_record(file_obj)
target_spin = items[0]
lcomp = items[3] # Flag for compatibility 0, 1, 2 - 2 is compact form
nls = items[4] # Number of l-values
# Build covariance matrix for General Resolved Resonance Formats
if lcomp == 1:
items = endf.get_cont_record(file_obj)
# Number of short range type resonance covariances
num_short_range = items[4]
# Number of long range type resonance covariances
num_long_range = items[5]
# Read resonance widths, J values, etc
channel_radius = {}
scattering_radius = {}
records = []
for i in range(num_short_range):
items, values = endf.get_list_record(file_obj)
mpar = items[2]
num_res = items[5]
num_par_vals = num_res*6
res_values = values[:num_par_vals]
cov_values = values[num_par_vals:]
energy = res_values[0::6]
spin = res_values[1::6]
gn = res_values[2::6]
gg = res_values[3::6]
gfa = res_values[4::6]
gfb = res_values[5::6]
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gn[i], gg[i],
gfa[i], gfb[i]])
# Build the upper-triangular covariance matrix
cov_dim = mpar*num_res
cov = np.zeros([cov_dim, cov_dim])
indices = np.triu_indices(cov_dim)
cov[indices] = cov_values
# Compact format - Resonances and individual uncertainties followed by
# compact correlations
elif lcomp == 2:
items, values = endf.get_list_record(file_obj)
num_res = items[5]
energy = values[0::12]
spin = values[1::12]
gn = values[2::12]
gg = values[3::12]
gfa = values[4::12]
gfb = values[5::12]
par_unc = []
for i in range(num_res):
res_unc = values[i*12+6 : i*12+12]
# Delete 0 values (not provided in evaluation)
res_unc = [x for x in res_unc if x != 0.0]
par_unc.extend(res_unc)
records = []
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gn[i], gg[i],
gfa[i], gfb[i]])
corr = endf.get_intg_record(file_obj)
cov = np.diag(par_unc).dot(corr).dot(np.diag(par_unc))
        # Create pandas DataFrame with resonance data
columns = ['energy', 'J', 'neutronWidth', 'captureWidth',
'fissionWidthA', 'fissionWidthB']
        parameters = pd.DataFrame.from_records(records, columns=columns)
'''This script contains functions for evaluating models and calculating and visualizing metrics'''
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold, cross_validate, cross_val_score, RandomizedSearchCV
from sklearn.metrics import precision_score, recall_score, accuracy_score, roc_auc_score, roc_curve, precision_recall_curve, f1_score, fbeta_score, confusion_matrix, classification_report, make_scorer, auc, log_loss
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier, plot_importance
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN
from imblearn.under_sampling import RandomUnderSampler
from collections import Counter, OrderedDict
from scipy.stats import randint
import time
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
def eval(model_name, model, X, y):
    '''This is a function to compare preliminary models.
    Takes in a model and its name (the value and key, respectively, of a dictionary of
    instantiated models) together with the features X and target y, partitions the data,
    oversamples the minority class in the training set, and prints evaluation metrics'''
# Partition data
X_tv, X_test, y_tv, y_test = train_test_split(X, y, test_size = 0.2, random_state=33, stratify=y)
X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size = 0.2, random_state=14, stratify=y_tv)
# Oversample minority class in training data
oversample = RandomOverSampler(random_state=0, sampling_strategy='minority')
X_train_os, y_train_os = oversample.fit_resample(X_train, y_train)
# Train model
model.fit(X_train_os, y_train_os)
# Make predictions
y_pred = model.predict(X_val)
preds = model.predict_proba(X_val)
# Print scores
print(model_name, ':')
print('Accuracy score: ', accuracy_score(y_val, y_pred))
print('Precision score: ', precision_score(y_val, y_pred))
print('Recall score: ', recall_score(y_val, y_pred))
print('F1 score: ', f1_score(y_val, y_pred))
print('F-beta score: ', fbeta_score(y_val, y_pred, beta=2))
print('ROC-AUC score: ', roc_auc_score(y_val, preds[:,1]), '\n')
def model_scores(model, X, y):
'''
Takes in an instantiated model and training data, partitions the training data
into training and validation sets, trains the model on training data, and returns
evaluation metrics
'''
# Partition data for cross-validation
X_tv, X_test, y_tv, y_test = train_test_split(X, y, test_size=0.2, random_state=5, stratify=y)
    X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size=0.2, random_state=17, stratify=y_tv)
# Train model
model.fit(X_train, y_train)
# Make prediction
y_pred = model.predict(X_val)
preds = model.predict_proba(X_val)
# Print scores
print('Accuracy score: ', accuracy_score(y_val, y_pred))
print('Precision score: ', precision_score(y_val, y_pred))
print('Recall score: ', recall_score(y_val, y_pred))
print('F1 score: ', f1_score(y_val, y_pred))
print('Fbeta score (beta=2): ', fbeta_score(y_val, y_pred, beta=2))
print('ROC AUC score: ', roc_auc_score(y_val, preds[:,1]), '\n')
def model_scores_os(model, X, y):
'''
Takes in an instantiated model and training data, partitions the training data
into training and validation sets, oversamples the training data, trains the model
on the oversampled training data, and returns evaluation metrics
'''
# Partition data for cross-validation
X_tv, X_test, y_tv, y_test = train_test_split(X, y, test_size=0.2, random_state=5, stratify=y)
    X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size=0.2, random_state=17, stratify=y_tv)
# Oversample since classes are imbalanced
oversampler = RandomOverSampler(sampling_strategy='minority', random_state=0)
X_train, y_train = oversampler.fit_resample(X_train, y_train)
# Train model
model.fit(X_train, y_train)
# Make prediction
y_pred = model.predict(X_val)
preds = model.predict_proba(X_val)
# Print scores
print('Accuracy score: ', accuracy_score(y_val, y_pred))
print('Precision score: ', precision_score(y_val, y_pred))
print('Recall score: ', recall_score(y_val, y_pred))
print('F1 score: ', f1_score(y_val, y_pred))
print('Fbeta score (beta=2): ', fbeta_score(y_val, y_pred, beta=2))
print('ROC AUC score: ', roc_auc_score(y_val, preds[:,1]), '\n')
# Plot confusion matrix
def plot_cm(y_test, y_pred):
'''
Takes in target variable test set and set of predictions from a model
and returns confusion matrix
'''
# Set up confusion matrix
confusion = confusion_matrix(y_test, y_pred)
# Plot confusion matrix
plt.figure(dpi=100)
sns.heatmap(confusion, cmap=plt.cm.Blues, annot=True, square=True,
xticklabels=['No Death', 'Death'],
yticklabels=['No Death', 'Death'])
plt.xlabel('Predicted death')
plt.ylabel('Actual death')
plt.title('Confusion Matrix')
plt.show()
# Plot precision-recall curve
def plot_pr_curve(y_test, preds):
'''
Takes in target variable test set and set of predictions from a model
and plots precision-recall curve
'''
# Set up precsion-recall curve
precision, recall, thresholds = precision_recall_curve(y_test, preds[:,1])
# Plot P-R curve
plt.figure(dpi=80, figsize=(5,5))
plt.plot(thresholds, precision[1:], label='precision')
plt.plot(thresholds, recall[1:], label='recall')
plt.legend(loc='lower left')
plt.xlabel('Threshold')
plt.title('Precision and Recall Curves')
plt.show()
# Plot ROC curve and return AUC score
def roc_auc_curve(y_test, preds):
'''
Takes in target variable test set and set of predictions from a model,
plots ROC curve, and prints ROC AUC score
'''
# Set up ROC curve
fpr, tpr, thresholds = roc_curve(y_test, preds[:,1])
# Plot ROC curve
plt.figure(figsize=(5,5))
plt.plot(fpr, tpr,lw=2)
plt.plot([0,1],[0,1],c='violet',ls='--')
plt.xlim([-0.05,1.05])
plt.ylim([-0.05,1.05])
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.show()
# Print ROC AUC score
print("ROC AUC score = ", roc_auc_score(y_test, preds[:,1]))
# Cross-validation with stratified KFold (only for models without oversampling)
def cv(model, X_tv, y_tv):
'''
Takes in instantiated model and non-test data set, performs cross validation using
5-fold stratified splits, and returns dataframe of train and test evaluation metrics
'''
# Define scoring metrics
scoring = {'accuracy': 'accuracy', 'precision': 'precision', 'recall': 'recall', 'f1': 'f1',
'fbeta': make_scorer(fbeta_score, beta=2), 'auc': 'roc_auc'}
# Cross-validation using stratified KFolds
kf = StratifiedKFold(n_splits=5, shuffle=False)
    # Store results of the cross-validation run in a dictionary
cv_dict = cross_validate(model, X_tv, y_tv, scoring=scoring,
cv=kf, n_jobs=-1, return_train_score=True)
# Prepare dictionary of metrics for converting into dataframe
cv_dict_2 = {
'test_accuracy': np.mean(cv_dict['test_accuracy']),
'train_accuracy': np.mean(cv_dict['train_accuracy']),
'test_precision': np.mean(cv_dict['test_precision']),
'train_precision': np.mean(cv_dict['train_precision']),
'test_recall': np.mean(cv_dict['test_recall']),
'train_recall': np.mean(cv_dict['train_recall']),
        'test_f1': np.mean(cv_dict['test_f1']),
        'train_f1': np.mean(cv_dict['train_f1']),
        'test_fbeta': np.mean(cv_dict['test_fbeta']),
'train_fbeta': np.mean(cv_dict['train_fbeta']),
'test_auc': np.mean(cv_dict['test_auc']),
'train_auc': np.mean(cv_dict['train_auc'])
}
# Convert to dataframe
cv_df = pd.DataFrame.from_dict(cv_dict_2, orient='index', columns=['mean_score'])
return cv_df
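# Illustrative sketch (not part of the original pipeline) of how `cv` might be called:
# hold out a test set first, then cross-validate on the remainder. The
# RandomForestClassifier choice and its settings are assumptions for demonstration,
# and the scikit-learn imports used by `cv` are assumed to be in place above.
def example_cv_run(X, y):
    from sklearn.ensemble import RandomForestClassifier
    X_tv, X_test, y_tv, y_test = train_test_split(X, y, test_size=0.2,
                                                  random_state=5, stratify=y)
    cv_scores = cv(RandomForestClassifier(n_estimators=100, random_state=0), X_tv, y_tv)
    print(cv_scores)
    return cv_scores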
# Adjust threshold
def threshold(model, X_test, y_test, t):
    '''
    Takes in model, val/test features and target, and a designated threshold value
    and returns dataframe of evaluation metrics based on that threshold
    '''
    preds = np.where(model.predict_proba(X_test)[:,1] > t, 1, 0)
new_df = pd.DataFrame(data=[accuracy_score(y_test, preds), recall_score(y_test, preds),
precision_score(y_test, preds), f1_score(y_test, preds), roc_auc_score(y_test, preds)],
index=["accuracy", "recall", "precision", "f1", "roc_auc"])
return new_df
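# Illustrative sketch (not part of the original script): sweep a few candidate
# thresholds with the helper above (using its corrected signature) and line the
# resulting metric tables up side by side. The threshold grid is an arbitrary
# assumption for demonstration.
def example_threshold_sweep(model, X_val, y_val, grid=(0.3, 0.4, 0.5, 0.6)):
    frames = {t: threshold(model, X_val, y_val, t)[0] for t in grid}
    # Rows are metrics, columns are the candidate thresholds
    return pd.DataFrame(frames)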
# Look at coefficients and intercept of model
def model_coef(model, X, y):
# Partition data for cross-validation
    X_tv, X_test, y_tv, y_test = train_test_split(X, y, test_size=0.2, random_state=5, stratify=y)
    X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size=0.2, random_state=17, stratify=y_tv)
# Oversample since classes are imbalanced
oversampler = RandomOverSampler(sampling_strategy='minority', random_state=0)
X_train, y_train = oversampler.fit_resample(X_train, y_train)
# Train model
model.fit(X_train, y_train)
# Get coefficients and intercept and format into dataframe
coef = pd.DataFrame(model.coef_, columns=X_train.columns)
    coef = coef.append(pd.Series(model.intercept_), ignore_index=True)
return coef.T
def coef_int(model, feat):
'''
Takes in model and list containing names of features/columns and returns dataframe of
coefficients and intercept for model
'''
coef = pd.DataFrame(model.coef_, columns=feat)
    coef = coef.append(pd.Series(model.intercept_), ignore_index=True)
return coef
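# Illustrative sketch (demonstration only): fit a simple linear model on synthetic
# data and inspect its weights with coef_int. The LogisticRegression choice and the
# feature names are assumptions, not part of the original analysis.
def example_coef_inspection():
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    X_demo, y_demo = make_classification(n_samples=200, n_features=5, random_state=0)
    fitted = LogisticRegression(max_iter=1000).fit(X_demo, y_demo)
    return coef_int(fitted, ['feat_1', 'feat_2', 'feat_3', 'feat_4', 'feat_5'])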
# Compare sampling methods for a model
def compare_sampling(name, model, X_tv, y_tv):
'''
Takes in model and its name (value and key in dictionary of models to compare, respectively)
and non-test data, splits data into training and validation sets, and trains model on:
1) non-resampled training data while adjusting built-in class weight metric of model,
2) training data minority class oversampled using RandomOverSampler,
3) training data minority class oversampled using SMOTE,
4) training data minority class oversampled using ADASYN,
5) training data majority class undersampled using RandomUnderSampler,
and compares evaluation metrics of these iterations on both training and validation data sets
'''
# Partition data
X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size = 0.2, random_state=14)
# Model with balancing class weight via model parameter
if name == 'Random Forest':
model_balanced = RandomForestClassifier(class_weight='balanced')
elif name == 'XGBoost':
model_balanced = XGBClassifier(scale_pos_weight=14)
# Train model
model_balanced.fit(X_train, y_train)
# Make predictions
y_pred_train_balanced = model_balanced.predict(X_train)
y_pred_val_balanced = model_balanced.predict(X_val)
# Store evaluation metrics for train and test sets for balanced model in dictionaries
balanced_train = {
'precision': precision_score(y_train, y_pred_train_balanced),
'recall': recall_score(y_train, y_pred_train_balanced),
'f1': f1_score(y_train, y_pred_train_balanced),
'fbeta': fbeta_score(y_train, y_pred_train_balanced, beta=2)
}
balanced_val = {
'precision': precision_score(y_val, y_pred_val_balanced),
'recall': recall_score(y_val, y_pred_val_balanced),
'f1': f1_score(y_val, y_pred_val_balanced),
'fbeta': fbeta_score(y_val, y_pred_val_balanced, beta=2)
}
# Convert dictionaries to dataframe
balanced_scores_df = pd.DataFrame({'train': pd.Series(balanced_train), 'val': | pd.Series(balanced_val) | pandas.Series |
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
SMALL_SIZE = 10
MEDIUM_SIZE = 12
plt.rc('font', size=SMALL_SIZE)
plt.rc('axes', titlesize=MEDIUM_SIZE)
plt.rc('axes', labelsize=MEDIUM_SIZE)
plt.rcParams['figure.dpi']=150
def next_batch(num, train_data, labels):
    idx = np.arange(0, len(train_data))
    np.random.shuffle(idx)
    batch = idx[:num]
data_shuffle = [train_data[j] for j in batch]
labels_shuffle = [labels[j] for j in batch]
return np.asarray(data_shuffle), np.asarray(labels_shuffle)
def selective_learning(sess, train_data, labels):
data_set_length = len(train_data)
cost = []
for item in range(data_set_length):
cost.append([sess.run(cross_entropy, feed_dict={x: np.asarray([train_data[item]]), y: np.asarray([labels[item]])}),
train_data[item], labels[item]])
cost = sorted(cost, key=lambda x: x[0])
one_third = int(len(cost)/3)
cost = cost[:one_third] + cost[-one_third:]
new_batch_x = []
new_batch_y = []
for item in range(len(cost)):
new_batch_x.append(cost[item][1])
new_batch_y.append(cost[item][2])
return new_batch_x, new_batch_y
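# Illustrative sketch (assumption: `train_step`, `x`, `y` and `cross_entropy` are the
# optimiser op and placeholders built in the main block below; the names here are
# placeholders, not definitions from this file). A selective-learning epoch keeps only
# the easiest and hardest thirds of the training set and feeds them through one
# optimiser step:
#
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     for epoch in range(n_epochs):
#         sel_x, sel_y = selective_learning(sess, train_x, train_y)
#         sess.run(train_step, feed_dict={x: np.asarray(sel_x), y: np.asarray(sel_y)})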
if __name__ == "__main__":
# TODO: Ensure all steps performed
averageTrainingError = []
averageGeneralisationError = []
averageClassificationError = []
num_runs = 3
# Importing Data
data = pd.read_csv("/home/vignesh/PycharmProjects/SloanDigitalSkySurvey/"
"astronomical-observation-classification-neural-network/"
"Skyserver_SQL2_27_2018 6_51_39 PM.csv", skiprows=1)
# TODO: Data Analysis and Exploration - Statistics and Visual Graphs
# Take Action on Data - Data Filtering
data.drop(['objid', 'run', 'rerun', 'camcol', 'field', 'specobjid'], axis=1, inplace=True)
# TODO: Feature Engineering
# Try Different Feature Scaling and Normalisation Techniques
#
# Standardisation/Normalisation/Z-score
data_num = data.select_dtypes(include=[np.number])
data_num = (data_num - data_num.mean()) / data_num.std()
data[data_num.columns] = data_num
one_hot = | pd.get_dummies(data['class']) | pandas.get_dummies |
import math
import numpy as np
import pandas as pd
from typing import Union
from scipy import signal
from sklearn import preprocessing
from scipy.spatial import distance
def asin2(x: float, y: float) -> float:
"""Function to return the inverse sin function across the range (-pi, pi], rather than (-pi/2, pi/2]
Parameters
----------
x : float
x coordinate of the point in 2D space
y : float
y coordinate of the point in 2D space
Returns
-------
theta : float
Angle corresponding to point in 2D space in radial coordinates, within range (-pi, pi]
"""
r = math.sqrt(x**2+y**2)
if x >= 0:
return math.asin(y/r)
else:
if y >= 0:
return math.pi-math.asin(y/r)
else:
return -math.pi-math.asin(y/r)
def acos2(x: float, y: float) -> float:
"""Function to return the inverse cos function across the range (-pi, pi], rather than (0, pi]
Parameters
----------
x : float
x coordinate of the point in 2D space
y : float
y coordinate of the point in 2D space
Returns
-------
theta : float
Angle corresponding to point in 2D space in radial coordinates, within range (-pi, pi]
"""
r = math.sqrt(x**2+y**2)
if y >= 0:
return math.acos(x/r)
else:
return -math.acos(x/r)
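# Quick illustrative check (added for demonstration, not part of the original module):
# both helpers should recover the full-range angle of a point in any quadrant, unlike
# math.asin/math.acos on their own.
def _demo_full_range_inverse_trig():
    theta = 3 * math.pi / 4  # a second-quadrant angle
    x, y = math.cos(theta), math.sin(theta)
    assert math.isclose(asin2(x, y), theta)
    assert math.isclose(acos2(x, y), theta)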
def filter_butterworth(data: Union[np.ndarray, pd.DataFrame],
sample_freq: float = 500.0,
freq_filter: float = 40,
order: int = 2,
filter_type: str = 'low') -> Union[np.ndarray, pd.DataFrame]:
"""Filter data using Butterworth filter
    Filter a given set of data using a Butterworth filter, designed to have a specific passband for desired
    frequencies. It is set up to use milliseconds, not seconds.
Parameters
----------
data : np.ndarray or pd.DataFrame
Data to filter
sample_freq : int or float
Sampling rate of data (Hz), default=500. If data passed as dataframe, the sample_freq will be calculated from
the dataframe index.
freq_filter : int or float
Cut-off frequency for filter, default=40
order : int
Order of the Butterworth filter, default=2
filter_type : {'low', 'high', 'band'}
Type of filter to use, default='low'
Returns
-------
filter_out : np.ndarray
Output filtered data
Notes
-----
Data should be passed using milliseconds rather than seconds
"""
# Define filter window (expressed as a fraction of the Nyquist frequency, which is half the sampling rate)
if isinstance(data, pd.DataFrame):
dt = np.mean(np.diff(data.index))
assert 1 <= dt <= 50, "dt seems to be a number that doesn't fit with milliseconds..."
sample_freq = (1/dt)*1000
window = freq_filter/(sample_freq*0.5)
[b, a] = signal.butter(N=order, Wn=window, btype=filter_type)
if isinstance(data, pd.DataFrame):
data_filtered = pd.DataFrame(columns=data.columns, index=data.index)
for col in data_filtered:
data_filtered.loc[:, col] = signal.filtfilt(b, a, data[col])
return data_filtered
else:
return signal.filtfilt(b, a, data)
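# Illustrative sketch (synthetic signal, for demonstration only): low-pass filter a
# noisy 5 Hz sine sampled at 500 Hz, passed as a DataFrame indexed in milliseconds
# as the docstring above expects.
def _demo_butterworth_lowpass():
    t_ms = np.arange(0, 1000, 2.0)  # 2 ms steps -> 500 Hz sampling
    clean = np.sin(2 * np.pi * 5 * t_ms / 1000.0)
    noisy = clean + 0.2 * np.random.default_rng(0).normal(size=t_ms.size)
    df = pd.DataFrame({'signal': noisy}, index=t_ms)
    return filter_butterworth(df, freq_filter=40, order=2, filter_type='low')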
def filter_savitzkygolay(data: pd.DataFrame,
window_length: int = 50,
order: int = 2,
deriv: int = 0,
delta: float = 1.0):
"""Filter EGM data using a Savitzky-Golay filter
Filter a given set of data using a Savitzky-Golay filter, designed to smooth data using a convolution process
fitting to a low-degree polynomial within a given window. Default values are either taken from scipy
documentation (not all options are provided here), or adapted to match Hermans et al.
Parameters
----------
data : pd.DataFrame
Data to filter
    window_length : float, optional
        The length of the filter window in milliseconds. When passed to the scipy filter, will be converted to a
        positive odd integer (i.e. the number of coefficients). Default=50ms
order : int, optional
The order of the polynomial used to fit the samples. polyorder must be less than window_length. Default=2
deriv : int, optional
The order of the derivative to compute. This must be a nonnegative integer. The default is 0, which means to
filter the data without differentiating.
delta : float, optional
The spacing of the samples to which the filter will be applied. This is only used if deriv > 0. Default=1.0
Returns
-------
data : pd.DataFrame
Output filtered data
References
----------
The development and validation of an easy to use automatic QT-interval algorithm
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
PLoS ONE, 12(9), 1–14 (2017)
https://doi.org/10.1371/journal.pone.0184352
"""
i_window = np.where(data.index-data.index[0] > window_length)[0][0]
if (i_window % 2) == 0:
i_window += 1
data_filtered = | pd.DataFrame(index=data.index, columns=data.columns) | pandas.DataFrame |
import os
from copy import deepcopy
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from mpl_toolkits.axes_grid1.inset_locator import InsetPosition
from numpy import logical_not, isnan, array, where, abs, max, min, vstack, hstack
from pandas import read_csv, Series, DataFrame
from sklearn import linear_model
from sklearn.metrics import r2_score
from map.variable_importance import variable_importance
# plt.style.use('presentation.mplstyle')
def state_sum(csv):
cdf = read_csv(csv)
df = cdf.groupby(['State', 'State_Code'])[['IM2002_ac', 'NASS_2002_ac', 'IM2007_ac',
'NASS_2007_ac', 'IM2012_ac', 'NASS_2012_ac']].sum()
fig, ax = plt.subplots(1, 1)
s = Series(index=df.index)
s.loc[0], s.loc[df.shape[0]] = 0, 1e8
s.interpolate(axis=0, inplace=True)
s.index = s.values
s.plot(x=s.values, ax=ax, kind='line', lw=1, loglog=True, color='k', style='--', alpha=0.5)
df_state = df.groupby(['State', 'State_Code'])[['IM2002_ac', 'NASS_2002_ac', 'IM2007_ac',
'NASS_2007_ac', 'IM2012_ac', 'NASS_2012_ac']].sum()
s = Series(index=df_state.index)
s.loc[0], s.loc[df_state.shape[0]] = 1e6, 1e7
s.interpolate(axis=0, inplace=True)
s.index = s.values
ax2 = fig.add_axes([0, 0, 1, 1])
ax2.xaxis.label.set_visible(False)
ax2.yaxis.label.set_visible(False)
ax.text(0.26, 0.62, 'States', transform=ax.transAxes, ha="right")
df_state.plot(x='NASS_{}_ac'.format(2002), y='IM{}_ac'.format(2002), kind='scatter', s=4,
xlim=(1e6, 1e7), ylim=(1e6, 1e7), ax=ax2, loglog=True, color='g')
df_state.plot(x='NASS_{}_ac'.format(2007), y='IM{}_ac'.format(2007), kind='scatter', s=4,
xlim=(1e6, 1e7), ylim=(1e6, 1e7), ax=ax2, loglog=True, color='b')
df_state.plot(x='NASS_{}_ac'.format(2012), y='IM{}_ac'.format(2012), kind='scatter', s=4,
xlim=(1e6, 1e7), ylim=(1e6, 1e7), ax=ax2, loglog=True, color='r')
s.plot(x=s.values, kind='line', lw=1, loglog=True, color='k', style='--', alpha=0.5, label='_nolegend_')
ip = InsetPosition(ax, [0.07, 0.67, 0.3, 0.3])
ax2.set_axes_locator(ip)
plt.show()
return plt
def compare_nass_irrmapper_scatter(csv, fig_name=None):
df = read_csv(csv)
s = array([0, 1e6])
plt.rcParams['figure.constrained_layout.use'] = True
fig, ax = plt.subplots(2, 4, figsize=(6, 5), tight_layout=True, sharex=True, sharey=True)
rows, cols = [0, 0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 3, 0, 1, 2, 3]
all_comparison = array([]).reshape((0, 2))
for r, c, year in zip(rows, cols, range(1987, 2022, 5)):
n, i = 'NASS_{}'.format(year), 'IM_{}'.format(year)
a = ax[r, c]
ydf = df[[n, i]]
ydf = ydf / 247.105
nass, irr = ydf[n].values, ydf[i].values
nass, irr = nass[logical_not(isnan(nass))], irr[logical_not(isnan(nass))]
nass, irr = nass.reshape(nass.shape[0], 1), irr.reshape(irr.shape[0], 1)
print('{} county correlations...'.format(year))
r2, m, _int = get_correlations(nass, irr)
a.plot(s, s, linewidth=1, linestyle='--', color='k', alpha=0.5, label='_nolegend_')
ydf[n].name = ''
ydf[i].name = ''
ydf.plot(n, i, xlim=(1, 1e6), ylim=(1, 1e6), loglog=True, color='b', alpha=0.25,
kind='scatter', ax=a, marker='o', s=3)
a.set(adjustable='box')
a.set_title(str(year), size=10)
a.text(0.05, 0.9, '$r^2$={0:.3f}'.format(r2), transform=a.transAxes,
size=7)
a.text(0.05, 0.85, '$m$={0:.2f}'.format(m), transform=a.transAxes,
size=7)
if r == 0 and c == 3:
a.set_xticks([])
a.tick_params(labelsize=10)
a.tick_params(labelsize=10)
x_axis = a.xaxis
x_axis.label.set_visible(False)
a.set_xlim(1, 1e4)
y_axis = a.yaxis
y_axis.label.set_visible(False)
a.set_ylim(1, 1e4)
comp = hstack([irr, nass])
all_comparison = vstack([all_comparison, comp])
print('overall county correlations...')
i_c = all_comparison[:, 0].reshape((all_comparison[:, 0].shape[0], 1))
n_c = all_comparison[:, 1].reshape((all_comparison[:, 1].shape[0], 1))
get_correlations(i_c, n_c)
fig.delaxes(ax[1, 3])
# plt.subplots_adjust(wspace=0.1, hspace=0.1)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
x_txt = 'NASS ($\mathregular{km^2}$)'
y_txt = 'IrrMapper ($\mathregular{km^2}$)'
# fig.subplots_adjust(bottom=0.2, left=-0.5)
fig.text(0.5, 0.0, x_txt, ha='center', fontsize=10)
fig.text(0.0, 0.5, y_txt, va='center', rotation='vertical', fontsize=10)
if fig_name:
plt.savefig(fig_name, pad_inches=0.25)
else:
# plt.show()
plt.close()
def get_correlations(a, b):
coeff_det = r2_score(a, b)
regr = linear_model.LinearRegression()
regr.fit(a, b)
pred = regr.predict(a)
slope, intercept = regr.coef_[0][0], regr.intercept_
# print('regression coeff: {}'.format(slope))
print('r squared: {}'.format(coeff_det))
return coeff_det, slope, intercept
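# Illustrative check (demonstration only): on identical inputs, get_correlations
# should report perfect agreement and a unit slope.
def _demo_get_correlations():
    a = array([[float(i)] for i in range(10)])
    b = a.copy()
    r2, slope, intercept = get_correlations(a, b)
    assert abs(r2 - 1.0) < 1e-9 and abs(slope - 1.0) < 1e-9
    return r2, slope, intercept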
def irr_time_series_states(csv, fig_name=None):
df = read_csv(csv)
df = df.sort_index(axis=1)
yrs = [x for x in df.columns if 'noCdlMask_' in x]
df = df.groupby(['STATEFP']).sum()
df = df[yrs]
df = df.div(df.mean(axis=1), axis=0)
linear = [x for x in range(1986, 2019)]
totals = df.sum(axis=0)
    z_totals = totals.div(totals.mean())
z_totals.index = linear
fig, ax = plt.subplots()
for i, r in df.iterrows():
r.index = linear
r.name = state_fp_code_abv()[r.name]
ax = r.plot(ax=ax, kind='line', x=linear, y=r.values, alpha=0.6)
z_totals.name = 'All'
z_totals.plot(ax=ax, kind='line', color='k', alpha=0.7, x=linear, y=z_totals.values)
# plt.title('Normalized Irrigated Area')
ax.axvspan(2011.5, 2012.5, alpha=0.5, color='red')
plt.xlim(1984, 2020)
plt.ylim(0.4, 1.6)
plt.legend(loc='lower center', ncol=5, labelspacing=0.5)
if fig_name:
plt.savefig(fig_name)
return None
plt.show()
def irr_time_series_totals(irr, nass, fig_name=None):
df = read_csv(irr)
df.drop(['COUNTYFP', 'COUNTYNS', 'LSAD', 'GEOID'], inplace=True, axis=1)
df = df.groupby(['STATEFP']).sum()
state_irr = deepcopy(df)
totals = df.sum(axis=0)
labels = [x for x in df.columns if 'noCdlMask' in x]
irr_years = [x for x in range(1986, 2019)]
totals_s = totals[labels]
irr_nass_years = deepcopy(totals_s)
totals_s.sort_index(inplace=True)
totals_s.index = irr_years
totals = totals_s.values / 247.105
nass = read_csv(nass, index_col=[0])
nass.dropna(axis=0, subset=['STATE_ANSI'], inplace=True)
nass['STATE_ANSI'] = nass['STATE_ANSI'].astype(int)
nass = nass.loc[nass['STATE_ANSI'].isin(list(df.index))]
state_nass = deepcopy(nass)
cols = [x for x in nass.columns if 'VALUE' in x]
state_nass = state_nass[cols + ['STATE_ANSI']] / 247.105
state_nass = state_nass.groupby(['STATE_ANSI']).sum()
nass = nass[cols]
nass = nass / 247.105
nass_s = nass.sum(axis=0)
nass_years = [int(x[-4:]) for x in nass_s.index]
irr_labels = [x for x in labels if int(x[-4:]) in nass_years]
state_irr = state_irr[irr_labels].sort_index() / 247.105
nass_s.index = nass_years
nass_values = nass_s.values
print('state-by state correlations...')
s_i = state_irr.values.reshape(state_irr.values.size, 1)
s_n = state_nass.values.reshape(state_nass.values.size, 1)
get_correlations(s_i, s_n)
plt.plot(irr_years, totals / 1000., label='IrrMapper', zorder=1)
plt.scatter(x=nass_years, y=nass_values / 1000., marker='.', color='red', label='NASS', zorder=2)
# plt.title('Total Irrigated Area, Western 11 States \n 1986 - 2018')
plt.xlim(1985, 2019)
# plt.ylim(20, 30)
plt.ylabel('Thousand $\mathregular{km^2}$')
plt.xlabel('Year')
plt.tight_layout()
plt.legend()
if fig_name:
plt.savefig(fig_name)
else:
pass
# plt.show()
def state_fp_code_abv():
return {4: 'AZ',
6: 'CA',
8: 'CO',
16: 'ID',
30: 'MT',
32: 'NV',
35: 'NM',
41: 'OR',
49: 'UT',
53: 'WA',
56: 'WY'}
def state_fp_code_full_name():
return {4: 'Arizona',
6: 'California',
8: 'Colorado',
16: 'Idaho',
30: 'Montana',
32: 'Nevada',
35: 'New Mexico',
41: 'Oregon',
49: 'Utah',
53: 'Washington',
56: 'Wyoming'}
def irrigated_years_precip_anomaly(csv, save_fig=None):
df = read_csv(csv, skip_blank_lines=True).dropna()
means = df.groupby(by=['State']).mean().drop(columns=['Year', 'Anomaly Inches', 'Anomaly mm'])
n_cols, n_rows = 3, 4
fig, axes = plt.subplots(n_rows, n_cols, sharex=True,
sharey=False, figsize=(12, 6))
pos = [(0, 0), (0, 1), (0, 2), (0, 3),
(1, 0), (1, 1), (1, 2), (1, 3),
(2, 0), (2, 1), (2, 2), (2, 3)]
i = 0
for p in pos:
ax = axes[p[1], p[0]]
if p == (0, 3):
yrs_ = [_ for _ in range(1986, 2019)]
d = [0 for _ in yrs_]
ax.bar(d, height=d, bottom=d, width=0.75, align='center')
ax.set(xlabel='Time')
plt.xlim([1986, 2019])
plt.ylim([0, 1])
ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
ax.spines['left'].set_position(('data', 1986))
ax.spines['right'].set_position(('data', 2018))
ax.spines['left'].set_color('k')
ax.spines['bottom'].set_position(('axes', 0.45))
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.tick_params(axis='y', which='major', length=0, labelleft=False, labelright=False)
else:
name = means.iloc[i].name
print(name)
d = df[df['State'] == name]
a = d['Anomaly mm'].values
mean_ = means.iloc[i]['Mean mm']
bottoms = where(a < 0.0, mean_ + a, mean_)
height = abs(a)
x = d['Year'].values
# data_color = [(a[i] - a.min()) / (a.max() - a.min()) for i, _ in enumerate(a)]
data_color = [0.9 if x > 0.0 else 0.1 for x in a]
cmap = cm.get_cmap('RdBu')
color = cmap(data_color)
ax.bar(x, height=height, bottom=bottoms, width=0.75, align='center', color=color)
plt.xlim([1986, 2018])
plt.ylim([min(bottoms) - mean_ * 0.1, max(a + mean_) + mean_ * 0.1])
ax.set_title(name, size=12, y=0.9)
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_major_formatter(FormatStrFormatter(''))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.tick_params(which='minor', length=1.5)
ax.spines['left'].set_position(('data', 1986))
ax.spines['right'].set_position(('data', 2018))
ax.spines['bottom'].set_position(('data', mean_))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.tick_params(axis='x', which='major', bottom=True, top=False, labelbottom=False)
i += 1
# fig.delaxes(axes[5, 1])
tick_years = range(1990, 2020, 5)
plt.setp(axes, xticks=tick_years, xticklabels=[str(x) for x in tick_years])
if save_fig:
plt.tight_layout()
plt.savefig(save_fig)
return None
plt.show()
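# Illustrative call sketch (the CSV paths below are placeholders/assumptions, not
# files known to ship with this repository):
# irr_time_series_states('path/to/county_irrigation_totals.csv', fig_name='state_series.png')
# irrigated_years_precip_anomaly('path/to/state_precip_anomalies.csv', save_fig='precip_anomalies.png')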
def state_bar_plots(csv, save_fig=None):
df = | read_csv(csv) | pandas.read_csv |
import numpy as np
import pandas as pd
import json
import geopandas as gpd
from shapely.geometry import Point, MultiPolygon
blrDF = gpd.read_file("data/base/"+city+"/city.geojson")
blr_quarantined = | pd.read_csv("data/base/"+city+"/BLR_incoming travel.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
These test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not | inference.is_array_like([1, 2, 3]) | pandas.core.dtypes.inference.is_array_like |
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from app import helpers
from app.ui import (
header,
contact_modal,
tab_comparison_controls,
tab_comparison_sip_cards,
tab_port_sip_cards,
tab_map_controls,
tab_map_sip_cards,
)
from app import tab_map, tab_stats, tab_compare
from config import strings, constants
from dash.dependencies import Input, Output
# DATASET LOADING AND TRANSFORMATION
df_tab_1 = pd.read_csv("data/first_tab_dataset.csv")
df_ships_duration = pd.read_csv("data/ships_stop_duration.csv")
df_ships_duration["min_time"] = pd.to_datetime(df_ships_duration["min_time"])
df_ships_duration["year"] = df_ships_duration["min_time"].dt.year
df_ships_duration["month"] = df_ships_duration["min_time"].dt.month
df_tab_1 = pd.merge(left=df_tab_1, right=df_ships_duration, on="SHIP_ID")
df_tab_1 = df_tab_1.drop(["port_y", "ship_type_y"], axis=1)
df_tab_1 = df_tab_1.rename({"port_x": "port", "ship_type_x": "ship_type"}, axis=1)
df_tab_1["year"] = df_tab_1["date"].astype("datetime64").dt.year
df_tab_1["month"] = df_tab_1["date"].astype("datetime64").dt.month
df_tab_2 = | pd.read_csv("data/second_tab_dataset.csv") | pandas.read_csv |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Alignments
#
# This notebook analyzes page alignments and prepares metrics for final use.
# %% [markdown]
# ## Setup
#
# We begin by loading necessary libraries:
# %%
from pathlib import Path
import pandas as pd
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import gzip
import pickle
import binpickle
from natural.size import binarysize
# %%
codec = binpickle.codecs.Blosc('zstd')
# %% [markdown]
# Set up progress bar and logging support:
# %%
from tqdm.auto import tqdm
tqdm.pandas(leave=False)
# %%
import sys, logging
logging.basicConfig(level=logging.INFO, stream=sys.stderr)
log = logging.getLogger('alignment')
# %% [markdown]
# Import metric code:
# %%
# %load_ext autoreload
# %autoreload 1
# %%
# %aimport metrics
from trecdata import scan_runs
# %% [markdown]
# ## Loading Data
#
# We first load the page metadata:
# %%
pages = pd.read_json('data/trec_metadata_eval.json.gz', lines=True)
pages = pages.drop_duplicates('page_id')
pages.info()
# %% [markdown]
# Now we will load the evaluation topics:
# %%
eval_topics = pd.read_json('data/eval-topics-with-qrels.json.gz', lines=True)
eval_topics.info()
# %%
train_topics = pd.read_json('data/trec_topics.json.gz', lines=True)
train_topics.info()
# %% [markdown]
# Train and eval topics use a disjoint set of IDs:
# %%
train_topics['id'].describe()
# %%
eval_topics['id'].describe()
# %% [markdown]
# This allows us to create a single, integrated topics list for convenience:
# %%
topics = pd.concat([train_topics, eval_topics], ignore_index=True)
topics['eval'] = False
topics.loc[topics['id'] >= 100, 'eval'] = True
topics.head()
# %% [markdown]
# Finally, a bit of hard-coded data - the world population:
# %%
world_pop = pd.Series({
'Africa': 0.155070563,
'Antarctica': 1.54424E-07,
'Asia': 0.600202585,
'Europe': 0.103663858,
'Latin America and the Caribbean': 0.08609797,
'Northern America': 0.049616733,
'Oceania': 0.005348137,
})
world_pop.name = 'geography'
# %% [markdown]
# And a gender global target:
# %%
gender_tgt = pd.Series({
'female': 0.495,
'male': 0.495,
'third': 0.01
})
gender_tgt.name = 'gender'
gender_tgt.sum()
# %% [markdown]
# Xarray intersectional global target:
# %%
geo_tgt_xa = xr.DataArray(world_pop, dims=['geography'])
gender_tgt_xa = xr.DataArray(gender_tgt, dims=['gender'])
int_tgt = geo_tgt_xa * gender_tgt_xa
int_tgt
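# %% [markdown]
# As a quick sanity check (added purely for illustration), the intersectional target should sum to approximately 1, since it is the outer product of two distributions that each sum to (roughly) 1:

# %%
float(int_tgt.sum())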
# %% [markdown]
# And the order of work-needed codes:
# %%
work_order = [
'Stub',
'Start',
'C',
'B',
'GA',
'FA',
]
# %% [markdown]
# ## Query Relevance
#
# We now need to get the qrels for the topics. This is done by creating frames with entries for every relevant document; missing documents are assumed irrelevant (0).
#
# First the training topics:
# %%
train_qrels = train_topics[['id', 'rel_docs']].explode('rel_docs', ignore_index=True)
train_qrels.rename(columns={'rel_docs': 'page_id'}, inplace=True)
train_qrels['page_id'] = train_qrels['page_id'].astype('i4')
train_qrels = train_qrels.drop_duplicates()
train_qrels.head()
# %%
eval_qrels = eval_topics[['id', 'rel_docs']].explode('rel_docs', ignore_index=True)
eval_qrels.rename(columns={'rel_docs': 'page_id'}, inplace=True)
eval_qrels['page_id'] = eval_qrels['page_id'].astype('i4')
eval_qrels = eval_qrels.drop_duplicates()
eval_qrels.head()
# %% [markdown]
# And concatenate:
# %%
qrels = | pd.concat([train_qrels, eval_qrels], ignore_index=True) | pandas.concat |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
        msg = "'>=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
| Timestamp("2000-01-01") | pandas.Timestamp |
import mankey.custom_helpers as transformers
import pandas as pd
def test_basic():
assert 1 == 1
def test_ordinal_h():
import pandas as pd
data = {'Pclass': ['First_class', 'Second_Class', 'Third_Class', 'Fourth_Class'],
'level': [1, 2, 3,4],
}
df = pd.DataFrame(data)
levels_dict = {"Pclass": ['First_class', 'Second_Class', 'Third_Class'],
}
target_result = {'Pclass': [0, 1, 2,-1],
'level': [1, 2, 3,4],
}
target_df = pd.DataFrame(target_result)
t_ord = transformers.Ordinal_Transformer()
t_ord.fit( levels_dict, df,None)
df = t_ord.transform(df, None)
| pd.testing.assert_frame_equal(df, target_df) | pandas.testing.assert_frame_equal |
import datetime as dt
import pytest
from distutils.version import LooseVersion
import numpy as np
try:
import pandas as pd
from pandas._testing import (
makeCustomDataframe, makeMixedDataFrame, makeTimeDataFrame
)
except ImportError:
pytestmark = pytest.mark.skip('pandas not available')
from bokeh.models.widgets.tables import (
NumberFormatter, IntEditor, NumberEditor, StringFormatter,
    SelectEditor, DateFormatter, DataCube, CellEditor, StringEditor,
SumAggregator, AvgAggregator, MinAggregator
)
from panel.depends import bind
from panel.widgets import Button, DataFrame, Tabulator, TextInput
pd_old = pytest.mark.skipif(LooseVersion(pd.__version__) < '1.3',
reason="Requires latest pandas")
def test_dataframe_widget(dataframe, document, comm):
table = DataFrame(dataframe)
model = table.get_root(document, comm)
index_col, int_col, float_col, str_col = model.columns
assert index_col.title == 'index'
assert isinstance(index_col.formatter, NumberFormatter)
assert isinstance(index_col.editor, CellEditor)
assert int_col.title == 'int'
assert isinstance(int_col.formatter, NumberFormatter)
assert isinstance(int_col.editor, IntEditor)
assert float_col.title == 'float'
assert isinstance(float_col.formatter, NumberFormatter)
assert isinstance(float_col.editor, NumberEditor)
assert str_col.title == 'str'
    assert isinstance(str_col.formatter, StringFormatter)
    assert isinstance(str_col.editor, StringEditor)
def test_dataframe_widget_no_show_index(dataframe, document, comm):
table = DataFrame(dataframe, show_index=False)
model = table.get_root(document, comm)
assert len(model.columns) == 3
int_col, float_col, str_col = model.columns
assert int_col.title == 'int'
assert float_col.title == 'float'
assert str_col.title == 'str'
table.show_index = True
assert len(model.columns) == 4
index_col, int_col, float_col, str_col = model.columns
assert index_col.title == 'index'
assert int_col.title == 'int'
assert float_col.title == 'float'
assert str_col.title == 'str'
def test_dataframe_widget_datetimes(document, comm):
table = DataFrame(makeTimeDataFrame())
model = table.get_root(document, comm)
dt_col, _, _, _, _ = model.columns
assert dt_col.title == 'index'
assert isinstance(dt_col.formatter, DateFormatter)
assert isinstance(dt_col.editor, CellEditor)
def test_dataframe_editors(dataframe, document, comm):
editor = SelectEditor(options=['A', 'B', 'C'])
table = DataFrame(dataframe, editors={'str': editor})
model = table.get_root(document, comm)
model_editor = model.columns[-1].editor
    assert model_editor is not editor
assert isinstance(model_editor, SelectEditor)
assert model_editor.options == ['A', 'B', 'C']
def test_dataframe_formatter(dataframe, document, comm):
formatter = NumberFormatter(format='0.0000')
table = DataFrame(dataframe, formatters={'float': formatter})
model = table.get_root(document, comm)
model_formatter = model.columns[2].formatter
assert model_formatter is not formatter
assert isinstance(model_formatter, NumberFormatter)
assert model_formatter.format == formatter.format
def test_dataframe_triggers(dataframe):
events = []
def increment(event, events=events):
events.append(event)
table = DataFrame(dataframe)
table.param.watch(increment, 'value')
table._process_events({'data': {'str': ['C', 'B', 'A']}})
assert len(events) == 1
def test_dataframe_does_not_trigger(dataframe):
events = []
def increment(event, events=events):
events.append(event)
table = DataFrame(dataframe)
table.param.watch(increment, 'value')
table._process_events({'data': {'str': ['A', 'B', 'C']}})
assert len(events) == 0
def test_dataframe_selected_dataframe(dataframe):
table = DataFrame(dataframe, selection=[0, 2])
pd.testing.assert_frame_equal(table.selected_dataframe, dataframe.iloc[[0, 2]])
def test_dataframe_process_selection_event(dataframe):
table = DataFrame(dataframe, selection=[0, 2])
table._process_events({'indices': [0, 2]})
pd.testing.assert_frame_equal(table.selected_dataframe, dataframe.iloc[[0, 2]])
def test_dataframe_process_data_event(dataframe):
df = dataframe.copy()
table = DataFrame(dataframe, selection=[0, 2])
table._process_events({'data': {'int': [5, 7, 9]}})
df['int'] = [5, 7, 9]
pd.testing.assert_frame_equal(table.value, df)
table._process_events({'data': {'int': {1: 3, 2: 4, 0: 1}}})
df['int'] = [1, 3, 4]
pd.testing.assert_frame_equal(table.value, df)
def test_dataframe_duplicate_column_name(document, comm):
df = pd.DataFrame([[1, 1], [2, 2]], columns=['col', 'col'])
with pytest.raises(ValueError):
table = DataFrame(df)
df = pd.DataFrame([[1, 1], [2, 2]], columns=['a', 'b'])
table = DataFrame(df)
with pytest.raises(ValueError):
table.value = table.value.rename(columns={'a': 'b'})
df = pd.DataFrame([[1, 1], [2, 2]], columns=['a', 'b'])
table = DataFrame(df)
table.get_root(document, comm)
with pytest.raises(ValueError):
table.value = table.value.rename(columns={'a': 'b'})
def test_hierarchical_index(document, comm):
df = pd.DataFrame([
('Germany', 2020, 9, 2.4, 'A'),
('Germany', 2021, 3, 7.3, 'C'),
('Germany', 2022, 6, 3.1, 'B'),
('UK', 2020, 5, 8.0, 'A'),
('UK', 2021, 1, 3.9, 'B'),
('UK', 2022, 9, 2.2, 'A')
], columns=['Country', 'Year', 'Int', 'Float', 'Str']).set_index(['Country', 'Year'])
table = DataFrame(value=df, hierarchical=True,
aggregators={'Year': {'Int': 'sum', 'Float': 'mean'}})
model = table.get_root(document, comm)
assert isinstance(model, DataCube)
assert len(model.grouping) == 1
grouping = model.grouping[0]
assert len(grouping.aggregators) == 2
agg1, agg2 = grouping.aggregators
assert agg1.field_ == 'Int'
assert isinstance(agg1, SumAggregator)
assert agg2.field_ == 'Float'
assert isinstance(agg2, AvgAggregator)
table.aggregators = {'Year': 'min'}
agg1, agg2 = grouping.aggregators
print(grouping)
assert agg1.field_ == 'Int'
assert isinstance(agg1, MinAggregator)
assert agg2.field_ == 'Float'
assert isinstance(agg2, MinAggregator)
def test_none_table(document, comm):
table = DataFrame(value=None)
assert table.indexes == []
model = table.get_root(document, comm)
assert model.source.data == {}
def test_tabulator_selected_dataframe():
df = makeMixedDataFrame()
table = Tabulator(df, selection=[0, 2])
pd.testing.assert_frame_equal(table.selected_dataframe, df.iloc[[0, 2]])
def test_tabulator_selected_and_filtered_dataframe(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df)
pd.testing.assert_frame_equal(table.selected_dataframe, df)
table.add_filter('foo3', 'C')
pd.testing.assert_frame_equal(table.selected_dataframe, df[df["C"] == "foo3"])
def test_tabulator_config_defaults(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df)
model = table.get_root(document, comm)
assert model.configuration['columns'] == [
{'field': 'index'},
{'field': 'A'},
{'field': 'B'},
{'field': 'C'},
{'field': 'D'}
]
assert model.configuration['selectable'] == True
def test_tabulator_config_formatter_string(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, formatters={'B': 'tickCross'})
model = table.get_root(document, comm)
assert model.configuration['columns'][2] == {'field': 'B', 'formatter': 'tickCross'}
def test_tabulator_config_formatter_dict(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, formatters={'B': {'type': 'tickCross', 'tristate': True}})
model = table.get_root(document, comm)
assert model.configuration['columns'][2] == {'field': 'B', 'formatter': 'tickCross', 'formatterParams': {'tristate': True}}
def test_tabulator_config_editor_string(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, editors={'B': 'select'})
model = table.get_root(document, comm)
assert model.configuration['columns'][2] == {'field': 'B', 'editor': 'select'}
def test_tabulator_config_editor_dict(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, editors={'B': {'type': 'select', 'values': True}})
model = table.get_root(document, comm)
assert model.configuration['columns'][2] == {'field': 'B', 'editor': 'select', 'editorParams': {'values': True}}
def test_tabulator_groups(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, groups={'Number': ['A', 'B'], 'Other': ['C', 'D']})
model = table.get_root(document, comm)
assert model.configuration['columns'] == [
{'field': 'index'},
{'title': 'Number',
'columns': [
{'field': 'A'},
{'field': 'B'}
]},
{'title': 'Other',
'columns': [
{'field': 'C'},
{'field': 'D'}
]}
]
def test_tabulator_frozen_cols(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, frozen_columns=['index'])
model = table.get_root(document, comm)
assert model.configuration['columns'] == [
{'field': 'index', 'frozen': True},
{'field': 'A'},
{'field': 'B'},
{'field': 'C'},
{'field': 'D'}
]
def test_tabulator_frozen_rows(document, comm):
df = | makeMixedDataFrame() | pandas._testing.makeMixedDataFrame |
import os
import io
import pandas as pd
import sys
import tempfile
import webbrowser
from functools import lru_cache
import zipfile
import requests
import datetime
import bisect
import re
import numpy as np
import plotly.express as pex
import math
try:
import numpy_ext as npext
except ImportError:
npext = None
import functools
try:
# these are used with rolling apply if one wants concurrency, not used by default
from joblib import Parallel, delayed
except ImportError:
Parallel = None
delayed = None
pass
def year_frac(dt1, dt2):
if isinstance(dt1, str):
dt1 = pd.to_datetime(dt1)
elif isinstance(dt1, pd.Timestamp):
dt1 = dt1.normalize()
if isinstance(dt2, str):
dt2 = pd.to_datetime(dt2)
elif isinstance(dt2, pd.Timestamp):
dt2 = dt2.normalize()
return (dt2 - dt1) / np.timedelta64(1, 'Y')
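# Illustrative usage (approximate, since the division uses an average-length year):
#   >>> year_frac('2020-01-01', '2021-01-01')   # ~1.0
#   >>> year_frac('2021-01-01', '2021-07-01')   # ~0.5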
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return the answer.
    (adapted from a widely circulated recipe)
    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning
    an answer is required of the user).
    The "answer" return value is one of "yes" or "no".
    """
valid = {"yes": "yes", "y": "yes", "ye": "yes",
"no": "no", "n": "no"}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def display_sortable_table(df, title='', header_size=1, filename=None, sort_on_col=None, ascending=True, file_post_process_func=None, autosort=False, display=True):
"""
if filename is set, we also write the stuff to file
"""
df = df.copy()
if autosort:
n = len(df.columns) + 1
df['__'] = range(len(df))
sort_on_col = n
boilerplate = '<html><head><script type="text/javascript" language="javascript" src="https://code.jquery.com/jquery-3.5.1.js"></script>'
boilerplate += '<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.22/css/jquery.dataTables.css">'
boilerplate += '<script type="text/javascript" charset="utf8" src="https://cdn.datatables.net/1.10.22/js/jquery.dataTables.js"></script>'
boilerplate += '<script type="text/javascript" class="init">'
boilerplate += '$(document).ready( function () {'
if sort_on_col is None:
boilerplate += "$('#dummy').DataTable();"
else:
if not ascending:
boilerplate += "$('#dummy').DataTable({'order':[[%d, 'desc']]});" % sort_on_col
else:
boilerplate += "$('#dummy').DataTable({'order':[[%d, 'asc']]});" % sort_on_col
boilerplate += '} );'
boilerplate += '</script></head>'
_buffer = io.StringIO()
_buffer.write(boilerplate)
    with pd.option_context('display.max_colwidth', None):  # None = no truncation (-1 is deprecated)
df.to_html(_buffer, index=False)
_buffer.seek(0)
tmp = _buffer.read()
tmp = tmp.replace('<table border="1" class="dataframe">',
'<table border="1" id="dummy" class="display">')
tmp += '</html>'
_buffer.seek(0)
_buffer.write(tmp)
_buffer.seek(0)
raw = _buffer.read()
if title is not None:
raw = '<h%d>%s</h%d>%s' % (header_size, title, header_size, raw)
if filename is not None and isinstance(filename, str):
res = 'yes'
if os.path.exists(filename):
res = query_yes_no(
"%s exists, do you want to overwrite?" % filename, default='no')
if res == 'yes':
with open(filename, 'w') as f:
f.write(raw)
print("wrote file to '%s'" % filename)
if file_post_process_func is not None:
file_post_process_func(filename)
if display:
try:
if not len(title):
prefix = None
else:
prefix = title
tmp = tempfile.NamedTemporaryFile(
suffix='.html', prefix=prefix, delete=False, mode='w')
tmp.write(raw)
return open_path_in_browser(tmp.name)
except:
print("Not able to fire up browser, are you running in colab per chance?")
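# Illustrative usage (hypothetical data; pass a filename to also write the HTML
# to disk, and display=False to skip opening a browser tab):
#   >>> scores = pd.DataFrame({'name': ['a', 'b'], 'score': [2, 1]})
#   >>> display_sortable_table(scores, title='Scores', sort_on_col=2,
#   ...                        ascending=False, display=False)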
def open_path_in_browser(path):
return webbrowser.open('file://' + path)
def download_and_save_zipped_excel_data_to_file(url='', tab_name='', refresh=False):
"""Returns the file name of the temp file we've written the data to
"""
res = get_request_from_session(url=url, refresh=refresh)
filebytes = io.BytesIO(res.content)
tmp = zipfile.ZipFile(filebytes)
temp = tempfile.NamedTemporaryFile(delete=False, suffix='.xlsx')
existing = tmp.namelist()
if tab_name not in existing:
raise Exception("File = '%s' not found among existing files (%s)" % (
tab_name, ','.join(existing)))
with open(temp.name, 'wb') as fp:
fp.write(tmp.read(tab_name))
return temp.name
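# Illustrative usage (hypothetical URL and archive member name):
#   >>> fname = download_and_save_zipped_excel_data_to_file(
#   ...     url='https://example.com/data.zip', tab_name='report.xlsx')
#   >>> df = pd.read_excel(fname)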
def get_request_from_session(session=None, url='', refresh=False, headers=None):
"""
    don't want the session object as part of the cache key
"""
if refresh:
_get_request_from_session.cache_clear()
return _get_request_from_session(session=session, url=url, headers=headers)
@lru_cache(maxsize=None)
def _get_request_from_session(session=None, url=None, headers=None):
if session is None:
return requests.get(url, headers=headers)
else:
return session.get(url, headers=headers)
def uk_holidays(refresh=False):
if refresh:
_uk_holidays.cache_clear()
return _uk_holidays()
@lru_cache(maxsize=None)
def _uk_holidays():
"""gov.uk address here (doesn't go back very far in time): https://www.gov.uk/bank-holidays.json
"""
url = 'https://raw.githubusercontent.com/ministryofjustice/govuk-bank-holidays/main/govuk_bank_holidays/bank-holidays.json'
data = requests.get(url).json()
dates = pd.to_datetime([x['date']
for x in data['england-and-wales']['events']])
return dates
def date_range(start_date, end_date, cal='uk', closed=None, refresh=False):
if cal != 'uk':
raise NotImplementedError("Only uk calendar implemented so far!")
return pd.bdate_range(start_date, end_date, holidays=uk_holidays(refresh=refresh), freq='C', closed=closed)
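# Illustrative usage (result depends on the gov.uk holiday feed; in 2021 the
# 27th and 28th of December were substitute bank holidays):
#   >>> date_range('2021-12-24', '2021-12-31')
#   DatetimeIndex(['2021-12-24', '2021-12-29', '2021-12-30', '2021-12-31'], ...)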
def serial_date_to_datetime(ordinal, epoch=datetime.datetime(1900, 1, 1), as_time_stamps=True):
if ordinal > 59:
ordinal -= 1 # Excel leap year bug, 1900 is not a leap year!
inDays = int(ordinal)
frac = ordinal - inDays
inSecs = int(round(frac * 86400.0))
if as_time_stamps:
return pd.to_datetime(epoch + datetime.timedelta(days=inDays - 1, seconds=inSecs))
return epoch + datetime.timedelta(days=inDays - 1, seconds=inSecs)
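# Illustrative usage (Excel 1900 date system; serial 44197 is 2021-01-01):
#   >>> serial_date_to_datetime(44197)
#   Timestamp('2021-01-01 00:00:00')
#   >>> serial_date_to_datetime(44197.5)   # fractional part = time of day
#   Timestamp('2021-01-01 12:00:00')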
def interpolate(a, b, sort=False, return_first_lower=True):
# if b is in list a, return its idx, else return idx of element smaller than b
if sort is True:
a.sort()
if b in a:
return a.index(b)
elif return_first_lower:
return bisect.bisect_left(a, b) - 1
else:
return bisect.bisect_left(a, b)
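# Illustrative usage (exact hits return their index, otherwise the index of the
# nearest smaller element unless return_first_lower=False):
#   >>> interpolate([1, 3, 5], 3)
#   1
#   >>> interpolate([1, 3, 5], 4)
#   1
#   >>> interpolate([1, 3, 5], 4, return_first_lower=False)
#   2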
def find_all_indicies(line='', tag='', case=False, use_regex=True):
"""
    Sometimes when you're searching for e.g. HTML tags it can be cumbersome to escape everything in regex.
"""
if use_regex:
if not case:
return [m.start() for m in re.finditer(tag, line, re.IGNORECASE)]
else:
return [m.start() for m in re.finditer(tag, line)]
else:
raise NotImplementedError("Could try re.escape(*) here maybe?")
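# Illustrative usage:
#   >>> find_all_indicies('<td>a</td><td>b</td>', '<td>')
#   [0, 10]
#   >>> find_all_indicies('AbcABC', 'abc', case=False)
#   [0, 3]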
def big_fmt(val):
"""
    Format with a thousands separator; NaN-like values become 'n/a'.
"""
if pd.isnull(val):
return 'n/a'
if isinstance(val, str):
return val
return '{:,}'.format(int(round(val, 0))).strip()
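# Illustrative usage:
#   >>> big_fmt(1234567.89)
#   '1,234,568'
#   >>> big_fmt(np.nan)
#   'n/a'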
def value_is_numeric_type(X):
X = np.asanyarray(X)
if (X.dtype.char in np.typecodes['AllFloat']) or (X.dtype.char in np.typecodes['Integer']): # noqa: E501
return True
return False
def strictly_increasing(L):
"""Copied from accepted answer to this:
https://stackoverflow.com/questions/4983258/python-how-to-check-list-monotonicity
"""
return all(x < y for x, y in zip(L, L[1:]))
def strictly_decreasing(L):
return all(x > y for x, y in zip(L, L[1:]))
def non_increasing(L):
return all(x >= y for x, y in zip(L, L[1:]))
def non_decreasing(L):
return all(x <= y for x, y in zip(L, L[1:]))
def monotonic(L):
return non_increasing(L) or non_decreasing(L)
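# Illustrative usage:
#   >>> strictly_increasing([1, 2, 3])
#   True
#   >>> non_decreasing([1, 1, 2])
#   True
#   >>> monotonic([3, 2, 2, 1])
#   True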
def syncsort(a, b):
"""
sorts a in ascending order (and b will tag along, so each element of b is still associated with the right element in a)
"""
a, b = (list(t) for t in zip(*sorted(zip(a, b))))
return a, b
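# Illustrative usage:
#   >>> syncsort([3, 1, 2], ['c', 'a', 'b'])
#   ([1, 2, 3], ['a', 'b', 'c'])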
def string_col_to_unique_components(df, col_name, separator='|'):
"""If you have a column stored as strings, eg. 'player1|player2|player3'
returns a flat array with unique values
"""
return np.unique(df[col_name].str.split(separator, expand=True).values.ravel())
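# Illustrative usage (hypothetical frame):
#   >>> games = pd.DataFrame({'players': ['p1|p2', 'p2|p3']})
#   >>> string_col_to_unique_components(games, 'players')
#   array(['p1', 'p2', 'p3'], dtype=object)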
def rolling_window(a, window):
"""
    this is built into later versions of numpy (np.lib.stride_tricks.sliding_window_view)
"""
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
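# Illustrative usage:
#   >>> rolling_window(np.arange(5), 3)
#   array([[0, 1, 2],
#          [1, 2, 3],
#          [2, 3, 4]])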
def plot_df(df, plot_these=None, normalize=True, title=None, yaxis_title=None):
"""
plotly express is awkward when it comes to plotting very basic stuff, for some reason
assume the df has a proper datetime index (any way to check?)
if plot_these is None, plot all but the index, m'kay?
"""
if df.index.name is None:
df.index.name = 'date'
idx_name = df.index.name
df = df.copy()
# convert to long format:
if plot_these is None:
vv = list(set(df.columns) - set([idx_name]))
else:
vv = plot_these
if normalize:
df.dropna(inplace=True)
# have to drop na , otherwise we risk dividing by na
df = df / df.iloc[0, :]
df_long = df.reset_index().melt(id_vars=[idx_name], value_vars=vv)
if title is None:
fig = pex.line(df_long, x=idx_name, y='value', color='variable')
else:
fig = pex.line(df_long, x=idx_name, y='value',
color='variable', title=title)
if yaxis_title is not None:
fig.update_layout(yaxis_title=yaxis_title)
fig.show()
def rolling_apply(func, window, *arrays, n_jobs=1, **kwargs):
if npext is not None:
        return npext.rolling_apply(func, window, *arrays, n_jobs=n_jobs, **kwargs)
return _rolling_apply(func, window, *arrays, n_jobs=n_jobs, **kwargs)
def _rolling_apply(func, window, *arrays, n_jobs=1, **kwargs):
"""
    numpy_ext requires numpy >= 1.20
this code is copied from numpy_ext
Useful for doing rolling window calcs involving multiple cols on a dataframe
For example, rolling 10-day window on off and def rating in nba:
def rollinator(poss, rtg):
return np.dot(poss, rtg)/poss.sum()
rwb['off_L10'] = rolling_apply(rollinator, 10, rwb.POSS, rwb.OFF_RATING)
You CAN'T do this simply using the built-in rolling()... (obv this might change in the future)
Doesn't seem to work with multi-dimensional output?
Roll a fixed-width window over an array or a group of arrays, producing slices.
Apply a function to each slice / group of slices, transforming them into a value.
Perform computations in parallel, optionally.
Return a new np.ndarray with the resulting values.
Parameters
----------
func : Callable
The function to apply to each slice or a group of slices.
window : int
Window size.
*arrays : list
List of input arrays.
n_jobs : int, optional
Parallel tasks count for joblib. If 1, joblib won't be used. Default is 1.
**kwargs : dict
Input parameters (passed to func, must be named).
Returns
-------
np.ndarray
Examples
--------
>>> arr = np.array([1, 2, 3, 4, 5])
>>> rolling_apply(sum, 2, arr)
array([nan, 3., 5., 7., 9.])
>>> arr2 = np.array([1.5, 2.5, 3.5, 4.5, 5.5])
>>> func = lambda a1, a2, k: (sum(a1) + max(a2)) * k
>>> rolling_apply(func, 2, arr, arr2, k=-1)
array([ nan, -5.5, -8.5, -11.5, -14.5])
"""
if not any(isinstance(window, t) for t in [int, np.integer]):
raise TypeError(f'Wrong window type ({type(window)}) int expected')
window = int(window)
if max(len(x.shape) for x in arrays) != 1:
raise ValueError('Wrong array shape. Supported only 1D arrays')
if len({array.size for array in arrays}) != 1:
raise ValueError('Arrays must be the same length')
def _apply_func_to_arrays(idxs):
return func(*[array[idxs[0]:idxs[-1] + 1] for array in arrays], **kwargs)
array = arrays[0]
rolls = rolling(
array if len(arrays) == n_jobs == 1 else np.arange(len(array)),
window=window,
skip_na=True
)
if n_jobs == 1:
if len(arrays) == 1:
arr = list(map(functools.partial(func, **kwargs), rolls))
else:
arr = np.asarray(list(map(_apply_func_to_arrays, rolls)))
else:
f = delayed(_apply_func_to_arrays)
arr = Parallel(n_jobs=n_jobs)(f(idxs[[0, -1]]) for idxs in rolls)
try:
return prepend_na(arr, n=window - 1)
except:
return prepend_na(arr.ravel(), n=window - 1)
def rolling(
array,
window,
skip_na=False,
as_array=False
):
"""
used with rolling_apply. code copied from numpy_ext
Roll a fixed-width window over an array.
The result is either a 2-D array or a generator of slices, controlled by `as_array` parameter.
Parameters
----------
array : np.ndarray
Input array.
window : int
Size of the rolling window.
skip_na : bool, optional
If False, the sequence starts with (window-1) windows filled with nans. If True, those are omitted.
Default is False.
as_array : bool, optional
If True, return a 2-D array. Otherwise, return a generator of slices. Default is False.
Returns
-------
np.ndarray or Generator[np.ndarray, None, None]
Rolling window matrix or generator
Examples
--------
rolling(np.array([1, 2, 3, 4, 5]), 2, as_array=True)
array([[nan, 1.],
[ 1., 2.],
[ 2., 3.],
[ 3., 4.],
[ 4., 5.]])
Usage with numpy functions
arr = rolling(np.array([1, 2, 3, 4, 5]), 2, as_array=True)
np.sum(arr, axis=1)
array([nan, 3., 5., 7., 9.])
"""
if not any(isinstance(window, t) for t in [int, np.integer]):
raise TypeError(f'Wrong window type ({type(window)}) int expected')
window = int(window)
if array.size < window:
raise ValueError('array.size should be bigger than window')
def rows_gen():
if not skip_na:
prepend_func = prepend_na
if np.issubdtype(array.dtype, np.datetime64):
def prepend_func(arr, n):
return np.hstack((np.repeat(np.datetime64('NaT'), n), arr))
yield from (prepend_func(array[:i + 1], (window - 1) - i) for i in np.arange(window - 1))
starts = np.arange(array.size - (window - 1))
yield from (array[start:end] for start, end in zip(starts, starts + window))
return np.array([row for row in rows_gen()]) if as_array else rows_gen()
def prepend_na(array, n):
"""
used with rolling_apply
Return a copy of array with nans inserted at the beginning.
Parameters
----------
array : np.ndarray
Input array.
n : int
Number of elements to insert.
Returns
-------
np.ndarray
New array with nans added at the beginning.
Examples
--------
>>> prepend_na(np.array([1, 2]), 2)
array([nan, nan, 1., 2.])
"""
return np.hstack(
(
nans(n, array[0].dtype) if len(array) and hasattr(
array[0], 'dtype') else nans(n),
array
)
)
def nans(shape, dtype=np.float64):
"""
Return a new array of a given shape and type, filled with np.nan values.
Parameters
----------
shape : int or tuple of ints
Shape of the new array, e.g., (2, 3) or 2.
dtype: data-type, optional
Returns
-------
np.ndarray
Array of np.nans of the given shape.
Examples
--------
>>> nans(3)
array([nan, nan, nan])
>>> nans((2, 2))
array([[nan, nan],
[nan, nan]])
>>> nans(2, np.datetime64)
array(['NaT', 'NaT'], dtype=datetime64)
"""
if np.issubdtype(dtype, np.integer):
        dtype = np.float64  # np.float alias was removed in newer numpy versions
arr = np.empty(shape, dtype=dtype)
arr.fill(np.nan)
return arr
def filter_nans(x):
return x[~np.isnan(x)]
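# Illustrative usage:
#   >>> filter_nans(np.array([1.0, np.nan, 2.0]))
#   array([1., 2.])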
def format_time(timespan, precision=3):
"""Formats the timespan in a human readable form (copied from ipython code)"""
if timespan >= 60.0:
# we have more than a minute, format that in a human readable form
# Idea from http://snipplr.com/view/5713/
parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)]
time = []
leftover = timespan
for suffix, length in parts:
value = int(leftover / length)
if value > 0:
leftover = leftover % length
time.append(u'%s%s' % (str(value), suffix))
if leftover < 1:
break
return " ".join(time)
# Unfortunately the unicode 'micro' symbol can cause problems in
# certain terminals.
# See bug: https://bugs.launchpad.net/ipython/+bug/348466
# Try to prevent crashes by being more secure than it needs to
# E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set.
    units = [u"s", u"ms", u'us', "ns"]  # the safe value
if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
try:
u'\xb5'.encode(sys.stdout.encoding)
units = [u"s", u"ms", u'\xb5s', "ns"]
except:
pass
scaling = [1, 1e3, 1e6, 1e9]
if timespan > 0.0:
order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
else:
order = 3
return u"%.*g %s" % (precision, timespan * scaling[order], units[order])
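# Illustrative usage:
#   >>> format_time(75)
#   '1min 15s'
#   >>> format_time(0.00123)
#   '1.23 ms'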
def file_finder(folder, pattern, case=False, return_last_file=False):
orig_files = os.scandir(folder)
case_flag = re.IGNORECASE if not case else 0
files = [(folder + x.name, x.stat().st_mtime)
for x in orig_files if re.search(pattern, x.name, case_flag) is not None]
df = pd.DataFrame({'file': [x[0] for x in files], "time": [datetime.datetime.fromtimestamp(
x[1]) for x in files]}, columns=['file', 'time']).sort_values('time')
if return_last_file:
return df.iloc[-1].file
return df
def read_file_as_string(filename):
with open(filename, 'r') as fil:
return fil.read()
def extract_text_from_pdf(url="", session=None, start_page=None, end_page=None, refresh=False):
"""Depends on ghostview being installed
"""
res = get_request_from_session(url=url, session=session, refresh=refresh)
with open('temptemp.pdf', 'wb') as fp:
fp.write(res.content)
tmp = tempfile.NamedTemporaryFile(delete=False)
base_cmd = "gs -sDEVICE=txtwrite"
if isinstance(start_page, int):
base_cmd += ' -dFirstPage=%d' % start_page
if isinstance(end_page, int):
base_cmd += ' -dLastPage=%d' % end_page
full_cmd = "%s -o %s 'temptemp.pdf'" % (base_cmd, tmp.name)
_ = os.popen(full_cmd).read()
tmp = read_file_as_string(tmp.name)
return tmp
def bst_start_date(dt=''):
"""In the UK the clocks go forward 1 hour at 1am on the last Sunday in March, and back 1 hour at 2am on the last Sunday in October
https://www.gov.uk/when-do-the-clocks-change
>>> bst_start_date('30-oct-2023')
Timestamp('2023-03-26 00:00:00')
>>> bst_start_date('30-oct-2022')
Timestamp('2022-03-27 00:00:00')
>>> bst_start_date('30-oct-2020')
Timestamp('2020-03-29 00:00:00')
>>> bst_start_date('30-oct-2021')
Timestamp('2021-03-28 00:00:00')
"""
if isinstance(dt, str):
if len(dt):
dt = pd.to_datetime(dt)
else:
dt = pd.to_datetime('today')
if not isinstance(dt, pd.Timestamp):
raise Exception("dt has to be str or timestamp!")
year = dt.year
dt = pd.to_datetime('%d-3-01' % year)
me = dt + pd.offsets.MonthEnd()
dt_name = me.day_name()
while dt_name != 'Sunday':
me += pd.DateOffset(days=-1)
dt_name = me.day_name()
return me
def bst_end_date(dt=''):
"""In the UK the clocks go forward 1 hour at 1am on the last Sunday in March, and back 1 hour at 2am on the last Sunday in October
https://www.gov.uk/when-do-the-clocks-change
>>> bst_end_date('30-oct-2023')
Timestamp('2023-10-29 00:00:00')
>>> bst_end_date('30-oct-2022')
Timestamp('2022-10-30 00:00:00')
>>> bst_end_date('30-oct-2020')
Timestamp('2020-10-25 00:00:00')
>>> bst_end_date('30-oct-2021')
Timestamp('2021-10-31 00:00:00')
"""
if isinstance(dt, str):
if len(dt):
dt = pd.to_datetime(dt)
else:
dt = pd.to_datetime('today')
if not isinstance(dt, pd.Timestamp):
raise Exception("dt has to be str or timestamp!")
year = dt.year
dt = pd.to_datetime('%d-10-01' % year)
me = dt + pd.offsets.MonthEnd()
dt_name = me.day_name()
while dt_name != 'Sunday':
me += pd.DateOffset(days=-1)
dt_name = me.day_name()
return me
def is_bst(dt=''):
"""
Is it British Summer Time or not?
>>> is_bst('2022-01-10')
False
>>> is_bst('2022-05-18')
True
"""
if isinstance(dt, str):
if len(dt):
dt = | pd.to_datetime(dt) | pandas.to_datetime |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# GUI module generated by PAGE version 5.0.3
# in conjunction with Tcl version 8.6
# Feb 08, 2021 09:54:12 PM +03 platform: Windows NT
# -*- coding: utf-8 -*-
from logging import disable
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome.options import Options
import chromedriver_autoinstaller
import time
import pandas as pd
from datetime import timedelta
from datetime import datetime
# import tkinter as tk
import tkinter as tk
from tkinter import messagebox, DISABLED, NORMAL, simpledialog
from tkinter.filedialog import askopenfilename
import sys
import os
# Driver setup
path = "driver/config.csv"
if not os.path.exists(path):
data={
"username":[],
"password":[],
"late_time":[]
}
df = pd.DataFrame(data=data)
df.to_csv("driver/config.csv", encoding="utf-8-sig", index=False)
import madrasti_fetcher_support
def vp_start_gui():
'''Starting point when module is the main routine.'''
global val, w, root
root = tk.Tk()
top = Toplevel1 (root)
madrasti_fetcher_support.init(root, top)
root.mainloop()
w = None
def create_Toplevel1(rt, *args, **kwargs):
'''Starting point when module is imported by another module.
Correct form of call: 'create_Toplevel1(root, *args, **kwargs)' .'''
global w, w_win, root
#rt = root
root = rt
w = tk.Toplevel (root)
top = Toplevel1 (w)
madrasti_fetcher_support.init(w, top, *args, **kwargs)
return (w, top)
def destroy_Toplevel1():
global w
w.destroy()
w = None
class Toplevel1:
def donate(self):
window = tk.Toplevel()
window.geometry("290x294+688+270")
window.minsize(148, 1)
window.maxsize(1684, 1025)
window.resizable(1, 1)
window.title("BuY me A COFFEE!")
window.configure(background="#d9d9d9")
window.iconbitmap("driver/icon-rules-book-96.ico")
self.Btndonate = tk.Button(window)
self.Btndonate.place(relx=-0.034, rely=-0.035, height=303, width=306)
self.Btndonate.configure(activebackground="#ececec")
self.Btndonate.configure(activeforeground="#000000")
self.Btndonate.configure(background="#d9d9d9")
self.Btndonate.configure(disabledforeground="#a3a3a3")
self.Btndonate.configure(foreground="#000000")
self.Btndonate.configure(highlightbackground="#d9d9d9")
self.Btndonate.configure(highlightcolor="black")
global _img22
_img22 = tk.PhotoImage(file="driver/qr.png")
self.Btndonate.configure(image=_img22)
self.Btndonate.configure(pady="0")
self.Btndonate.configure(relief="flat")
self.Btndonate.configure(command=window.destroy)
def startapp(self):
global driver, late_time
# messagebox.showerror("xx","errrrrrrrrrrrrr")
self.Btnstart["state"] = DISABLED
self.Btnkillchrome["state"] = NORMAL
self.Btngheyab["state"] = NORMAL
self.Btnaddgeyab["state"] = NORMAL
self.Btnhw["state"] = NORMAL
self.Btnactive["state"] = NORMAL
caps = DesiredCapabilities().CHROME
caps["pageLoadStrategy"] = "normal"
options = Options()
options.add_argument("--log-level=3")
chromedriver_autoinstaller.install(True) # install driver
driver = webdriver.Chrome(options=options, desired_capabilities=caps)
# driver = webdriver.Chrome(options=options, desired_capabilities=caps, executable_path='driver/chromedriver.exe')
driver.implicitly_wait(30)
base_url = "https://www.google.com/"
verificationErrors = []
accept_next_alert = True
df_settings = pd.read_csv("driver/config.csv")
username = str(df_settings.iloc[0,0])
password = str(df_settings.iloc[0,1])
late_time = df_settings.iloc[0,2]
if username == " " and password == " ":
driver.get("https://schools.madrasati.sa/")
else:
driver.get("https://schools.madrasati.sa/")
time.sleep(4)
# Login
driver.find_element_by_xpath("//*[@id='Beneficiaries']/div/div[1]/div[2]/a/div/div").click()
driver.find_element_by_id("i0116").send_keys(username)
time.sleep(1)
driver.find_element_by_id("idSIButton9").click()
time.sleep(1)
driver.find_element_by_id("i0118").send_keys(password)
time.sleep(1)
driver.find_element_by_id("idSIButton9").click()
time.sleep(1)
driver.find_element_by_id("idSIButton9").click()
def settings(self):
_username_get = simpledialog.askstring("اعدادات","ادخل البريد الالكتروني لحسابك في منصة مدرستي:")
_username = [_username_get]
if _username_get == None:
pass
else:
_password_get = simpledialog.askstring("<PASSWORD>","ادخل كلمة المرور", show="*")
_password = [_password_get]
if _password_get == None:
pass
else:
_late_time_get = simpledialog.askstring("اعدادات","يرصد للطالب (تأخر) اذا مر على زمن الحصة الدراسية (ضع الرقم بالدقائق)")
if _late_time_get == "" or (_late_time_get == None) or len(_late_time_get) > 2:
_late_time_get = "15"
_late_time = [_late_time_get]
if (_username_get == "") or (_password_get == ""):
_username = " "
_password = " "
if (_username_get == None) or (_password_get == None):
pass
else:
data={
"username":_username,
"password":_password,
"late_time":_late_time
}
df_settings = pd.DataFrame(data)
df_settings.to_csv("driver/config.csv", encoding = "UTF-8-sig", index = False)
messagebox.showinfo("رسالة","تم حفظ الاعدادات بنجاح.")
def reformat_time(self):
# Clean DF for reformat Time
df = pd.DataFrame()
        file_path = askopenfilename(title="choose a file", initialdir="Downloads", filetypes=[("Csv file", "*.csv")])
df = pd.read_csv(f"{file_path}", sep="\t" ,encoding="utf-16le", engine="python")
df.reset_index(inplace=True)
df.columns = ["Full Name","User Action","Timestamp"]
# print(df)
# df = pd.read_clipboard()
clean_df = pd.DataFrame(columns=["Full Name", "User Action", "Timestamp"])
# clean_df.columns= ["Full Name", "User Action", "Timestamp"]
stud_names = df["Full Name"].tolist()
stud_names = set(stud_names)
df = df.drop(df[df["User Action"] == "left"].index)
df = df.drop(df[df["User Action"] == "تمت المغادرة من قبل"].index)
# remove teacher name from list
for student in stud_names:
temp = df.loc[df['Full Name'] == student]
clean_df = clean_df.append(temp.iloc[:1])
# Clean TimeStamp and assign AM_PM
timestamp = clean_df["Timestamp"].tolist()
timestamp_sep = [elm.split() for elm in timestamp]
am_pm = timestamp_sep[0][2]
if am_pm == "ص":
am_pm = "AM"
elif am_pm == "م":
am_pm = "PM"
student_timestamp = [elm[1] for elm in timestamp_sep]
student_timestamp = [elm[0:5] for elm in student_timestamp]
err_time = student_timestamp[0]
if err_time[-1] == ":":
student_timestamp = ["0" + elm[0:4] + " " + am_pm for elm in student_timestamp]
# student_timestamp = ["0"+elm[0] for elm in student_timestamp]
student_timestamp = [elm[0:5] + " " + am_pm for elm in student_timestamp]
# if len(student_timestamp[0]) == 4:
# student_timestamp = ["0"+elm[0] for elm in student_timestamp]
# print(class_time)
# if len(student_timestamp[0]) == 4:
# h=student_timestamp[0]
# if int(h[0]) < 9:
# student_timestamp = [str(int(elm[0]) + 12)+elm[1:] for elm in student_timestamp]
# elif int(h[0]) == 9 :
# student_timestamp = ["0"+elm[0] for elm in student_timestamp]
clean_df["Timestamp"] = student_timestamp
return clean_df
# Clean DF for reformat Time
def absents_process(self):
date = datetime.now().strftime("%Y-%m-%d")
ask = ""
df_late = pd.DataFrame()
absent_list_studs = []
late_list_studs = []
late_time_studs = []
late_join_time_studs = []
file_is_incorrect = True
while file_is_incorrect:
copy_incorrect = True
while copy_incorrect:
try:
# ask = messagebox.askokcancel("تجهيز البيانات", "اختر ملف التحضير تيمز واضغط موافق، لالغاء العملية اختر الغاء")
# if ask == False:
# break
attend_df = self.reformat_time()
# print(attend_df)
copy_incorrect = False
except:
# print("❌ BE SURE TO COPY ALL -3- Columns .. Try again\n")
# messagebox.showerror("تحذير","تأكد من نسخ الأعمدة بشكل صحيح .. حاول مرة اخرى ❌")
copy_incorrect = True
break
if copy_incorrect == True:
break
# get Class time
class_name_get = driver.find_element_by_xpath("//*[@class='col-md-5 text-primary text-right']/label[1]")
shoba_name_get = driver.find_element_by_xpath("//*[@class='col-md-5 text-primary text-right']/label[2]")
class_name = class_name_get.get_attribute('innerHTML')
shoba_name = shoba_name_get.get_attribute('innerHTML')
class_time_get = driver.find_element_by_xpath("//*[@class='time']")
class_time = class_time_get.get_attribute("outerHTML")
class_time = class_time.split()
if class_time[6] == "ص":
am_pm = "AM"
else:
am_pm = "PM"
class_time = class_time[5]
class_time = class_time + " " + am_pm
# attend LIST ready
attend_list = attend_df["Full Name"].tolist()
attend_list = set(attend_list)
attend_list = [student_name.split() for student_name in attend_list]
attend_list = [student_name[0] + " " + student_name[-1] for student_name in attend_list]
attend_df["Full Name"] = attend_list
page_incorrect = True
while page_incorrect:
try:
list_name = []
for elem in driver.find_elements_by_xpath('.//*[@class="Maintdtitle"]'):
list_name.append(elem.text)
page_incorrect = False
except:
# print("❌ Wrong Page .. try again\n")
messagebox.showerror(" رسالة خطأ","الصفحة خاطئة .. توجه لصفحة تحضير الطلاب وحاول مرة اخرى ❌")
student_class = [student_name.split() for student_name in list_name]
student_class = [student_name[0] + " " + student_name[-1] for student_name in student_class]
# Chick if the data are correct.
stud_count = 0
for i in range(5):
if attend_list[i] in student_class:
stud_count += 1
if stud_count >= 4:
file_is_incorrect = False
# print("✅ Data imported Successfully")
messagebox.showinfo(" رسالة","رائع تم سحب البيانات بنجاح .. انقر لتحضير الطلاب ✅")
else:
# print("❌ Wrong file! .. Try again\n")
messagebox.showerror(" رسالة خطأ","ملف الشعبة غير صحيح .. حاول مرة اخرى ❌")
# ##DELETE LATER
# _ = input("ALL EXIST _ PRESS")
# for i in range(len(student_class)):
# drop = Select(driver.find_element_by_xpath(f'.//*[@id="List_{i}__AttendStatusId"]'))
# drop.select_by_index(0)
# ## DELETE LATER
if copy_incorrect == True:
# exit the function
pass
else:
for i in range(len(student_class)):
if student_class[i] in attend_list:
# stu_attend_time
# print(student_class[i])
row = attend_df[attend_df["Full Name"] == student_class[i]]
# print(row)
data_row= row.Timestamp
data_row = data_row.values
stu_attend_time = data_row[0]
# stu_attend_time_h=
# stu_attend_time_m=
# print(stu_attend_time)
# Diff time between Class Time and student attend time in MIN
# current_time = timedelta(hours=int(class_time[0:2]), minutes=int(class_time[3:5]))
# stud_time = timedelta(hours=int(stu_attend_time[0:2]), minutes=int(stu_attend_time[3:5]))
# diff_time = stud_time - current_time
# diff_time_min = diff_time.total_seconds() / 60
fmt="%I:%M %p"
diff_time_min = datetime.strptime(stu_attend_time, fmt) - datetime.strptime(class_time, fmt)
diff_time_min = diff_time_min.total_seconds() / 60
# print(diff_time_min)
if int(diff_time_min) > int(late_time):
# تسجيل حاضر متأخر
drop = Select(driver.find_element_by_xpath(f'.//*[@id="List_{i}__AttendStatusId"]'))
drop.select_by_index(3)
# Lists for late students
late_list_studs.append(list_name[i])
late_join_time_studs.append(stu_attend_time)
late_time_studs.append(diff_time_min)
else:
# تسجيل حاضر
drop = Select(driver.find_element_by_xpath(f'.//*[@id="List_{i}__AttendStatusId"]'))
drop.select_by_index(0)
else:
# تسجيل غياب
drop = Select(driver.find_element_by_xpath(f'.//*[@id="List_{i}__AttendStatusId"]'))
drop.select_by_index(2)
#lists for absents
absent_list_studs.append(list_name[i])
real_late_studs_count = len(late_list_studs)
real_absent_studs_count = len(absent_list_studs)
if len(absent_list_studs) > len(late_list_studs):
extra_lines = len(absent_list_studs) - len(late_list_studs)
for i in range(extra_lines):
late_list_studs.append(" ")
late_join_time_studs.append(" ")
late_time_studs.append(" ")
if len(late_list_studs) > len(absent_list_studs):
extra_lines = len(late_list_studs) - len(absent_list_studs)
for i in range(extra_lines):
absent_list_studs.append(" ")
#Get date from PAGE
elem = driver.find_element_by_xpath('.//*[@id="lblDay"]')
pg_today = elem.get_attribute("innerHTML")
date_infilename = pg_today.replace("/", "_")
# df_absents[f"{shoba_name} - الطلاب المتغيبين"] = absent_list_studs
new_row = pd.DataFrame({f"{shoba_name} - ({real_late_studs_count}) الطلاب المتأخرين":f'{date}',
'بداية الدرس':' ',
'زمن الدخول':' ',
'التأخر بالدقائق':' ',
f'({real_absent_studs_count}) الطلاب المتغيبين':f'{date}'},
index =[0])
df_late[f"{shoba_name} - ({real_late_studs_count}) الطلاب المتأخرين"] = late_list_studs
df_late["بداية الدرس"] = class_time
df_late["زمن الدخول"] = late_join_time_studs
df_late["التأخر بالدقائق"] = late_time_studs
df_late[f"({real_absent_studs_count}) الطلاب المتغيبين"] = absent_list_studs
df_late = pd.concat([new_row, df_late]).reset_index(drop = True)
# df_absents.to_csv(f"الغياب-{shoba_name}-{class_name}.csv", encoding= "UTF-8-sig")
df_late.to_csv(f"تقرير الحضور-{shoba_name}-{date_infilename}-{class_name}.csv", encoding= "UTF-8-sig")
# print("DONE 👍\n\n")
messagebox.showinfo(" رسالة","- 👍 تم تحضير الطلاب بنجاح وحفظ ملف التقرير .. لاتنسى نقر زر حفظ بالمنصة")
ask = messagebox.askyesno("رسالة","هل تريد اضافة البيانات في كشف متابعة حضور الطلاب ؟")
if ask == True:
list_absent_motaba = []
shoba_name = simpledialog.askinteger("اسم الشعبة","لاضافة البيانات في كشف متابعة حضور الطلاب، ادخل رقم الشعبة")
df_absent = pd.read_csv(f'كشوف/شعبة {shoba_name} - حضور.csv')
if len(df_absent) == 0:
df_absent["اسم الطالب"] = list_name
#fill absent list with " "
for i in range(len(list_name)):
list_absent_motaba.append(" ")
for i in range(len(list_name)):
elem = driver.find_element_by_xpath(f'.//*[@id="List_{i}__AttendStatusId"]')
state = elem.get_attribute('value')
list_absent_motaba[i] = state
for i in range(len(list_absent_motaba)):
if list_absent_motaba[i] == "0":
list_absent_motaba[i] = "ح"
elif list_absent_motaba[i] == "1":
list_absent_motaba[i] = "غ بعذر"
elif list_absent_motaba[i] == "2":
list_absent_motaba[i] = "غ"
elif list_absent_motaba[i] == "3":
list_absent_motaba[i] = "ت"
elif list_absent_motaba[i] == "4":
list_absent_motaba[i] = "ت بعذر"
# elem = driver.find_element_by_xpath('.//*[@id="lblDay"]')
# pg_today = elem.get_attribute("innerHTML")
df_absent[f"{pg_today}"] = list_absent_motaba
df_absent.to_csv(f"كشوف/شعبة {shoba_name} - حضور.csv", encoding= "UTF-8-sig", index = False)
else:
pass
def addgeyab(self):
ask = messagebox.askokcancel("رسالة", "هل تريد اضافة الغياب لكشف المتابعة مباشرة ؟ توجه للصفحة وابدا")
if ask == False:
pass
else:
# Get names From page
page_incorrect = True
while page_incorrect:
try:
list_name = []
for elem in driver.find_elements_by_xpath('.//*[@class="Maintdtitle"]'):
list_name.append(elem.text)
page_incorrect = False
except:
# print("❌ Wrong Page .. try again\n")
messagebox.showerror(" رسالة خطأ","الصفحة خاطئة .. توجه لصفحة تحضير الطلاب وحاول مرة اخرى ❌")
list_absent_motaba = []
shoba_name = simpledialog.askinteger("اسم الشعبة","لاضافة البيانات في كشف متابعة حضور الطلاب، ادخل رقم الشعبة")
df_absent = pd.read_csv(f'كشوف/شعبة {shoba_name} - حضور.csv')
if len(df_absent) == 0:
df_absent["اسم الطالب"] = list_name
# NOT WORK AFTER WebSite Last Update in 3-2-2021
#fill absent list with " "
# for i in range(len(list_name)):
# list_absent_motaba.append(" ")
# for i in range(len(list_name)):
# elem = driver.find_element_by_xpath(f'.//*[@id="List_{i}__AttendStatusId"]')
# state = elem.get_attribute('value')
# list_absent_motaba[i] = state
elms = driver.find_elements_by_class_name('form-control')
for l in elms:
state = l.get_attribute('value')
list_absent_motaba.append(state)
for i in range(len(list_absent_motaba)):
if list_absent_motaba[i] == "0":
list_absent_motaba[i] = "ح"
elif list_absent_motaba[i] == "1":
list_absent_motaba[i] = "غ بعذر"
elif list_absent_motaba[i] == "2":
list_absent_motaba[i] = "غ"
elif list_absent_motaba[i] == "3":
list_absent_motaba[i] = "ت"
elif list_absent_motaba[i] == "4":
list_absent_motaba[i] = "ت بعذر"
elem = driver.find_element_by_xpath('.//*[@id="lblDay"]')
pg_today = elem.get_attribute("innerHTML")
df_absent[f"{pg_today}"] = list_absent_motaba
df_absent.to_csv(f"كشوف/شعبة {shoba_name} - حضور.csv", encoding= "UTF-8-sig", index = False)
def home_work(self):
ask = ""
date = datetime.now().strftime("%Y-%m-%d")
result = pd.DataFrame()
list_name = []
list_hw= []
list_mark = []
ask = messagebox.askokcancel("تجهيز البيانات", "توجه لصفحة الواجب المراد حفظه واضغط موافق، لالغاء العملية اختر الغاء")
if ask == False:
# exit from function
pass
else:
shoba_name = simpledialog.askstring("رقم الشعبة","الرجاء ادخال رقم الشعبة لهذه الصفحة")
try:
driver.switch_to.window(window_name=driver.window_handles[1])
time.sleep(1)
#get home work title
elem = driver.find_element_by_xpath('.//*[@class="col-md-10"]')
hw_title = elem.get_attribute("innerHTML")
hw_title = hw_title.split()
hw_title = " ".join(hw_title)
for elem in driver.find_elements_by_xpath('.//span[@class="Maintdtitle"]'):
list_name.append(elem.text)
for elem in driver.find_elements_by_xpath('.//span[@class="smalldata"]'):
list_hw.append(elem.text)
for i in list_hw:
index = list_hw.index(i)
if i != 'استعراض الحل' and len(i) <= 12:
del list_hw[index]
for i in list_hw:
if i == 'استعراض الحل':
index = list_hw.index(i)
list_hw[index] = 'تم حل الواجب'
for i in list_hw: #تنظيف اسم الشعبة
index = list_hw.index(i)
                if i != 'تم حل الواجب' and i != 'لم يتم حل الواجب':
del list_hw[index]
#fill mark list with ZERO
for i in range(len(list_name)):
list_mark.append(0)
for i in range(len(list_name)):
if list_hw[i] == 'تم حل الواجب':
elem = driver.find_element_by_xpath(f'.//*[@id="List_{i}__Grade"]')
mark = elem.get_attribute('value')
list_mark[i] = mark
result["اسم الطالب"] = list_name
result["حالة الواجب"] = list_hw
result["درجة الواجب"] = list_mark
# print(list_name, len(list_name))
# print(list_hw, len(list_hw))
# print(list_mark, len(list_mark))
driver.close()
driver.switch_to.window(window_name=driver.window_handles[0])
result.to_csv(f"واجب-{hw_title}-شعبة {shoba_name}-{date}.csv", encoding = "UTF-8-sig")
# print("DONE 👍\n\n")
messagebox.showinfo("رسالة",f"👍 رائع تم حفظ التقرير لواجب {hw_title} بنجاح")
ask = messagebox.askyesno("رسالة","هل تريد اضافة البيانات في كشف متابعة واجبات الطلاب ؟")
if ask == True:
list_hw_motaba = []
list_hw_motaba_last = []
df_absent = pd.read_csv(f'كشوف/شعبة {shoba_name} - حضور.csv')
df_hw = pd.read_csv(f'كشوف/شعبة {shoba_name} - واجبات.csv')
main_names = df_absent["اسم الطالب"].tolist()
if len(df_hw) == 0:
df_hw["اسم الطالب"] = main_names
# df_hw["اسم الطالب"] = list_name
#fill hw list with " "
for i in range(len(list_name)):
list_hw_motaba.append(str(list_hw[i]) + " " + str(list_mark[i]))
dictionary = dict(zip(list_name, list_hw_motaba))
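                    # Match roster names against the names scraped from this page by counting
                    # shared words: 3 or more shared words is treated as the same student;
                    # students with no match at all are marked "X" below.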
for i in range(df_hw.shape[0]):
name_foucs = df_hw.at[i,"اسم الطالب"]
for x in list_name:
match=len([matched for matched in x.split() if matched in name_foucs.split()])
if match >=3:
list_hw_motaba_last.append(dictionary[x])
break
if x == list_name[-1]:
list_hw_motaba_last.append("X")
# df_hw[f"{hw_title}"] = list_hw_motaba
df_hw[f"{hw_title}"] = list_hw_motaba_last
df_hw.to_csv(f"كشوف/شعبة {shoba_name} - واجبات.csv", encoding= "UTF-8-sig", index = False)
else:
pass
except:
messagebox.showerror(" رسالة خطأ","الصفحة خاطئة .. توجه لصفحة الواجب وحاول مرة اخرى ❌")
def activities(self):
ask = ""
date = datetime.now().strftime("%Y-%m-%d")
result = | pd.DataFrame() | pandas.DataFrame |
'''
May 2020 by <NAME>
<EMAIL>
https://www.github.com/sebbarb/
'''
import feather
import pandas as pd
import numpy as np
from hyperparameters import Hyperparameters
from pdb import set_trace as bp
def main():
hp = Hyperparameters()
# Load data
#df = feather.read_dataframe(hp.data_dir + 'Py_VARIANZ_2012_v3-1.feather')
df = pd.read_feather(hp.data_dir + 'Py_VARIANZ_2012_v3-1.feather')
# Exclude
df = df[~df['ph_loopdiuretics_prior_5yrs_3evts'].astype(bool)]
df = df[~df['ph_antianginals_prior_5yrs_3evts' ].astype(bool)]
df.dropna(subset=['end_fu_date'], inplace=True)
# Adjust data types
df['nhi_age'] = df['nhi_age'].astype(int)
df['gender_code'] = df['gender_code'].astype(bool)
df['en_prtsd_eth'] = df['en_prtsd_eth'].astype(int)
df['en_nzdep_q'] = df['en_nzdep_q'].astype(int)
df['hx_vdr_diabetes'] = df['hx_vdr_diabetes'].astype(bool)
df['hx_af'] = df['hx_af'].astype(bool)
df['ph_bp_lowering_prior_6mths'] = df['ph_bp_lowering_prior_6mths'].astype(bool)
df['ph_lipid_lowering_prior_6mths'] = df['ph_lipid_lowering_prior_6mths'].astype(bool)
df['ph_anticoagulants_prior_6mths'] = df['ph_anticoagulants_prior_6mths'].astype(bool)
df['ph_antiplatelets_prior_6mths'] = df['ph_antiplatelets_prior_6mths'].astype(bool)
df['out_broad_cvd_adm_date'] = | pd.to_datetime(df['out_broad_cvd_adm_date'], format='%Y-%m-%d', errors='coerce') | pandas.to_datetime |
import pandas as pd
import numpy as np
import ml_metrics as metrics
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import log_loss
np.random.seed(131)
path = '../Data/'
NumK = 11
print("read training data")
train = pd.read_csv(path+"train.csv")
label = train['target']
trainID = train['id']
del train['id']
del train['target']
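# A single large-k KNN: k = 2**NumK = 2048 neighbours, so predict_proba below is
# essentially a smoothed estimate of the class distribution over a very wide
# neighbourhood rather than a sharp nearest-neighbour vote.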
clf = KNeighborsClassifier(n_neighbors=2**NumK,n_jobs=-1)
clf.fit(train.values, label)
print("read test data")
test = pd.read_csv(path+"test.csv")
ID = test['id']
del test['id']
preds = clf.predict_proba(test)
sample = pd.read_csv(path+'sampleSubmission.csv')
print("writing submission data")
submission = | pd.DataFrame(preds, index=ID, columns=sample.columns[1:]) | pandas.DataFrame |
"""Extract information from the GeoLife dataset"""
import numpy as np
import os
import pandas as pd
from dateutil import parser
from time import time
from joblib import Parallel, delayed
def decode_str(s):
return s.decode('utf-8')
class GeoLifeExtractor(object):
def __init__(self, base_path='./geolife-gps-trajectory-dataset/'):
self.base_path = base_path
def extract_user_information(self, user):
#
# Get users dataset files
#
files = []
user_dir = os.path.join(self.base_path, f'{user}/Trajectory/')
for (_, _, filenames) in os.walk(user_dir):
files.extend([f for f in filenames if f.endswith('.plt')])
#
# Build a pandas DataFrame gathering data for each file of the user
#
df = None
columns = ['latitude', 'longitude', 'Unknown1', 'Unknown2', 'Unknown3', 'date', 'time']
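        # In the standard GeoLife .plt layout these fields are: latitude, longitude,
        # an unused constant 0, altitude in feet, and the date as days since
        # 1899-12-30, followed by the date and time strings -- hence the
        # 'Unknown*' placeholders above.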
for file in files:
data = np.genfromtxt(
os.path.join(user_dir, file),
delimiter=',',
skip_header=6,
converters={
0: float,
1: float,
2: float,
3: float,
4: float,
5: decode_str,
6: decode_str,
}
)
if df is None:
df = pd.DataFrame([list(l) for l in data], columns=columns)
else:
df_aux = pd.DataFrame([list(l) for l in data], columns=columns)
df = pd.concat([df, df_aux])
df['user'] = user
print(f'{user}: Number of locations: {len(df)}')
print(f'{user}: Trajectories from {df.date.min()} until {df.date.max()}')
df.to_pickle(f'.tmp_user_tracjectories_{user}.pkl')
return df
def extract_all_users_information(self, n_jobs=1):
self.users = [
d
for d in os.listdir(self.base_path)
if os.path.isdir(os.path.join(self.base_path, d))
]
t = time()
dfs = Parallel(n_jobs=n_jobs)(
delayed(self.extract_user_information)(user)
for user in self.users
)
print(f'Took {time() - t} gather information from all users')
return | pd.concat(dfs) | pandas.concat |
'''
This set of functions creates, loads, encodes, and saves DataFrames
of each sequence.
Pos: H(6), K(8), R(14)
Neg: D(2), E(3)
'''
import numpy as np
import pandas as pd
from sklearn import preprocessing
import plot_functions
def ordinal_decode(seq):
'ordinal to amino acid sequence'
AAlist=np.array(list("ACDEFGHIKLMNPQRSTVWY"))
enc_OH=preprocessing.OrdinalEncoder().fit(AAlist.reshape(-1,1))
AAlist=enc_OH.inverse_transform(seq.reshape(-1, 1)).flatten()
AA_sequence_list=''.join(AAlist)
return AA_sequence_list
def get_charge(seq):
'return # pos and # neg AA in seq'
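    # e.g. the ordinal sequence for "HKDE" is [6, 8, 2, 3] and returns (2, 2):
    # two positive residues (H, K) and two negative ones (D, E)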
seq=np.array(seq)
n_pos=sum(np.where((seq==6)|(seq==8)|(seq==14),1,0))
n_neg=sum(np.where((seq==2)|(seq==3),1,0))
return n_pos, n_neg
def make_datasets(n_samples=1000,seq_len=10):
'Pos: 0-19 stay, 20-29: H, 30-39:K, 40-49:R'
    pos_data=np.random.randint(low=0,high=50,size=[n_samples,seq_len])  # high is exclusive: values 0-49 as documented
pos_list=[[]]*len(pos_data)
for i, seq in enumerate(pos_data):
seq_adj=np.where(((seq>=20)&(seq<30)),6,seq)
seq_adj=np.where(((seq_adj>=30)&(seq_adj<40)),8,seq_adj)
seq_adj=np.where(((seq_adj>=40)&(seq_adj<50)),14,seq_adj)
AA_seq=ordinal_decode(seq_adj)
n_pos,n_neg=get_charge(seq_adj)
pos_list[i]=[list(seq_adj),AA_seq,n_pos,n_neg]
pos_df=pd.DataFrame(pos_list)
pos_df.columns=['Ordinal','AA','N_Pos','N_Neg']
pos_df['Class']='Positive'
'Neg: 0-19 stay, 20-29:D, 30-39:E'
    neg_data=np.random.randint(low=0,high=40,size=[n_samples,seq_len])  # high is exclusive: values 0-39 as documented
neg_list=[[]]*len(neg_data)
for i, seq in enumerate(neg_data):
seq_adj=np.where(((seq>=20)&(seq<30)),2,seq)
seq_adj=np.where(((seq_adj>=30)&(seq_adj<40)),3,seq_adj)
AA_seq=ordinal_decode(seq_adj)
n_pos,n_neg=get_charge(seq_adj)
neg_list[i]=[list(seq_adj),AA_seq,n_pos,n_neg]
neg_df=pd.DataFrame(neg_list)
neg_df.columns=['Ordinal','AA','N_Pos','N_Neg']
neg_df['Class']='Negative'
df= | pd.concat([pos_df,neg_df],ignore_index=True) | pandas.concat |
"""unit test for loanpy.loanfinder.py (2.0 BETA) for pytest 7.1.1"""
from inspect import ismethod
from os import remove
from pathlib import Path
from unittest.mock import patch, call
from pandas import DataFrame, RangeIndex, Series, read_csv
from pandas.testing import (assert_frame_equal, assert_index_equal,
assert_series_equal)
from pytest import raises
from loanpy.loanfinder import Search, gen, read_data, NoPhonMatch
from loanpy import loanfinder as lf
def test_read_data():
"""test if data is being read correctly"""
# setup expected outcome, path, input-dataframe, mock pandas.read_csv
srsexp = Series(["a", "b", "c"], name="col1", index=[0, 1, 1])
path = Path(__file__).parent / "test_read_data.csv"
dfin = DataFrame({"col1": ["a", "b, c", "wrong clusters",
"wrong phonotactics"], "col2": [1, 2, 3, 4]})
with patch("loanpy.loanfinder.read_csv") as read_csv_mock:
read_csv_mock.return_value = dfin
# assert that the actual outcome equals the expected outcome
assert_series_equal(read_data(path, "col1"), srsexp)
# assert mock call to read_csv_mock was correct
assert read_csv_mock.call_args_list[0] == call(
path, encoding="utf-8", usecols=["col1"])
# test read recip
# setup: overwrite expected outcome and input-dataframe, mock
# pandas.read_csv
srsexp = Series(["(a)?", "(b|c)"], name="col1", index=[1, 3])
dfin = DataFrame({"col1": ["wrong vowel harmony", "(a)?",
"wrong phonotactics", "(b|c)"], "col2": [1, 2, 3, 4]})
with patch("loanpy.loanfinder.read_csv") as read_csv_mock:
read_csv_mock.return_value = dfin
# assert expected and actual outcome are the same pandas Series
assert_series_equal(read_data(path, "col1"), srsexp)
# assert mock was called with correct input
assert read_csv_mock.call_args_list[0] == call(
path, encoding="utf-8", usecols=["col1"])
# tear down
del path, dfin, srsexp
def test_gen():
"""test if generator yields the right things"""
# set up mock-tqdm (which is a progress bar)
def tqdm_mock(iterable, prefix):
"""this just returns the input and remembers it"""
tqdm_mock.called_with = (iterable, prefix)
return iterable
tqdm = lf.tqdm # remember the original tqdm to plug back in later
lf.tqdm = tqdm_mock # overwrite real tqdm with mock-tqdm function
# set up: create custom class
class SomeMonkeyClass:
def __init__(self):
self.somefunc_called_with = []
def somefunc(self, *args):
arglist = [*args]
self.somefunc_called_with.append(arglist)
return arglist[0] + arglist[1]
# set up: create instance of mock class
somemockclass = SomeMonkeyClass()
# assert generator yields/returns the expected outcome
assert list(gen([2, 3, 4], [4, 5, 6],
somemockclass.somefunc, "lol", "rofl")) == [6, 8, 10]
# assert 2 mock calls: tqdm and somefunc in SomeMonkeyClass
assert tqdm_mock.called_with == ([2, 3, 4], "lol")
assert somemockclass.somefunc_called_with == [
[2, 4, "rofl"], [3, 5, "rofl"], [4, 6, "rofl"]]
# tear down
lf.tqdm = tqdm # plug back in the original tqdm
del tqdm, somemockclass, tqdm_mock, SomeMonkeyClass
def test_init():
"""test if class Search is initiated correctly"""
# set up mock panphon class with mock edit distance
class DistanceMonkey:
def hamming_feature_edit_distance(): pass
# set up mock Adrc class for get_nse
class AdrcMonkey:
def get_nse(self, *args): pass
# set up mock function for semantic distance measure
def mock_gensim_mw():
return "sthsth"
# set up vars 4 exped outcome, set up mock instance of DistanceMonkey class
srsad = Series(["a", "b", "c"], name="adapted", index=[0, 1, 1])
srsrc = Series(["a", "b", "c"], name="adapted", index=[0, 1, 1])
dist_mockinstance = DistanceMonkey()
# set up: mock read_data, mock panphon.Distance mock loanpy.adrc.Adrc
with patch("loanpy.loanfinder.read_data", side_effect=[
srsad, srsrc]) as read_data_mock:
with patch("loanpy.loanfinder.Distance") as Distance_mock:
Distance_mock.return_value = dist_mockinstance
with patch("loanpy.loanfinder.Adrc") as Adrc_mock:
Adrc_mock.return_value = AdrcMonkey
# initiate Search() with mock parameters
mocksearch = Search(
path2donordf="got.csv",
path2recipdf="hun.csv",
donorcol="adapted",
recipcol="reconstructed",
scdictlist_ad="scad.txt",
scdictlist_rc="scrc.txt",
semsim_msr=mock_gensim_mw)
# assert initiation went properly
assert_series_equal(mocksearch.search_in, srsad)
assert_series_equal(mocksearch.search_for, srsrc)
assert mocksearch.phondist == 0
assert ismethod(mocksearch.phondist_msr)
assert mocksearch.donpath == "got.csv"
assert mocksearch.recpath == "hun.csv"
assert mocksearch.doncol == "adapted"
assert mocksearch.reccol == "reconstructed"
assert mocksearch.semsim == 1
assert mocksearch.semsim_msr.__name__ == "mock_gensim_mw"
assert mocksearch.get_nse_ad == AdrcMonkey.get_nse
assert mocksearch.get_nse_rc == AdrcMonkey.get_nse
# double check with __dict__
msdict = mocksearch.__dict__
assert len(msdict) == 12
for i in msdict:
                    if i == "search_in":
                        assert_series_equal(msdict[i], srsad)
                    if i == "search_for":
                        assert_series_equal(msdict[i], srsrc)
if i == "doncol":
assert msdict[i] == "adapted"
if i == "donpath":
assert msdict[i] == "got.csv"
if i == "get_nse_ad":
                        assert msdict[i] == AdrcMonkey.get_nse
if i == "get_nse_rc":
                        assert msdict[i] == AdrcMonkey.get_nse
if i == "phondist":
assert msdict[i] == 0
if i == "phondist_msr":
hmng = dist_mockinstance.hamming_feature_edit_distance
assert msdict[i] == hmng
if i == "reccol" == "reconstructed":
assert msdict[
i] == "reconstructed"
if i == "recpath":
assert msdict[i] == "hun.csv"
if i == "semsim":
assert msdict[i] == 1
if i == "semsim_msr":
assert msdict[i] == mock_gensim_mw
# assert calls
read_data_mock.assert_has_calls(
[call("got.csv", "adapted"),
call("hun.csv", "reconstructed")])
Distance_mock.assert_called_with()
Adrc_mock.assert_has_calls(
[call(scdictlist="scad.txt", mode='adapt'),
call(scdictlist="scrc.txt", mode='reconstruct')])
# assert init runs correctly without entering parameters as well
# set up: mock read_data, mock panphon.Distance mock loanpy.adrc.Adrc
with patch("loanpy.loanfinder.read_data", side_effect=[
srsad, srsrc]) as read_data_mock:
with patch("loanpy.loanfinder.Distance") as Distance_mock:
Distance_mock.return_value = dist_mockinstance
with patch("loanpy.loanfinder.Adrc") as Adrc_mock:
Adrc_mock.return_value = AdrcMonkey
# initiate Search() without any parameters (default params)
mocksearch = Search()
# assert initiation went properly
| assert_series_equal(mocksearch.search_in, srsad) | pandas.testing.assert_series_equal |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils import bin
"""
Blue: #0C5DA5
Green: #00B945
"""
plt.style.use(['science', 'ieee', 'std-colors'])
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
plt.close(fig)
sciblue = '#0C5DA5'
scigreen = '#00B945'
# ----------------------------------------------------------------------------------------------------------------------
"""
NOTE: There are two parts to this analysis:
A. Calculate the mean rmse_z by grouping dataframes.
B. Bin and plot rmse_z by dx.
"""
# ----------------------------------------------------------------------------------------------------------------------
# PART A.
# filepaths
base_dir = '/Users/mackenzie/Desktop/gdpyt-characterization/figure data/grid-overlap/'
path_read = base_dir + 'results/test-coords/'
path_figs = base_dir + 'figs/'
path_results = base_dir + 'results/average/'
fp1 = path_read + 'test_id1_coords_static_grid-overlap-random-z-nl1_percent_overlap.xlsx'
fp2 = path_read + 'test_id2_coords_static_grid-overlap-random-z-nl1_percent_overlap.xlsx'
fp3 = path_read + 'test_id11_coords_SPC_grid-overlap-random-z-nl1_percent_overlap.xlsx'
fp4 = path_read + 'test_id12_coords_SPC_grid-overlap-random-z-nl1_percent_overlap.xlsx'
df1 = pd.read_excel(fp1)
df2 = pd.read_excel(fp2)
df3 = pd.read_excel(fp3)
df4 = pd.read_excel(fp4)
# concat IDPT and SPCT dataframes
dfi = pd.concat([df1, df2], ignore_index=True)
dfs = pd.concat([df3, df4], ignore_index=True)
dfbi = bin.bin_local_rmse_z(dfi,
column_to_bin='z_true',
bins=1,
min_cm=0.5,
z_range=None,
round_to_decimal=4,
df_ground_truth=None,
dropna=True,
error_column='error')
dfbs = bin.bin_local_rmse_z(dfs,
column_to_bin='z_true',
bins=1,
min_cm=0.5,
z_range=None,
round_to_decimal=4,
df_ground_truth=None,
dropna=True,
error_column='error')
dfb_rmse = pd.concat([dfbi, dfbs], ignore_index=True)
dfb_rmse.to_excel(path_results + 'rmse_z_mean_1-bin.xlsx')
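# dfb_rmse now holds a single overall row per method (IDPT from test ids 1-2, SPCT from
# test ids 11-12): the mean rmse_z over the full z-range at min_cm = 0.5.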
# ----------------------------------------------------------------------------------------------------------------------
# PART B.
# filepaths
base_dir = '/Users/mackenzie/Desktop/gdpyt-characterization/publication data/synthetic grid overlap random z nl1/'
path_read = base_dir + 'percent-overlap/results/bin by particle spacing/'
path_figs = base_dir + '/figs/'
fp1 = path_read + 'test_id1_coords_static_global_binned_rmsez_by_particle_spacing.xlsx'
fp2 = path_read + 'test_id2_coords_static_global_binned_rmsez_by_particle_spacing.xlsx'
fp3 = path_read + 'test_id11_coords_SPC_global_binned_rmsez_by_particle_spacing.xlsx'
fp4 = path_read + 'test_id12_coords_SPC_global_binned_rmsez_by_particle_spacing.xlsx'
df1 = pd.read_excel(fp1)
df2 = pd.read_excel(fp2)
df3 = pd.read_excel(fp3)
df4 = | pd.read_excel(fp4) | pandas.read_excel |
import os
import time
import uuid
import yaml
import logging
import shutil
import numpy as np
import pandas as pd
import multiprocessing as mp
from functools import partial
from astropy.time import Time
from .config import Config
from .config import Configuration
from .clusters import find_clusters, filter_clusters_by_length
from .cell import Cell
from .orbit import TestOrbit
from .orbits import Orbits
from .orbits import generateEphemeris
from .orbits import initialOrbitDetermination
from .orbits import differentialCorrection
from .orbits import mergeAndExtendOrbits
from .observatories import getObserverState
from .utils import _initWorker
from .utils import _checkParallel
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
logger = logging.getLogger("thor")
__all__ = [
"rangeAndShift_worker",
"rangeAndShift",
"clusterVelocity",
"clusterVelocity_worker",
"clusterAndLink",
"runTHOROrbit",
"runTHOR",
]
def rangeAndShift_worker(observations, ephemeris, cell_area=10):
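    # Single-exposure worker: build a Cell around the test orbit's predicted sky
    # position, keep only the observations that fall inside it, and project them
    # into the test orbit's frame of motion. Rough (hypothetical) call:
    #   projected = rangeAndShift_worker(obs_one_exposure, eph_one_exposure, cell_area=10)
    # where both inputs share a single mjd_utc value, as the asserts below require.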
assert len(observations["mjd_utc"].unique()) == 1
assert len(ephemeris["mjd_utc"].unique()) == 1
assert observations["mjd_utc"].unique()[0] == ephemeris["mjd_utc"].unique()[0]
observation_time = observations["mjd_utc"].unique()[0]
# Create Cell centered on the sky-plane location of the
# test orbit
cell = Cell(
ephemeris[["RA_deg", "Dec_deg"]].values[0],
observation_time,
area=cell_area,
)
# Grab observations within cell
cell.getObservations(observations)
if len(cell.observations) != 0:
# Create test orbit with state of orbit at visit time
test_orbit = TestOrbit(
ephemeris[["obj_x", "obj_y", "obj_z", "obj_vx", "obj_vy", "obj_vz"]].values[0],
observation_time
)
# Prepare rotation matrices
test_orbit.prepare()
# Apply rotation matrices and transform observations into the orbit's
# frame of motion.
test_orbit.applyToObservations(cell.observations)
projected_observations = cell.observations
else:
projected_observations = pd.DataFrame()
return projected_observations
def clusterVelocity(
obs_ids,
x,
y,
dt,
vx,
vy,
eps=0.005,
min_obs=5,
min_arc_length=1.0,
alg="hotspot_2d",
):
"""
Clusters THOR projection with different velocities
in the projection plane using `~scipy.cluster.DBSCAN`.
Parameters
----------
obs_ids : `~numpy.ndarray' (N)
Observation IDs.
x : `~numpy.ndarray' (N)
Projection space x coordinate in degrees or radians.
y : `~numpy.ndarray' (N)
Projection space y coordinate in degrees or radians.
dt : `~numpy.ndarray' (N)
Change in time from 0th exposure in units of MJD.
vx : `~numpy.ndarray' (N)
Projection space x velocity in units of degrees or radians per day in MJD.
vy : `~numpy.ndarray' (N)
Projection space y velocity in units of degrees or radians per day in MJD.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
See: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html
[Default = 0.005]
min_obs : int, optional
The number of samples (or total weight) in a neighborhood for a
point to be considered as a core point. This includes the point itself.
See: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html
[Default = 5]
min_arc_length : float, optional
Minimum arc length in units of days for a cluster to be accepted.
Returns
-------
list
If clusters are found, will return a list of numpy arrays containing the
observation IDs for each cluster. If no clusters are found, will return np.NaN.
"""
logger.debug(f"cluster: vx={vx} vy={vy} n_obs={len(obs_ids)}")
xx = x - vx * dt
yy = y - vy * dt
X = np.stack((xx, yy), 1)
clusters = find_clusters(X, eps, min_obs, alg=alg)
clusters = filter_clusters_by_length(
clusters, dt, min_obs, min_arc_length,
)
cluster_ids = []
for cluster in clusters:
cluster_ids.append(obs_ids[cluster])
if len(cluster_ids) == 0:
cluster_ids = np.NaN
return cluster_ids
def clusterVelocity_worker(
vx,
vy,
obs_ids=None,
x=None,
y=None,
dt=None,
eps=None,
min_obs=None,
min_arc_length=None,
alg=None
):
"""
Helper function to multiprocess clustering.
"""
cluster_ids = clusterVelocity(
obs_ids,
x,
y,
dt,
vx,
vy,
eps=eps,
min_obs=min_obs,
min_arc_length=min_arc_length,
alg=alg
)
return cluster_ids
def rangeAndShift(
observations,
orbit,
cell_area=10,
backend="PYOORB",
backend_kwargs={},
num_jobs=1,
parallel_backend="mp"
):
"""
Propagate the orbit to all observation times in observations. At each epoch gather a circular region of observations of size cell_area
centered about the location of the orbit on the sky-plane. Transform and project each of the gathered observations into
the frame of motion of the test orbit.
Parameters
----------
observations : `~pandas.DataFrame`
DataFrame containing preprocessed observations.
Should contain the following columns:
obs_id : observation IDs
RA_deg : Right Ascension in degrees.
Dec_deg : Declination in degrees.
RA_sigma_deg : 1-sigma uncertainty for Right Ascension in degrees.
Dec_sigma_deg : 1-sigma uncertainty for Declination in degrees.
observatory_code : MPC observatory code
orbit : `~numpy.ndarray` (6)
Orbit to propagate. If backend is 'THOR', then these orbits must be expressed
as heliocentric ecliptic cartesian elements. If backend is 'PYOORB' orbits may be
expressed in keplerian, cometary or cartesian elements.
cell_area : float, optional
Cell's area in units of square degrees.
[Default = 10]
backend : {'THOR', 'PYOORB'}, optional
Which backend to use.
backend_kwargs : dict, optional
Settings and additional parameters to pass to selected
backend.
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
Returns
-------
projected_observations : {`~pandas.DataFrame`, -1}
Observations dataframe (from cell.observations) with columns containing
projected coordinates.
"""
time_start = time.time()
logger.info("Running range and shift...")
logger.info("Assuming r = {} au".format(orbit.cartesian[0, :3]))
logger.info("Assuming v = {} au per day".format(orbit.cartesian[0, 3:]))
# Build observers dictionary: keys are observatory codes with exposure times (as astropy.time objects)
# as values
observers = {}
for code in observations["observatory_code"].unique():
observers[code] = Time(
observations[observations["observatory_code"].isin([code])]["mjd_utc"].unique(),
format="mjd",
scale="utc"
)
# Propagate test orbit to all times in observations
ephemeris = generateEphemeris(
orbit,
observers,
backend=backend,
backend_kwargs=backend_kwargs,
chunk_size=1,
num_jobs=1,
parallel_backend=parallel_backend
)
if backend == "FINDORB":
observer_states = []
for observatory_code, observation_times in observers.items():
observer_states.append(
getObserverState(
[observatory_code],
observation_times,
frame='ecliptic',
origin='heliocenter',
)
)
observer_states = pd.concat(observer_states)
observer_states.reset_index(
inplace=True,
drop=True
)
ephemeris = ephemeris.join(observer_states[["obs_x", "obs_y", "obs_z", "obs_vx", "obs_vy", "obs_vz"]])
velocity_cols = []
if backend != "PYOORB":
velocity_cols = ["obs_vx", "obs_vy", "obs_vz"]
observations = observations.merge(
ephemeris[["mjd_utc", "observatory_code", "obs_x", "obs_y", "obs_z"] + velocity_cols],
left_on=["mjd_utc", "observatory_code"],
right_on=["mjd_utc", "observatory_code"]
)
# Split the observations into a single dataframe per unique observatory code and observation time
# Basically split the observations into groups of unique exposures
observations_grouped = observations.groupby(by=["observatory_code", "mjd_utc"])
observations_split = [observations_grouped.get_group(g) for g in observations_grouped.groups]
# Do the same for the test orbit's ephemerides
ephemeris_grouped = ephemeris.groupby(by=["observatory_code", "mjd_utc"])
ephemeris_split = [ephemeris_grouped.get_group(g) for g in ephemeris_grouped.groups]
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel:
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
rangeAndShift_worker_ray = ray.remote(rangeAndShift_worker)
rangeAndShift_worker_ray = rangeAndShift_worker_ray.options(
num_returns=1,
num_cpus=1
)
p = []
for observations_i, ephemeris_i in zip(observations_split, ephemeris_split):
p.append(
rangeAndShift_worker_ray.remote(
observations_i,
ephemeris_i,
cell_area=cell_area
)
)
projected_dfs = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
projected_dfs = p.starmap(
partial(
rangeAndShift_worker,
cell_area=cell_area
),
zip(
observations_split,
ephemeris_split,
)
)
p.close()
else:
projected_dfs = []
for observations_i, ephemeris_i in zip(observations_split, ephemeris_split):
projected_df = rangeAndShift_worker(
observations_i,
ephemeris_i,
cell_area=cell_area
)
projected_dfs.append(projected_df)
projected_observations = | pd.concat(projected_dfs) | pandas.concat |
import os
import streamlit as st
import pandas as pd
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from datetime import timedelta
import sqlite3
from sqlite3 import Connection
import plotly.express as px
userDir = os.path.expanduser('~')
URI_SQLITE_DB = userDir + '/BirdNET-Pi/scripts/birds.db'
st.set_page_config(layout='wide')
# Remove whitespace from the top of the page and sidebar
st.markdown("""
<style>
.css-18e3th9 {
padding-top: 2.5rem;
padding-bottom: 10rem;
padding-left: 5rem;
padding-right: 5rem;
}
.css-1d391kg {
padding-top: 3.5rem;
padding-right: 1rem;
padding-bottom: 3.5rem;
padding-left: 1rem;
}
</style>
""", unsafe_allow_html=True)
@st.cache(hash_funcs={Connection: id})
def get_connection(path: str):
return sqlite3.connect(path, check_same_thread=False)
def get_data(conn: Connection):
df1 = pd.read_sql("SELECT * FROM detections", con=conn)
return df1
conn = get_connection(URI_SQLITE_DB)
# Read in the cereal data
# df = load_data()
df = get_data(conn)
df2 = df.copy()
df2['DateTime'] = pd.to_datetime(df2['Date'] + " " + df2['Time'])
df2 = df2.set_index('DateTime')
# Filter on date range
# Date as calendars
# Start_Date = pd.to_datetime(st.sidebar.date_input('Which date do you want to start?', value = df2.index.min()))
# End_Date = pd.to_datetime(st.sidebar.date_input('Which date do you want to end?', value = df2.index.max()))
# Date as slider
Start_Date = pd.to_datetime(df2.index.min()).date()
End_Date = pd.to_datetime(df2.index.max()).date()
cols1, cols2 = st.columns((1, 1))
Date_Slider = cols1.slider('Date Range',
min_value=Start_Date - timedelta(days=1),
max_value=End_Date,
value=(Start_Date,
End_Date)
)
filt = (df2.index >= pd.Timestamp(Date_Slider[0])) & (df2.index <= pd.Timestamp(Date_Slider[1] + timedelta(days=1)))
df2 = df2[filt]
st.write('<style>div.row-widget.stRadio > div{flex-direction:row;justify-content: left;} </style>', unsafe_allow_html=True)
st.write('<style>div.st-bf{flex-direction:column;} div.st-ag{font-weight:bold;padding-left:2px;}</style>', unsafe_allow_html=True)
resample_sel = cols2.radio(
'''
Select Resample Resolution - To downsample and make run faster select longer period,
Daily provides a view on detections at 15 min intervals through the day
''',
('1 minute',
'5 minutes',
'10 minutes',
'Hourly',
'Daily'))
resample_times = {'1 minute': '1min',
'5 minutes': '5min',
'10 minutes': '10min',
'Hourly': '1H',
'Daily': '1D'
}
resample_time = resample_times[resample_sel]
df5 = df2.resample(resample_time)['Com_Name'].aggregate('unique').explode()
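# aggregate('unique') keeps each species at most once per resampled time bin, and
# explode() flattens the per-bin arrays into one row per (bin, species), so the
# value_counts() below counts "bins with at least one detection" per species rather
# than raw detection counts.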
# Create species count for selected date range
Specie_Count = df5.value_counts()
# Create species treemap
# Create Hourly Crosstab
hourly = | pd.crosstab(df5, df5.index.hour, dropna=False) | pandas.crosstab |
import pandas as pd
import networkx as nx
import numpy as np
from scipy import sparse
import torch, sys
Your_path = '/data/project/yinhuapark/ssl/'
sys.path.append(Your_path+'ssl_make_graphs')
sys.path.append(Your_path+'ssl_graphmodels')
from PairData import PairData
pd.set_option('display.max_columns', None)
import os.path as osp, os
from tqdm import tqdm
def combine_same_word_pair(df, col_name):
dfs = []
for w1, w1_df in df.groupby(by='word1'):
for w2, w2_df in w1_df.groupby(by='word2'):
freq_sum = w2_df['freq'].sum() / len(w2_df)
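            # note: despite the name, this is the mean 'freq' over the duplicate
            # (word1, word2) rows being collapsed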
dfs.append([w2_df['word1'].values[0], w2_df['word2'].values[0], freq_sum])
dfs = | pd.DataFrame(dfs, columns=['word1', 'word2', col_name]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# QTPy: Light-Weight, Pythonic Algorithmic Trading Library
# https://github.com/ranaroussi/qtpylib
#
# Copyright 2016-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os.path
import pandas as pd
import numpy as np
import re
import time
import tempfile
import sys
import requests
from bs4 import BeautifulSoup as bs
from dateutil.parser import parse as parse_date
from qtpylib import tools
# =============================================
# check minimum python version
if sys.version_info < (3, 4):
raise SystemError("QTPyLib requires Python version >= 3.4")
# =============================================
import logging
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# =============================================
def create_continuous_contract(df, resolution="1T"):
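    # Stitches per-expiry futures contracts into one continuous, back-adjusted series:
    # contracts are rolled by expiry date (falling back to a volume crossover when the
    # expiry is unavailable) and earlier OHLC prices are shifted by the roll gap.
    # Rough (hypothetical) usage:
    #   continuous = create_continuous_contract(bars_with_symbol_and_expiry, resolution="1T")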
def _merge_contracts(m1, m2):
if m1 is None:
return m2
try:
            # rollover by date
roll_date = m1['expiry'].unique()[-1]
except Exception as e:
# rollover by volume
combined = m1.merge(m2, left_index=True, right_index=True)
m_highest = combined['volume_y'] > combined['volume_x']
if len(m_highest.index) == 0:
return m1 # didn't rolled over yet
roll_date = m_highest[m_highest].index[-1]
return pd.concat([m1[m1.index <= roll_date], m2[m2.index > roll_date]
], sort=True)
def _continuous_contract_flags(daily_df):
# grab expirations
expirations = list(daily_df['expiry'].dropna().unique())
expirations.sort()
# set continuous contract markets
flags = None
for expiration in expirations:
new_contract = daily_df[daily_df['expiry'] == expiration].copy()
flags = _merge_contracts(flags, new_contract)
# add gap
flags['gap'] = 0
for expiration in expirations:
try:
minidf = daily_df[daily_df.index ==
expiration][['symbol', 'expiry', 'diff']]
expiry = flags[
(flags.index > expiration) & (
flags['expiry'] >= expiration)
]['expiry'][0]
gap = minidf[minidf['expiry'] == expiry]['diff'][0]
flags.loc[flags.index <= expiration, 'gap'] = gap
except Exception as e:
pass
flags = flags[flags['symbol'].isin(flags['symbol'].unique())]
# single row df won't resample
if len(flags.index) <= 1:
flags = pd.DataFrame(
index=pd.date_range(start=flags[0:1].index[0],
periods=24, freq="1H"), data=flags[
['symbol', 'expiry', 'gap']]).ffill()
flags['expiry'] = pd.to_datetime(flags['expiry'], utc=True)
return flags[['symbol', 'expiry', 'gap']]
# gonna need this later
df = df.copy()
df['dt'] = df.index
# work with daily data
daily_df = df.groupby('symbol').resample("D").last().dropna(how='all')
daily_df.index = daily_df.index.droplevel()
daily_df.sort_index(inplace=True)
try:
daily_df['diff'] = daily_df['close'].diff()
except Exception as e:
daily_df['diff'] = daily_df['last'].diff()
# build flags
flags = _continuous_contract_flags(daily_df)
# resample back to original
if "K" in resolution or "V" in resolution or "S" in resolution:
flags = flags.resample('S').last().ffill(
).reindex(df.index.unique()).ffill()
else:
flags = flags.resample('T').last().ffill(
).reindex(df.index.unique()).ffill()
flags['dt'] = flags.index
# build contract
contract = pd.merge(df, flags, how='left', on=[
'dt', 'symbol']).ffill()
contract.set_index('dt', inplace=True)
contract = contract[contract.expiry_y == contract.expiry_x]
contract['expiry'] = contract['expiry_y']
contract.drop(['expiry_y', 'expiry_x'], axis=1, inplace=True)
try:
contract['open'] = contract['open'] + contract['gap']
contract['high'] = contract['high'] + contract['gap']
contract['low'] = contract['low'] + contract['gap']
contract['close'] = contract['close'] + contract['gap']
# contract['volume'] = df['volume'].resample("D").sum()
except Exception as e:
contract['last'] = contract['last'] + contract['gap']
contract.drop(['gap'], axis=1, inplace=True)
return contract
# -------------------------------------------
def get_active_contract(symbol, url=None, n=1):
# cell content reader
def read_cells(row):
cells = row.findAll('th') + row.findAll('td')
return [cells[0].text.strip(), '', cells[7].text.strip().replace(',', '')]
def get_contracts(url):
html = requests.get(url, timeout=5)
html = bs(html.text, 'html.parser')
""" CME switched to using ajax """
prodDataUrl = html.text.split('component.baseUrl = "')[1].split(';')[
0].replace('" + ', '').replace(' + "', '').strip('"')
# get data
url = 'https://www.cmegroup.com%s?tradeDate=%s' % (
prodDataUrl, datetime.datetime.now().strftime('%m/%d/%Y'))
data = requests.get(url, timeout=5).json()
if len(data['settlements']) == 1:
url = 'https://www.cmegroup.com%s?tradeDate=%s' % (
prodDataUrl, parse_date(data['updateTime']).strftime('%m/%d/%Y'))
data = requests.get(url, timeout=5).json()
df = pd.DataFrame(data['settlements'])[:-1][['month', 'volume']]
df.columns = ['expiry', 'volume']
df.volume = pd.to_numeric(df.volume.str.replace(',', ''))
df.expiry = df.expiry.str.replace('JLY', 'JUL').apply(
lambda ds: parse_date(ds).strftime('%Y%m'))
# remove duplidates
try:
df = df.reset_index().drop_duplicates(keep='last')
except Exception as e:
df = df.reset_index().drop_duplicates(take_last=True)
return df[:13].dropna()
if url is None:
try:
url = _get_futures_url(symbol, 'quotes_settlements_futures')
except Exception as e:
pass
try:
c = get_contracts(url)
if tools.after_third_friday():
c = c[c.expiry != datetime.datetime.now().strftime('%Y%m')]
# based on volume
if len(c[c.volume > 100].index):
return c.sort_values(by=['volume', 'expiry'], ascending=False)[:n][
'expiry'].values[0]
else:
# based on date
return c[:1]['expiry'].values[0]
except Exception as e:
if tools.after_third_friday():
return (datetime.datetime.now() + (datetime.timedelta(365 / 12) * 2)
).strftime('%Y%m')
else:
return (datetime.datetime.now() + datetime.timedelta(365 / 12)
).strftime('%Y%m')
# -------------------------------------------
def make_tuple(symbol, expiry=None, exchange=None):
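    # Returns an IB-style contract tuple as used elsewhere in QTPyLib:
    # (symbol, secType, exchange, currency, expiry, strike, right);
    # the trailing 0.0 and "" appear to be the unused strike/right fields for futures.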
if expiry == None:
expiry = get_active_contract(symbol)
contract = get_ib_futures(symbol, exchange)
if contract is not None:
return (contract['symbol'], "FUT", contract['exchange'], contract[
'currency'], expiry, 0.0, "")
return None
# -------------------------------------------
def get_ib_futures(symbol=None, exchange=None, ttl=86400):
cache_file = tempfile.gettempdir() + "/futures_spec.pkl"
if symbol is not None:
symbol = symbol.upper()
def _get(df, symbol=None, exchange=None):
if symbol == "*" or symbol is None:
return df
if exchange is None:
symdf = df[df['symbol'] == symbol]
if len(symdf) > 0:
return symdf.to_dict(orient='records')[0]
else:
symdf = df[df['class'] == symbol]
if len(symdf) > 0:
return symdf.to_dict(orient='records')[0]
symdf = df[(df['exchange'] == exchange) & (df['symbol'] == symbol)]
if len(symdf) == 1:
return symdf.to_dict(orient='records')[0]
else:
symdf = df[(df['exchange'] == exchange) & (df['class'] == symbol)]
if len(symdf) == 1:
return symdf.to_dict(orient='records')[0]
return None
if os.path.exists(cache_file):
if (int(time.time()) - int(os.path.getmtime(cache_file))) < ttl:
df = pd.read_pickle(cache_file)
symdf = _get(df, symbol, exchange)
if symdf is not None:
return symdf
# else...
try:
dfs = pd.read_html(
'https://www.interactivebrokers.ca/en/index.php?f=marginCA&p=fut')
df = pd.concat(dfs, sort=False)
df = df.drop_duplicates(
subset=['Exchange', 'IB Underlying'], keep='first')
df.reset_index(drop=True, inplace=True)
df.columns = ['exchange', 'symbol', 'description', 'class',
'intraday_initial', 'intraday_maintenance',
'overnight_initial', 'overnight_maintenance', 'currency']
df['intraday_initial'] = pd.to_numeric(
df['intraday_initial'], errors='coerce')
df['intraday_maintenance'] = pd.to_numeric(
df['intraday_maintenance'], errors='coerce')
df['overnight_initial'] = pd.to_numeric(
df['overnight_initial'], errors='coerce')
df['overnight_maintenance'] = pd.to_numeric(
df['overnight_maintenance'], errors='coerce')
df['currency'].fillna("USD", inplace=True)
df['overnight_maintenance'].fillna(
df['overnight_initial'], inplace=True)
df['intraday_initial'].fillna(df['overnight_initial'], inplace=True)
df['intraday_maintenance'].fillna(df['intraday_initial'], inplace=True)
except Exception as e:
# fallback - download specs from qtpylib.io
df = | pd.read_csv('https://qtpylib.io/resources/futures_spec.csv.gz') | pandas.read_csv |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from fastai.text import *
from pathlib import Path
import pandas as pd
import numpy as np
import pickle
from .experiment import Labels, label_map
from .ulmfit_experiment import ULMFiTExperiment
import re
from .ulmfit import ULMFiT_SP
from ...pipeline_logger import pipeline_logger
from copy import deepcopy
def load_crf(path):
with open(path, "rb") as f:
return pickle.load(f)
with_letters_re = re.compile(r"(?:^\s*[a-zA-Z])|(?:[a-zA-Z]{2,})")
def cut_ulmfit_head(model):
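    # Keep everything except the last two layers of the ULMFiT classifier head, so the
    # model returns pooled sentence features (fed to the CRF below) instead of class scores.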
pooling = PoolingLinearClassifier([1], [])
pooling.layers = model[1].layers[:-2]
return SequentialRNN(model[0], pooling)
# todo: move to TSP
n_ulmfit_features = 50
n_fasttext_features = 0
n_layout_features = 16
n_features = n_ulmfit_features + n_fasttext_features + n_layout_features
n_classes = 5
class TableStructurePredictor(ULMFiT_SP):
step = "structure_prediction"
def __init__(self, path, file, crf_path=None, crf_model=None,
sp_path=None, sp_model="spm.model", sp_vocab="spm.vocab"):
super().__init__(path, file, sp_path, sp_model, sp_vocab)
self._full_learner = deepcopy(self.learner)
self.learner.model = cut_ulmfit_head(self.learner.model)
self.learner.loss_func = None
if crf_model is not None:
crf_path = Path(path) if crf_path is None else Path(crf_path)
self.crf = load_crf(crf_path / crf_model)
else:
self.crf = None
# todo: clean Experiment from older approaches
self._e = ULMFiTExperiment(remove_num=False, drop_duplicates=False,
this_paper=True, merge_fragments=True, merge_type='concat',
evidence_source='text_highlited', split_btags=True, fixed_tokenizer=True,
fixed_this_paper=True, mask=True, evidence_limit=None, context_tokens=None,
lowercase=True, drop_mult=0.15, fp16=True, train_on_easy=False)
def preprocess_df(self, raw_df):
return self._e.transform_df(raw_df)
@staticmethod
def keep_alphacells(df):
# which = df.cell_content.str.contains(with_letters_re)
which = df.cell_content.str.contains(with_letters_re)
return df[which], df[~which]
def df2tl(self, df):
text_cols = ["cell_styles", "cell_layout", "text", "cell_content", "row_context", "col_context",
"cell_reference"]
df = df[text_cols]
return TextList.from_df(df, cols=text_cols)
def get_features(self, evidences, use_crf=True):
if use_crf:
learner = self.learner
else:
learner = self._full_learner
if len(evidences):
tl = self.df2tl(evidences)
learner.data.add_test(tl)
preds, _ = learner.get_preds(DatasetType.Test, ordered=True)
return preds.cpu().numpy()
return np.zeros((0, n_ulmfit_features if use_crf else n_classes))
@staticmethod
def to_tables(df, transpose=False, n_ulmfit_features=n_ulmfit_features):
X_tables = []
Y_tables = []
ids = []
C_tables = []
for table_id, frame in df.groupby("table_id"):
rows, cols = frame.row.max()+1, frame.col.max()+1
x_table = np.zeros((rows, cols, n_features))
###y_table = np.ones((rows, cols), dtype=np.int) * n_classes
c_table = np.full((rows, cols), "", dtype=np.object)
for i, r in frame.iterrows():
x_table[r.row, r.col, :n_ulmfit_features] = r.features
c_table[r.row, r.col] = r.cell_content
#x_table[r.row, r.col, n_ulmfit_features:n_ulmfit_features+n_fasttext_features] = ft_model[r.text]
# if n_fasttext_features > 0:
# x_table[r.row, r.col, n_ulmfit_features:n_ulmfit_features+n_fasttext_features] = ft_model[r.cell_content]
###y_table[r.row, r.col] = r.label
if n_layout_features > 0:
offset = n_ulmfit_features+n_fasttext_features
layout = r.cell_layout
x_table[r.row, r.col, offset] = 1 if 'border-t' in layout or 'border-tt' in layout else -1
x_table[r.row, r.col, offset+1] = 1 if 'border-b' in layout or 'border-bb' in layout else -1
x_table[r.row, r.col, offset+2] = 1 if 'border-l' in layout or 'border-ll' in layout else -1
x_table[r.row, r.col, offset+3] = 1 if 'border-r' in layout or 'border-rr' in layout else -1
x_table[r.row, r.col, offset+4] = 1 if r.cell_reference == "True" else -1
x_table[r.row, r.col, offset+5] = 1 if r.cell_styles == "True" else -1
for span_idx, span in enumerate(["cb", "ci", "ce", "rb", "ri", "re"]):
x_table[r.row, r.col, offset+6+span_idx] = 1 if f'span-{span}' in r.cell_layout else -1
x_table[r.row, r.col, offset+12] = 1 if r.row == 0 else -1
x_table[r.row, r.col, offset+13] = 1 if r.row == rows-1 else -1
x_table[r.row, r.col, offset+14] = 1 if r.col == 0 else -1
x_table[r.row, r.col, offset+15] = 1 if r.col == cols-1 else -1
#x_table[r.row, r.col, -n_fasttext_features:] = ft_model[r.cell_content]
X_tables.append(x_table)
###Y_tables.append(y_table)
C_tables.append(c_table)
ids.append(table_id)
if transpose:
X_tables.append(x_table.transpose((1, 0, 2)))
###Y_tables.append(y_table.transpose())
C_tables.append(c_table.transpose())
ids.append(table_id)
###return (X_tables, Y_tables), C_tables, ids
return X_tables, C_tables, ids
@staticmethod
def merge_with_preds(df, preds):
if not len(df):
return []
ext_id = df.ext_id.str.split("/", expand=True)
return list(zip(ext_id[0] + "/" + ext_id[1], ext_id[2].astype(int), ext_id[3].astype(int),
preds, df.text, df.cell_content, df.cell_layout, df.cell_styles, df.cell_reference, df.label))
@staticmethod
def merge_all_with_preds(df, df_num, preds, use_crf=True):
columns = ["table_id", "row", "col", "features", "text", "cell_content", "cell_layout",
"cell_styles", "cell_reference", "label"]
alpha = TableStructurePredictor.merge_with_preds(df, preds)
nums = TableStructurePredictor.merge_with_preds(df_num, np.zeros((len(df_num), n_ulmfit_features if use_crf else n_classes)))
df1 = pd.DataFrame(alpha, columns=columns)
df2 = pd.DataFrame(nums, columns=columns)
df2.label = n_classes
return df1.append(df2, ignore_index=True)
# todo: fix numeric cells being labelled as meta / other
@staticmethod
def format_predictions(tables_preds, test_ids):
num2label = {v: k for k, v in label_map.items()}
num2label[0] = "table-meta"
num2label[Labels.PAPER_MODEL.value] = 'model-paper'
num2label[Labels.DATASET.value] = 'dataset'
num2label[max(label_map.values()) + 1] = ''
flat = []
for preds, ext_id in zip(tables_preds, test_ids):
paper_id, table_id = ext_id.split("/")
labels = pd.DataFrame(preds).applymap(num2label.get).values
flat.extend(
[(paper_id, table_id, r, c, labels[r, c]) for r in range(len(labels)) for c in range(len(labels[r])) if
labels[r, c]])
return | pd.DataFrame(flat, columns=["paper", "table", "row", "col", "predicted_tags"]) | pandas.DataFrame |
import pandas as pd
import time
# Need to open original file, filter out non class1
phospho_file = input('Enter phospho filepath: (default: Phospho (STY)Sites.txt) ') or 'Phospho (STY)Sites.txt'
PSP_dataset_file = input('Enter PhosphoSite Plus dataset: (default: Phosphorylation_site_dataset.xlsx) ') or 'Phosphorylation_site_dataset.xlsx'
localiation_cutoff = float(input('Enter Localization prob cutoff: (default: .75) ') or .75)
if phospho_file.endswith('.txt'):
phospho_df = pd.read_table(phospho_file, dtype=object)
elif phospho_file.endswith('.xlsx'):
phospho_df = pd.read_excel(phospho_file)
elif phospho_file.endswith('.csv'):
phospho_df = pd.read_csv(phospho_file)
else:
raise Exception('Please use tab-delimited (.txt), .xlsx or .csv')
if PSP_dataset_file.endswith('.txt'):
PSP_df = pd.read_table(PSP_dataset_file, dtype=object)
elif PSP_dataset_file.endswith('.xlsx'):
PSP_df = pd.read_excel(PSP_dataset_file)
elif PSP_dataset_file.endswith('.csv'):
PSP_df = | pd.read_csv(PSP_dataset_file) | pandas.read_csv |
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
import numpy as np
import pandas as pd
import pytest
from tsfresh.feature_selection.selection import select_features
class TestSelectFeatures:
def test_assert_list(self):
with pytest.raises(AssertionError):
select_features(pd.DataFrame(index=range(2)), [1, 2, 3])
def test_assert_one_row_X(self):
X = pd.DataFrame([1], index=[1])
y = pd.Series([1], index=[1])
with pytest.raises(AssertionError):
select_features(X, y)
def test_assert_one_label_y(self):
X = | pd.DataFrame([10, 10], index=[1, 2]) | pandas.DataFrame |
import torch
import torch.nn.functional as F
import os
import wandb
import pandas as pd
import numpy as np
from dataloader.dataloader import data_generator, few_shot_data_generator, generator_percentage_of_data
from configs.data_model_configs import get_dataset_class
from configs.hparams import get_hparams_class
from configs.sweep_params import sweep_alg_hparams
from utils import fix_randomness, copy_Files, starting_logs, save_checkpoint, _calc_metrics
from utils import calc_dev_risk, calculate_risk
import warnings
import sklearn.exceptions
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
import collections
from algorithms.algorithms import get_algorithm_class
from models.models import get_backbone_class
from utils import AverageMeter
torch.backends.cudnn.benchmark = True # to fasten TCN
class cross_domain_trainer(object):
"""
    This class contains the main cross-domain training functions for AdaTime.
"""
def __init__(self, args):
self.da_method = args.da_method # Selected DA Method
self.dataset = args.dataset # Selected Dataset
self.backbone = args.backbone
self.device = torch.device(args.device) # device
self.num_sweeps = args.num_sweeps
# Exp Description
self.run_description = args.run_description
self.experiment_description = args.experiment_description
# sweep parameters
self.is_sweep = args.is_sweep
self.sweep_project_wandb = args.sweep_project_wandb
self.wandb_entity = args.wandb_entity
self.hp_search_strategy = args.hp_search_strategy
self.metric_to_minimize = args.metric_to_minimize
# paths
self.home_path = os.getcwd()
self.save_dir = args.save_dir
self.data_path = os.path.join(args.data_path, self.dataset)
self.create_save_dir()
# Specify runs
self.num_runs = args.num_runs
# get dataset and base model configs
self.dataset_configs, self.hparams_class = self.get_configs()
# to fix dimension of features in classifier and discriminator networks.
self.dataset_configs.final_out_channels = self.dataset_configs.tcn_final_out_channles if args.backbone == "TCN" else self.dataset_configs.final_out_channels
# Specify number of hparams
self.default_hparams = {**self.hparams_class.alg_hparams[self.da_method],
**self.hparams_class.train_params}
def sweep(self):
# sweep configurations
sweep_runs_count = self.num_sweeps
sweep_config = {
'method': self.hp_search_strategy,
'metric': {'name': self.metric_to_minimize, 'goal': 'minimize'},
'name': self.da_method,
'parameters': {**sweep_alg_hparams[self.da_method]}
}
sweep_id = wandb.sweep(sweep_config, project=self.sweep_project_wandb, entity=self.wandb_entity)
wandb.agent(sweep_id, self.train, count=sweep_runs_count) # Training with sweep
# resuming sweep
# wandb.agent('8wkaibgr', self.train, count=25,project='HHAR_SA_Resnet', entity= 'iclr_rebuttal' )
def train(self):
if self.is_sweep:
wandb.init(config=self.default_hparams)
run_name = f"sweep_{self.dataset}"
else:
run_name = f"{self.run_description}"
wandb.init(config=self.default_hparams, mode="online", name=run_name)
self.hparams = wandb.config
# Logging
self.exp_log_dir = os.path.join(self.save_dir, self.experiment_description, run_name)
os.makedirs(self.exp_log_dir, exist_ok=True)
copy_Files(self.exp_log_dir) # save a copy of training files:
scenarios = self.dataset_configs.scenarios # return the scenarios given a specific dataset.
self.metrics = {'accuracy': [], 'f1_score': [], 'src_risk': [], 'few_shot_trg_risk': [],
'trg_risk': [], 'dev_risk': []}
for i in scenarios:
src_id = i[0]
trg_id = i[1]
for run_id in range(self.num_runs): # specify number of consecutive runs
# fixing random seed
fix_randomness(run_id)
# Logging
self.logger, self.scenario_log_dir = starting_logs(self.dataset, self.da_method, self.exp_log_dir,
src_id, trg_id, run_id)
# Load data
self.load_data(src_id, trg_id)
# get algorithm
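                # couples the chosen backbone feature extractor with the selected
                # domain-adaptation objective; hparams come from wandb.config so the
                # same code path serves both sweeps and single runs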
algorithm_class = get_algorithm_class(self.da_method)
backbone_fe = get_backbone_class(self.backbone)
algorithm = algorithm_class(backbone_fe, self.dataset_configs, self.hparams, self.device)
algorithm.to(self.device)
# Average meters
loss_avg_meters = collections.defaultdict(lambda: AverageMeter())
# training..
for epoch in range(1, self.hparams["num_epochs"] + 1):
joint_loaders = enumerate(zip(self.src_train_dl, self.trg_train_dl))
len_dataloader = min(len(self.src_train_dl), len(self.trg_train_dl))
algorithm.train()
for step, ((src_x, src_y), (trg_x, _)) in joint_loaders:
src_x, src_y, trg_x = src_x.float().to(self.device), src_y.long().to(self.device), \
trg_x.float().to(self.device)
if self.da_method == "DANN" or self.da_method == "CoDATS":
losses = algorithm.update(src_x, src_y, trg_x, step, epoch, len_dataloader)
else:
losses = algorithm.update(src_x, src_y, trg_x)
for key, val in losses.items():
loss_avg_meters[key].update(val, src_x.size(0))
# logging
self.logger.debug(f'[Epoch : {epoch}/{self.hparams["num_epochs"]}]')
for key, val in loss_avg_meters.items():
self.logger.debug(f'{key}\t: {val.avg:2.4f}')
self.logger.debug(f'-------------------------------------')
self.algorithm = algorithm
save_checkpoint(self.home_path, self.algorithm, scenarios, self.dataset_configs,
self.scenario_log_dir, self.hparams)
self.evaluate()
self.calc_results_per_run()
# logging metrics
self.calc_overall_results()
average_metrics = {metric: np.mean(value) for (metric, value) in self.metrics.items()}
wandb.log(average_metrics)
wandb.log({'hparams': wandb.Table(
dataframe=pd.DataFrame(dict(self.hparams).items(), columns=['parameter', 'value']),
allow_mixed_types=True)})
wandb.log({'avg_results': wandb.Table(dataframe=self.averages_results_df, allow_mixed_types=True)})
wandb.log({'std_results': wandb.Table(dataframe=self.std_results_df, allow_mixed_types=True)})
def evaluate(self):
feature_extractor = self.algorithm.feature_extractor.to(self.device)
classifier = self.algorithm.classifier.to(self.device)
feature_extractor.eval()
classifier.eval()
total_loss_ = []
self.trg_pred_labels = np.array([])
self.trg_true_labels = np.array([])
with torch.no_grad():
for data, labels in self.trg_test_dl:
data = data.float().to(self.device)
labels = labels.view((-1)).long().to(self.device)
# forward pass
features = feature_extractor(data)
predictions = classifier(features)
# compute loss
loss = F.cross_entropy(predictions, labels)
total_loss_.append(loss.item())
pred = predictions.detach().argmax(dim=1) # get the index of the max log-probability
self.trg_pred_labels = np.append(self.trg_pred_labels, pred.cpu().numpy())
self.trg_true_labels = np.append(self.trg_true_labels, labels.data.cpu().numpy())
self.trg_loss = torch.tensor(total_loss_).mean() # average loss
def get_configs(self):
dataset_class = get_dataset_class(self.dataset)
hparams_class = get_hparams_class(self.dataset)
return dataset_class(), hparams_class()
def load_data(self, src_id, trg_id):
self.src_train_dl, self.src_test_dl = data_generator(self.data_path, src_id, self.dataset_configs,
self.hparams)
self.trg_train_dl, self.trg_test_dl = data_generator(self.data_path, trg_id, self.dataset_configs,
self.hparams)
self.few_shot_dl = few_shot_data_generator(self.trg_test_dl)
# self.src_train_dl = generator_percentage_of_data(self.src_train_dl_)
# self.trg_train_dl = generator_percentage_of_data(self.trg_train_dl_)
def create_save_dir(self):
if not os.path.exists(self.save_dir):
os.mkdir(self.save_dir)
def calc_results_per_run(self):
'''
Calculates the acc, f1 and risk values for each cross-domain scenario
'''
self.acc, self.f1 = _calc_metrics(self.trg_pred_labels, self.trg_true_labels, self.scenario_log_dir,
self.home_path,
self.dataset_configs.class_names)
if self.is_sweep:
self.src_risk = calculate_risk(self.algorithm, self.src_test_dl, self.device)
self.trg_risk = calculate_risk(self.algorithm, self.trg_test_dl, self.device)
self.few_shot_trg_risk = calculate_risk(self.algorithm, self.few_shot_dl, self.device)
self.dev_risk = calc_dev_risk(self.algorithm, self.src_train_dl, self.trg_train_dl, self.src_test_dl,
self.dataset_configs, self.device)
run_metrics = {'accuracy': self.acc,
'f1_score': self.f1,
'src_risk': self.src_risk,
'few_shot_trg_risk': self.few_shot_trg_risk,
'trg_risk': self.trg_risk,
'dev_risk': self.dev_risk}
df = pd.DataFrame(columns=["acc", "f1", "src_risk", "few_shot_trg_risk", "trg_risk", "dev_risk"])
df.loc[0] = [self.acc, self.f1, self.src_risk, self.few_shot_trg_risk, self.trg_risk,
self.dev_risk]
else:
run_metrics = {'accuracy': self.acc, 'f1_score': self.f1}
df = pd.DataFrame(columns=["acc", "f1"])
df.loc[0] = [self.acc, self.f1]
for (key, val) in run_metrics.items(): self.metrics[key].append(val)
scores_save_path = os.path.join(self.home_path, self.scenario_log_dir, "scores.xlsx")
df.to_excel(scores_save_path, index=False)
self.results_df = df
def calc_overall_results(self):
exp = self.exp_log_dir
# for exp in experiments:
if self.is_sweep:
results = pd.DataFrame(
columns=["scenario", "acc", "f1", "src_risk", "few_shot_trg_risk", "trg_risk", "dev_risk"])
else:
results = | pd.DataFrame(columns=["scenario", "acc", "f1"]) | pandas.DataFrame |
import pandas as pd
import gdal
import numpy as np
import os
import rasterio
import tqdm
class TrainingData:
"""Prepares training datasets using a raster stack, species occurrences and a set of band means and standard
deviations.
:param self: a class instance of TrainingData
:param oh: an Occurrence object: holds occurrence files and tables
:param gh: a GIS object: holds path and file names required for computation of gis data
:param verbose: a boolean: prints a progress bar if True, silent if False
:return: Object. Used to create a series of .csv files (one for each species detected by the Occurrences object)
containing the input data to the trainer, executed by calling class method create_training_df on TrainingData
object.
"""
def __init__(self, oh, gh, verbose):
self.oh = oh
self.gh = gh
self.verbose = verbose
def prep_training_df(self, src, inras, spec):
"""Loads array from raster stack, locations from species occurrences and band statistics.
:param self: a class instance of TrainingData
:param src: rasterio source object for raster stack.
:param inras: gdal source object for raster stack.
:param spec: string containing the species name for which the data will be loaded.
:return: Tuple. Containing:
string 'spec' that contains the species name for which the files are loaded and returned;
list 'ppa' contains the status for each loaded occurrence (0 for absence, 1 for presence) for the specified
species;
list 'long' and 'lati' contain the longitude and latitude for each occurrence from a specified species;
list 'row' and 'col' contain the values from the previous 'long' and 'lati' columns converted from WGS84 to
image coordinates;
matrix 'myarray' is an multi-dimensional representation of the raster stack;
table 'mean_std' is an table containing the mean and standard deviation for each of the scaled raster layers
"""
data = pd.read_csv(self.gh.spec_ppa + '/%s_ppa_dataframe.csv' % spec)
spec = spec.replace(" ", "_")
len_pd = np.arange(len(data))
# dictionary keys are used to query table files that are generated by the package.
long = data["dLon"]
lati = data["dLat"]
ppa = data["present/pseudo_absent"]
lon = long.values
lat = lati.values
row = []
col = []
for i in len_pd:
row_n, col_n = src.index(lon[i], lat[i])
row.append(row_n)
col.append(col_n)
myarray = inras.ReadAsArray()
mean_std = pd.read_csv(self.gh.gis + '/env_bio_mean_std.txt', sep="\t")
mean_std = mean_std.to_numpy()
return spec, ppa, long, lati, row, col, myarray, mean_std
def create_training_df(self):
"""Create training dataset by extracting all environmental variables for each occurrence location for a set of
species.
:param self: a class instance of TrainingData
:return: None. Does not return value or object, instead writes the computed training dataset to file for each
species detected by the Occurrence object (oh).
"""
src = rasterio.open(self.gh.stack + '/stacked_env_variables.tif')
inRas = gdal.Open(self.gh.stack + '/stacked_env_variables.tif')
for i in tqdm.tqdm(self.oh.name, desc='Creating training data' + (28 * ' '), leave=True) if self.verbose else self.oh.name:
spec, ppa, long, lati, row, col, myarray, mean_std = self.prep_training_df(src, inRas, i)
X = []
            for j in range(0, self.gh.length):
                band = myarray[j]
                x = []
                for k in range(0, len(row)):
                    value = band[row[k], col[k]]
                    if value < -1000:
                        value = np.nan
                    elif j < self.gh.scaled_len:
                        # scale bands with precomputed statistics: (value - mean) / std
                        value = (value - mean_std.item((j, 1))) / mean_std.item((j, 2))
                    x.append(value)
                X.append(x)
X = np.array([np.array(xi) for xi in X])
            df = pd.DataFrame(X)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 20 10:24:34 2019
@author: labadmin
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 21:05:32 2019
@author: Hassan
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier as GBC
from imblearn.over_sampling import RandomOverSampler
from imblearn.over_sampling import BorderlineSMOTE
from imblearn.over_sampling import SMOTENC
data_ben1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset1.csv",skiprows=4)
data_ben2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset2.csv",skiprows=4)
data_ben3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset3.csv",skiprows=4)
data_ben4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset4.csv",skiprows=4)
data_ben5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset5.csv",skiprows=4)
data_ben6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset6.csv",skiprows=4)
data_ben7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset7.csv",skiprows=4)
frames_ben1 = [data_ben1,data_ben2,data_ben3,data_ben4,data_ben5,data_ben6,data_ben7]
result_ben1 = pd.concat(frames_ben1)
result_ben1.index=range(3360)
df_ben1 = pd.DataFrame({'label': [1]},index=range(0,3360))
dat_ben1=pd.concat([result_ben1,df_ben1],axis=1)
#-------------------------------------------------------------------------------------------------
data__ben1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset1.csv",skiprows=4)
data__ben2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset2.csv",skiprows=4)
data__ben3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset3.csv",skiprows=4)
data__ben4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset4.csv",skiprows=4)
data__ben4=data__ben4['# Columns: time'].str.split(expand=True)
data__ben4.columns=['# Columns: time','avg_rss12','var_rss12','avg_rss13','var_rss13','avg_rss23','var_rss23']
data__ben5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset5.csv",skiprows=4)
data__ben6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset6.csv",skiprows=4)
frames_ben2 = [data__ben1,data__ben2,data__ben3,data__ben4,data__ben5,data__ben6]
result_ben2 = pd.concat(frames_ben2)
result_ben2.index=range(2880)
df_ben2 = pd.DataFrame({'label': [2]},index=range(0,2880))
dat__ben2=pd.concat([result_ben2,df_ben2],axis=1)
#-----------------------------------------------------------------------------------------------------
data_cyc1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset1.csv",skiprows=4)
data_cyc2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset2.csv",skiprows=4)
data_cyc3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset3.csv",skiprows=4)
data_cyc4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset4.csv",skiprows=4)
data_cyc5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset5.csv",skiprows=4)
data_cyc6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset6.csv",skiprows=4)
data_cyc7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset7.csv",skiprows=4)
data_cyc8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset8.csv",skiprows=4)
data_cyc9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset99.csv",skiprows=4)
data_cyc10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset10.csv",skiprows=4)
data_cyc11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset11.csv",skiprows=4)
data_cyc12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset12.csv",skiprows=4)
data_cyc13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset13.csv",skiprows=4)
data_cyc14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset144.csv",skiprows=4)
data_cyc15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset15.csv",skiprows=4)
frames_cyc = [data_cyc1,data_cyc2,data_cyc3,data_cyc4,data_cyc5,data_cyc6,data_cyc7,data_cyc8,data_cyc9,data_cyc10,data_cyc11,data_cyc12,data_cyc13,data_cyc14,data_cyc15]
result_cyc = pd.concat(frames_cyc)
result_cyc.index=range(7200)
df_cyc = pd.DataFrame({'label': [3]},index=range(0,7200))
data_cyc=pd.concat([result_cyc,df_cyc],axis=1)
#----------------------------------------------------------------------------------------------
data_ly1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset1.csv",skiprows=4)
data_ly2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset2.csv",skiprows=4)
data_ly3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset3.csv",skiprows=4)
data_ly4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset4.csv",skiprows=4)
data_ly5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset5.csv",skiprows=4)
data_ly6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset6.csv",skiprows=4)
data_ly7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset7.csv",skiprows=4)
data_ly8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset8.csv",skiprows=4)
data_ly9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset9.csv",skiprows=4)
data_ly10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset10.csv",skiprows=4)
data_ly11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset11.csv",skiprows=4)
data_ly12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset12.csv",skiprows=4)
data_ly13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset13.csv",skiprows=4)
data_ly14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset14.csv",skiprows=4)
data_ly15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset15.csv",skiprows=4)
frames_ly = [data_ly1,data_ly2,data_ly3,data_ly4,data_ly5,data_ly6,data_ly7,data_ly8,data_ly9,data_ly10,data_ly11,data_ly12,data_ly13,data_ly14,data_ly15]
result_ly = pd.concat(frames_ly)
result_ly.index=range(7200)
df_ly = pd.DataFrame({'label': [4]},index=range(0,7200))
data_ly=pd.concat([result_ly,df_ly],axis=1)
#-------------------------------------------------------------------------------------------------
data_sit1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset1.csv",skiprows=4)
data_sit2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset2.csv",skiprows=4)
data_sit3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset3.csv",skiprows=4)
data_sit4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset4.csv",skiprows=4)
data_sit5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset5.csv",skiprows=4)
data_sit6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset6.csv",skiprows=4)
data_sit7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset7.csv",skiprows=4)
data_sit8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset8.csv",skiprows=4)
data_sit9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset9.csv",skiprows=4)
data_sit10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset10.csv",skiprows=4)
data_sit11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset11.csv",skiprows=4)
data_sit12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset12.csv",skiprows=4)
data_sit13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset13.csv",skiprows=4)
data_sit14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset14.csv",skiprows=4)
data_sit15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset15.csv",skiprows=4)
frames_sit= [data_sit1,data_sit2,data_sit3,data_sit4,data_sit5,data_sit6,data_sit7,data_sit8,data_sit9,data_sit10,data_sit11,data_sit12,data_sit13,data_sit14,data_sit15]
result_sit = pd.concat(frames_sit)
result_sit.index=range(7199)
df_sit= pd.DataFrame({'label': [5]},index=range(0,7199))
data_sit=pd.concat([result_sit,df_sit],axis=1)
#----------------------------------------------------------------------------------------------------
data_sta1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset1.csv",skiprows=4)
data_sta2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset2.csv",skiprows=4)
data_sta3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset3.csv",skiprows=4)
data_sta4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset4.csv",skiprows=4)
data_sta5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset5.csv",skiprows=4)
data_sta6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset6.csv",skiprows=4)
data_sta7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset7.csv",skiprows=4)
data_sta8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset8.csv",skiprows=4)
data_sta9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset9.csv",skiprows=4)
data_sta10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset10.csv",skiprows=4)
data_sta11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset11.csv",skiprows=4)
data_sta12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset12.csv",skiprows=4)
data_sta13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset13.csv",skiprows=4)
data_sta14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset14.csv",skiprows=4)
data_sta15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset15.csv",skiprows=4)
frames_sta= [data_sta1,data_sta2,data_sta3,data_sta4,data_sta5,data_sta6,data_sta7,data_sta8,data_sta9,data_sta10,data_sta11,data_sta12,data_sta13,data_sta14,data_sta15]
result_sta = pd.concat(frames_sta)
result_sta.index=range(7200)
df_sta= pd.DataFrame({'label': [6]},index=range(0,7200))
data_sta=pd.concat([result_sta,df_sta],axis=1)
#---------------------------------------------------------------------------------------------------------------
data_wa1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset1.csv",skiprows=4)
data_wa2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset2.csv",skiprows=4)
data_wa3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset3.csv",skiprows=4)
data_wa4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset4.csv",skiprows=4)
data_wa5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset5.csv",skiprows=4)
data_wa6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset6.csv",skiprows=4)
data_wa7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset7.csv",skiprows=4)
data_wa8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\walking\\dataset8.csv",skiprows=4)
import pandas as pd
from plaster.tools.zplots.zplots import ZPlots
from plaster.tools.plots import plots
from plaster.tools.plots import plots_dev
from plaster.tools.ipynb_helpers.displays import hd
from plaster.tools.utils.utils import json_print, munch_abbreviation_string
from IPython.display import display # for display of DataFrames
def plot_best_runs_peptide_yield(best_pr, run_info, filters, **kwargs):
"""
For each run, indicate how many peptides it was the 'best run' for based on
filter criteria.
"""
total_peps = len(best_pr.pep_i.unique())
fracs_by_run = run_info.pep_counts / total_peps
classifier_name = filters.get("classifier", "").upper()
title = f'"Best PR" peptide-yield for runs that produced a best peptide-pr ({classifier_name})'
y_label = "fraction of total peptides"
x_range = run_info.run_labels
z = ZPlots.zplot_singleton
z.cols(
fracs_by_run,
x=x_range,
f_x_range=x_range,
_x_axis_label_orientation=1.2,
f_title=title,
f_y_axis_label=y_label,
f_x_axis_label="run name",
_label=run_info.pep_counts,
_size_x=1000,
)
def plot_best_runs_peptide_observability(
job, best_pr, run_info, all_runs_pr, filters, **kwargs
):
"""
peptide observability-vs-precision considering the best runs for each peptide
as if they are one big super-run -- how well can we see peptides vs precision
if the best run (as defined by filters) is chosen for each peptide?
"""
z = ZPlots.zplot_singleton
z.color_reset()
classifier_name = filters.get("classifier", "").upper()
with z(
_merge=True,
f_title=f"Peptide-Classes Precision/Recall (best {filters.plot_n_runs} + combined-best runs) ({classifier_name})",
f_y_axis_label="precision",
f_x_axis_label="peptide-classes recall",
):
best_runs_full_pr = []
for i, (run_i, n_pep, peps) in enumerate(
zip(run_info.run_iz, run_info.pep_counts, run_info.peps)
):
best_runs_full_pr += [
all_runs_pr[
(all_runs_pr.run_i == run_i) & (all_runs_pr.pep_i.isin(peps))
]
]
run = job.runs[run_i]
if i < filters.plot_n_runs:
label = f"{plots_dev._run_labels(run.run_name)} ({n_pep})"
plots.plot_peptide_observability_vs_precision(
run,
pep_iz=filters.peptide_subset,
color=z.next(),
pr_axes=True,
_label=label,
legend_label=label,
_legend="top_right",
_range=(0, 1.05, 0, 1.05),
)
best_full_pr = pd.concat(best_runs_full_pr)
plots._plot_peptide_observability_vs_precision(
best_full_pr,
color=z.next(),
_label="combined",
legend_label="combined (filters)",
**kwargs,
)
def plot_pr_scatter_peps_runs(peps_runs_df, run_info, **kwargs):
"""
Single plot of best PR for run_i+pep_i pairs given in peps_runs_df
peps_runs_df: a df containing run_i,run_name,pep_i,prec, and recall
run_info: a Munch containing run_iz,run_labels,pep_counts,and peps per run, sorted
"""
df = peps_runs_df.copy()
df["label"] = df.apply(
lambda x: f"{x.pep_i:03d} {x.seqstr} {x.flustr} ({x.flu_count})", axis=1
)
z = ZPlots.zplot_singleton
n_peps = len(peps_runs_df.pep_i.unique())
title = kwargs.get(
"f_title", f"{n_peps} peptides, best precision for recall-filter"
)
z.color_reset()
with z(
_merge=True,
f_y_axis_label="precision",
f_x_axis_label="read recall",
f_title=title,
_legend="bottom_right",
_range=(0, 1.05, 0, 1.05),
):
groups = df.groupby("run_i")
for run_i, run_label, pep_count in zip(
run_info.run_iz, run_info.run_labels, run_info.pep_counts
):
try:
group = groups.get_group(run_i)
except KeyError:
continue # the run has no entries in the DF, that's ok.
legend = f"{run_label} ({pep_count})"
z.scat(
source=group,
y="prec",
x="recall",
_label="label",
fill_alpha=0.8,
color=z.next(),
legend_label=legend,
)
def plot_best_runs_scatter(best_pr, run_info, filters, **kwargs):
runs_to_plot = run_info.run_iz[: filters.plot_n_runs]
n_peps = len(best_pr.pep_i.unique())
plotted_pr = best_pr[best_pr.run_i.isin(runs_to_plot)]
n_plotted_peps = len(plotted_pr.pep_i.unique())
title = f"{n_plotted_peps} of {n_peps} peptides, best precision for min_recall={filters.min_recall} {filters.classifier}"
plot_pr_scatter_peps_runs(plotted_pr, run_info, f_title=title, **kwargs)
def plot_best_runs_pr(best_pr, all_pr, run_info, filters, **kwargs):
df = best_pr.sort_values(by=["prec", "recall"], ascending=[False, False])[
: filters.plot_n_peps
]
z = ZPlots.zplot_singleton
z.color_reset()
title = f"PR curves, best {len(df.pep_i.unique())} peptides, best runs. {filters.classifier} "
run_i_to_info = {
run_i: (run_label, z.next())
for run_i, run_label in zip(run_info.run_iz, run_info.run_labels)
}
with z(
f_title=title,
_merge=True,
_legend="bottom_right",
f_y_axis_label="precision",
f_x_axis_label="read recall",
):
for i, row in df.iterrows():
run_i = row.run_i
pep_i = row.pep_i
legend_label = f"{run_i_to_info[run_i][0]} p{pep_i}"
line_label = f"{row.pep_i:03d} {row.seqstr} {row.flustr} ({row.flu_count})"
color = run_i_to_info[run_i][1]
prdf = all_pr[(all_pr.run_i == run_i) & (all_pr.pep_i == pep_i)]
prsa = (prdf.prec.values, prdf.recall.values, prdf.score.values, None)
plots._plot_pr_curve(
prsa,
color=color,
legend_label=legend_label,
_label=line_label,
**kwargs,
)
def show_best_runs_df(best_pr, filters, save_csv=True):
hd(
"h3",
f"Top 50 precisions at min_recall={filters.min_recall} {filters.classifier}",
)
if save_csv:
csv_filename = f"./report_best_pr__{munch_abbreviation_string(filters)}.csv"
best_pr.to_csv(csv_filename, index=False, float_format="%g")
print(f"(All {len(best_pr)} rows exported to {csv_filename})")
| pd.set_option("display.max_columns", None) | pandas.set_option |
# -*- coding: utf-8 -*-
from datetime import timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Int64Index, Series, Timedelta, TimedeltaIndex, Timestamp,
date_range, timedelta_range
)
from pandas.errors import NullFrequencyError
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(params=['B', 'D'])
def freq(request):
return request.param
class TestTimedeltaIndexArithmetic(object):
# Addition and Subtraction Operations
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_tdi_shift_int(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None)
with pytest.raises(NullFrequencyError):
tdi.shift(2)
# -------------------------------------------------------------
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and integer
def test_tdi_add_int(self, one):
# Variants of `one` for #19012
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + one
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_iadd_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
rng += one
tm.assert_index_equal(rng, expected)
def test_tdi_sub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - one
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_isub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_add_integer_array(self, box):
# GH#19959
rng = timedelta_range('1 days 09:00:00', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['1 day 13:00:00'] * 3)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_sub_integer_array(self, box):
# GH#19959
rng = timedelta_range('9H', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['5H', '7H', '9H'])
result = rng - other
tm.assert_index_equal(result, expected)
result = other - rng
tm.assert_index_equal(result, -expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_addsub_integer_array_no_freq(self, box):
# GH#19959
tdi = TimedeltaIndex(['1 Day', 'NaT', '3 Hours'])
other = box([14, -1, 16])
with pytest.raises(NullFrequencyError):
tdi + other
with pytest.raises(NullFrequencyError):
other + tdi
with pytest.raises(NullFrequencyError):
tdi - other
with pytest.raises(NullFrequencyError):
other - tdi
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
# Note: add and sub are tested in tests.test_arithmetic
def test_tdi_iadd_timedeltalike(self, delta):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng += delta
tm.assert_index_equal(rng, expected)
def test_tdi_isub_timedeltalike(self, delta):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
pytest.raises(TypeError, lambda: tdi - dt)
pytest.raises(TypeError, lambda: tdi - dti)
pytest.raises(TypeError, lambda: td - dt)
pytest.raises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
pytest.raises(TypeError, lambda: dt_tz - ts)
pytest.raises(TypeError, lambda: dt_tz - dt)
pytest.raises(TypeError, lambda: dt_tz - ts_tz2)
pytest.raises(TypeError, lambda: dt - dt_tz)
pytest.raises(TypeError, lambda: ts - dt_tz)
pytest.raises(TypeError, lambda: ts_tz2 - ts)
pytest.raises(TypeError, lambda: ts_tz2 - dt)
pytest.raises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
pytest.raises(TypeError, lambda: dti - ts_tz)
pytest.raises(TypeError, lambda: dti_tz - ts)
pytest.raises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
        tm.assert_index_equal(result, expected)
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
assert result == expected
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
assert result == expected
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
assert result == expected
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
            expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
from copy import copy
import dask
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pandas.testing as tm
import pytest
import sklearn.preprocessing as spp
from dask import compute
from dask.array.utils import assert_eq as assert_eq_ar
from dask.dataframe.utils import assert_eq as assert_eq_df
from pandas.api.types import is_categorical_dtype, is_object_dtype
from sklearn.exceptions import NotFittedError
import dask_ml.preprocessing as dpp
from dask_ml.datasets import make_classification
from dask_ml.utils import assert_estimator_equal
X, y = make_classification(chunks=50)
df = X.to_dask_dataframe().rename(columns=str)
df2 = dd.from_pandas(pd.DataFrame(5 * [range(42)]).T.rename(columns=str), npartitions=5)
raw = pd.DataFrame(
{
"A": ["a", "b", "c", "a"],
"B": ["a", "b", "c", "a"],
"C": ["a", "b", "c", "a"],
"D": [1, 2, 3, 4],
},
columns=["A", "B", "C", "D"],
)
dummy = pd.DataFrame(
{
"A": pd.Categorical(["a", "b", "c", "a"], ordered=True),
"B": pd.Categorical(["a", "b", "c", "a"], ordered=False),
"C": pd.Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]),
"D": [1, 2, 3, 4],
},
columns=["A", "B", "C", "D"],
)
@pytest.fixture
def pandas_df():
return pd.DataFrame(5 * [range(42)]).T.rename(columns=str)
@pytest.fixture
def dask_df(pandas_df):
return dd.from_pandas(pandas_df, npartitions=5)
class TestStandardScaler:
def test_basic(self):
a = dpp.StandardScaler()
b = spp.StandardScaler()
a.fit(X)
b.fit(X.compute())
assert_estimator_equal(a, b, exclude="n_samples_seen_")
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.DataConversionWarning")
def test_input_types(self, dask_df, pandas_df):
a = dpp.StandardScaler()
b = spp.StandardScaler()
assert_estimator_equal(
a.fit(dask_df.values),
a.fit(dask_df),
)
assert_estimator_equal(
a.fit(dask_df),
b.fit(pandas_df),
exclude="n_samples_seen_",
)
assert_estimator_equal(
a.fit(dask_df.values),
b.fit(pandas_df),
exclude={"n_samples_seen_", "feature_names_in_"},
)
assert_estimator_equal(
a.fit(dask_df),
b.fit(pandas_df.values),
exclude={"n_samples_seen_", "feature_names_in_"},
)
assert_estimator_equal(
a.fit(dask_df.values),
b.fit(pandas_df.values),
exclude="n_samples_seen_",
)
def test_inverse_transform(self):
a = dpp.StandardScaler()
result = a.inverse_transform(a.fit_transform(X))
assert dask.is_dask_collection(result)
assert_eq_ar(result, X)
def test_nan(self, pandas_df):
pandas_df = pandas_df.copy()
pandas_df.iloc[0] = np.nan
dask_nan_df = dd.from_pandas(pandas_df, npartitions=5)
a = dpp.StandardScaler()
a.fit(dask_nan_df.values)
assert np.isnan(a.mean_).sum() == 0
assert np.isnan(a.var_).sum() == 0
class TestMinMaxScaler:
def test_basic(self):
a = dpp.MinMaxScaler()
b = spp.MinMaxScaler()
a.fit(X)
b.fit(X.compute())
assert_estimator_equal(a, b)
def test_inverse_transform(self):
a = dpp.MinMaxScaler()
result = a.inverse_transform(a.fit_transform(X))
assert dask.is_dask_collection(result)
assert_eq_ar(result, X)
@pytest.mark.xfail(reason="removed columns")
def test_df_inverse_transform(self):
mask = ["3", "4"]
a = dpp.MinMaxScaler(columns=mask)
result = a.inverse_transform(a.fit_transform(df2))
assert dask.is_dask_collection(result)
assert_eq_df(result, df2)
def test_df_values(self):
est1 = dpp.MinMaxScaler()
est2 = dpp.MinMaxScaler()
result_ar = est1.fit_transform(X)
result_df = est2.fit_transform(df)
for attr in ["data_min_", "data_max_", "data_range_", "scale_", "min_"]:
assert_eq_ar(getattr(est1, attr), getattr(est2, attr).values)
assert_eq_ar(est1.transform(X), est2.transform(df).values)
if hasattr(result_df, "values"):
result_df = result_df.values
assert_eq_ar(result_ar, result_df)
@pytest.mark.xfail(reason="removed columns")
def test_df_column_slice(self):
mask = ["3", "4"]
mask_ix = [mask.index(x) for x in mask]
a = dpp.MinMaxScaler(columns=mask)
b = spp.MinMaxScaler()
dfa = a.fit_transform(df2)
mxb = b.fit_transform(df2.compute())
assert isinstance(dfa, dd.DataFrame)
assert_eq_ar(dfa[mask].values, mxb[:, mask_ix])
assert_eq_df(dfa.drop(mask, axis=1), df2.drop(mask, axis=1))
class TestRobustScaler:
def test_fit(self):
a = dpp.RobustScaler()
b = spp.RobustScaler()
# bigger data to make percentile more reliable
# and not centered around 0 to make rtol work
X, y = make_classification(n_samples=1000, chunks=200, random_state=0)
X = X + 3
a.fit(X)
b.fit(X.compute())
assert_estimator_equal(a, b, rtol=0.2)
def test_transform(self):
a = dpp.RobustScaler()
b = spp.RobustScaler()
a.fit(X)
b.fit(X.compute())
# overwriting dask-ml's fitted attributes to have them exactly equal
# (the approximate equality is tested above)
a.scale_ = b.scale_
a.center_ = b.center_
assert dask.is_dask_collection(a.transform(X))
assert_eq_ar(a.transform(X), b.transform(X.compute()))
def test_inverse_transform(self):
a = dpp.RobustScaler()
result = a.inverse_transform(a.fit_transform(X))
assert dask.is_dask_collection(result)
assert_eq_ar(result, X)
def test_df_values(self):
est1 = dpp.RobustScaler()
est2 = dpp.RobustScaler()
result_ar = est1.fit_transform(X)
result_df = est2.fit_transform(df)
if hasattr(result_df, "values"):
result_df = result_df.values
assert_eq_ar(result_ar, result_df)
for attr in ["scale_", "center_"]:
assert_eq_ar(getattr(est1, attr), getattr(est2, attr))
assert_eq_ar(est1.transform(X), est2.transform(X))
assert_eq_ar(est1.transform(df).values, est2.transform(X))
assert_eq_ar(est1.transform(X), est2.transform(df).values)
# different data types
df["0"] = df["0"].astype("float32")
result_ar = est1.fit_transform(X)
result_df = est2.fit_transform(df)
if hasattr(result_df, "values"):
result_df = result_df.values
assert_eq_ar(result_ar, result_df)
class TestQuantileTransformer:
@pytest.mark.parametrize("output_distribution", ["uniform", "normal"])
def test_basic(self, output_distribution):
rs = da.random.RandomState(0)
a = dpp.QuantileTransformer(output_distribution=output_distribution)
b = spp.QuantileTransformer(output_distribution=output_distribution)
X = rs.uniform(size=(1000, 3), chunks=50)
a.fit(X)
b.fit(X)
assert_estimator_equal(a, b, atol=0.02)
# set the quantiles, so that from here out, we're exact
a.quantiles_ = b.quantiles_
assert_eq_ar(a.transform(X), b.transform(X), atol=1e-7)
assert_eq_ar(X, a.inverse_transform(a.transform(X)))
@pytest.mark.parametrize(
"type_, kwargs",
[
(np.array, {}),
(da.from_array, {"chunks": 100}),
(pd.DataFrame, {"columns": ["a", "b", "c"]}),
(dd.from_array, {"columns": ["a", "b", "c"]}),
],
)
def test_types(self, type_, kwargs):
X = np.random.uniform(size=(1000, 3))
dX = type_(X, **kwargs)
qt = spp.QuantileTransformer()
qt.fit(X)
dqt = dpp.QuantileTransformer()
dqt.fit(dX)
def test_fit_transform_frame(self):
df = pd.DataFrame(np.random.randn(1000, 3))
ddf = dd.from_pandas(df, 2)
a = spp.QuantileTransformer()
b = dpp.QuantileTransformer()
expected = a.fit_transform(df)
result = b.fit_transform(ddf)
assert_eq_ar(result, expected, rtol=1e-3, atol=1e-3)
class TestCategorizer:
def test_ce(self):
ce = dpp.Categorizer()
original = raw.copy()
trn = ce.fit_transform(raw)
assert is_categorical_dtype(trn["A"])
assert is_categorical_dtype(trn["B"])
assert is_categorical_dtype(trn["C"])
assert trn["D"].dtype == np.dtype("int64")
tm.assert_index_equal(ce.columns_, pd.Index(["A", "B", "C"]))
tm.assert_frame_equal(raw, original)
def test_given_categories(self):
cats = ["a", "b", "c", "d"]
ce = dpp.Categorizer(categories={"A": (cats, True)})
trn = ce.fit_transform(raw)
assert trn["A"].dtype == "category"
tm.assert_index_equal(trn["A"].cat.categories, pd.Index(cats))
assert all(trn["A"].cat.categories == cats)
assert trn["A"].cat.ordered
def test_dask(self):
a = dd.from_pandas(raw, npartitions=2)
ce = dpp.Categorizer()
trn = ce.fit_transform(a)
assert is_categorical_dtype(trn["A"])
        assert is_categorical_dtype(trn["B"])
#!/usr/bin/env python3.6
import pandas as pd
from collections import defaultdict, Counter
import argparse
import sys
import os
import subprocess
import re
import numpy as np
from datetime import datetime
from itertools import chain
from pyranges import PyRanges
from SV_modules import *
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', None)
pd.options.display.max_rows = 999
class Namespace:
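    # Simple attribute holder: e.g. Namespace(sampleid="sample1", type="trio") exposes
    # .sampleid and .type (values here are illustrative only).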
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def createGeneSyndromeDict(database_df):
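    # Builds {gene_or_syndrome: [HPO terms]} from a two-column dataframe; e.g. rows
    # ("GENE1", "HP:0001250") and ("GENE1", "HP:0004322") (illustrative) yield
    # {"GENE1": ["HP:0001250", "HP:0004322"]}.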
dict = defaultdict(list)
for var, hpo in database_df.itertuples(index=False): # var can either be gene or syndrome
dict[var].append(hpo)
return(dict)
def createWeightDict(weights):
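    # The weights file is read as space-separated "<HPO_id> <weight>" pairs, one per line,
    # with '#' starting a comment line, e.g. (illustrative values):
    #   HP:0001250 0.73
    #   HP:0004322 0.41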
try:
w_df = pd.read_csv(weights, sep = ' ', names=["HPO_id", "weight"], comment = '#')
except OSError:
print("Count not open/read the input file:" + weights)
sys.exit()
weightDict = dict(zip(w_df.HPO_id, w_df.weight))
return(weightDict)
def getClinicalPhenome(args):
# Get the clinical phenome and store as a set
try:
clinical_phenome = set(open("./results/" + args.sampleid + "/" + args.sampleid + "_hpo_inexact.txt").read().splitlines())
except OSError:
print("Count not open/read the input file:" + "./results/" + args.sampleid + "/" + args.sampleid + "_hpo_inexact.txt")
sys.exit()
return(clinical_phenome)
def calculateGeneSumScore(args, hpo_gene_dict, weightDict, clinical_phenome, omim_gene):
# Go through genes in genelist found in the patients
try:
genes = open("./results/" + args.sampleid + "/" + args.sampleid + "_gene_list.txt", 'r')
except OSError:
print("Count not open/read the input file:" + "./results/" + args.sampleid + "/" + args.sampleid + "_gene_list.txt")
sys.exit()
with genes:
gene = genes.read().splitlines()
gene_sum_score = 0
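        # Note: as written, gene_sum_score is not reset between genes, so each appended
        # score is cumulative over the genes processed so far.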
gene_score_result = pd.DataFrame(columns=['gene', 'score'])
for query in gene:
#print(query)
hpo_pheno = set(hpo_gene_dict[query]) # To get the phenotypic features for a given gene
overlap = hpo_pheno.intersection(clinical_phenome) # overlap all the phenotypic features with the clinical phenomes
for term in overlap:
gene_sum_score += weightDict[term]
gene_score_result = gene_score_result.append({'gene':query, 'score':gene_sum_score}, ignore_index=True)
gene_score_result_r = gene_score_result.iloc[::-1]
gene_score_result_r = pd.concat([gene_score_result_r, omim_gene])
gene_score_result_r = normalizeRawScore(args, gene_score_result_r, 'gene')
return(gene_score_result_r)
def getParentsGeno(filtered_intervar, inheritance_mode, ov_allele):
# Create two new columns and initialize to 0
filtered_intervar[inheritance_mode] = 0
filtered_intervar = filtered_intervar.reset_index(drop=True)
for idx, row in enumerate(filtered_intervar.itertuples(index=False)):
if int(getattr(row, 'Start')) in set(ov_allele['Start']):
#parents_geno = ov_allele.loc[ov_allele['Start'] == getattr(row, 'Start'), 'geno'].head(1)
#print(parents_geno)
parents_geno = ov_allele.loc[ov_allele['Start']==getattr(row,'Start'),'geno'].head(1).item()
filtered_intervar.loc[idx, inheritance_mode] = parents_geno
return(filtered_intervar)
def rerankSmallVariant(df):
df['Clinvar_idx'] = df.Clinvar.str[9:-1]
df['InterVar_idx'] = df.InterVar_InterVarandEvidence.str[10:].str.split('PVS1').str[0]
df[['Clinvar_idx', 'InterVar_idx']] = df[['Clinvar_idx', 'InterVar_idx']].apply(lambda x:x.astype(str).str.lower())
df['Clinvar_score'], df['InterVar_score'] = 3, 3
# Calculate Clinvar score
df.loc[(df['Clinvar_idx'].str.contains('benign')), 'Clinvar_score'] = 1
df.loc[((df['Clinvar_idx'].str.contains('benign')) & (df['Clinvar_idx'].str.contains('likely'))), 'Clinvar_score'] = 2
df.loc[(df['Clinvar_idx'].str.contains('pathogenic')), 'Clinvar_score'] = 5
df.loc[((df['Clinvar_idx'].str.contains('pathogenic')) & (df['Clinvar_idx'].str.contains('likely'))), 'Clinvar_score'] = 4
df.loc[(df['Clinvar_idx'].str.contains('conflicting')), 'Clinvar_score'] = 3
# Calculate Intervar score
df.loc[(df['InterVar_idx'].str.contains('benign')), 'InterVar_score'] = 1
df.loc[((df['InterVar_idx'].str.contains('benign')) & (df['InterVar_idx'].str.contains('likely'))), 'InterVar_score'] = 2
df.loc[(df['InterVar_idx'].str.contains('pathogenic')), 'InterVar_score'] = 5
df.loc[((df['InterVar_idx'].str.contains('pathogenic')) & (df['InterVar_idx'].str.contains('likely'))), 'InterVar_score'] = 4
# Add them up
df['Patho_score'] = df['Clinvar_score'] + df['InterVar_score']
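    # Worked example (illustrative): Clinvar "Likely pathogenic" (4) plus InterVar
    # "Pathogenic" (5) gives Patho_score 9, ranking above Clinvar "Benign" (1) plus an
    # unmatched/uncertain InterVar call (default 3) = 4.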
# Sort by the total patho_score
df = df.sort_values(by=['Patho_score', 'score'], ascending=False)
df = df.drop(['Clinvar_idx', 'InterVar_idx', 'Clinvar_score', 'InterVar_score', 'Patho_score'], axis=1)
return df
def smallVariantGeneOverlapCheckInheritance(args, smallVariantFile, interVarFinalFile, gene_score_result_r, famid):
# Overlap gene_score_result_r with small variants genes found in the proband
gene_score_result_r = gene_score_result_r[gene_score_result_r.gene.isin(smallVariantFile.gene)]
# Subset the intervar files further to store entries relevant to these set of genes
filtered_intervar = pd.merge(interVarFinalFile, gene_score_result_r, left_on='Ref_Gene', right_on='gene',how='inner')
# Remove common artifacts
try:
artifacts = pd.read_csv("./common_artifacts_20.txt", names = ["gene"])
filtered_intervar = filtered_intervar.loc[~filtered_intervar['Ref_Gene'].isin(artifacts['gene'])]
except OSError:
print("Could not open/read the input file: common_artifacts_20.txt")
sys.exit()
# If custom artifact bed file is provided, filter dataframe
if os.path.exists(args.artifact):
#print(filtered_intervar)
custom_artifact = pd.read_csv(args.artifact, sep='\t', usecols=[0, 2] ,names=["Chr", "End"])
keys = list(custom_artifact.columns.values)
i1 = filtered_intervar.set_index(keys).index
i2 = custom_artifact.set_index(keys).index
filtered_intervar = filtered_intervar.loc[~i1.isin(i2)]
# Create a bed file and write it out
pd.DataFrame(filtered_intervar).to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_smallVariant_candidates.txt', index=False, sep='\t',header=False) # Write out a subset of the variant first
    filtered_intervar_bed = filtered_intervar[['Chr', 'Start', 'End']].copy()
filtered_intervar_bed.loc[:,'Chr'] = 'chr' + filtered_intervar_bed.loc[:,'Chr'].astype(str)
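    # BED intervals are 0-based, half-open, hence the shift of Start by one below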
filtered_intervar_bed.loc[:,'Start'] -= 1
pd.DataFrame(filtered_intervar_bed).to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_target.bed', index=False, sep='\t', header=False)
# Create two new columns and initialize to -1
# will later get overwritten to 0/1/2 if parents vcf files are provided
filtered_intervar['paternal'] = -1
filtered_intervar['maternal'] = -1
if args.type != 'singleton':
# Get overlapping variants from the parents so we know which variants are inherited
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Comparing small variants (SNPs/indels) inheritance')
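        # bcftools view -R <bed> restricts each parental VCF to the candidate regions written above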
cmd1 = "bcftools view -R ./results/" + args.sampleid + "/" + args.sampleid + "_target.bed " + args.fathervcf + " > ./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf"
cmd2 = "bcftools view -R ./results/" + args.sampleid + "/" + args.sampleid + "_target.bed " + args.mothervcf + " > ./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf"
if args.type == 'duo':
if args.father_duo:
cmds = [cmd1]
else:
cmds = [cmd2]
else:
cmds = [cmd1, cmd2]
for cmd in cmds:
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception(stderr)
# Go through every row in filtered_intervar and see if the same variant is found in either of the parents
# We will only compare allele start position (we always assume the alt allele is the same)
if args.type=='trio' or args.father_duo:
try:
paternal_ov_allele = pd.read_csv("./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf", sep='\t',usecols=[1,9], names=["Start", "geno"], comment='#')
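                # The sample column of the parental VCF begins with the GT field (e.g. '0/1:...');
                # summing its first and third characters gives the number of alt alleles:
                # 0 = hom-ref, 1 = het, 2 = hom-alt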
paternal_ov_allele['geno'] = paternal_ov_allele['geno'].str[:1].astype(int) + paternal_ov_allele['geno'].str[2:3].astype(int)
filtered_intervar = getParentsGeno(filtered_intervar, 'paternal', paternal_ov_allele)
except OSError:
print("Could not open/read the input file: ./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf")
sys.exit()
if args.type=="trio" or args.mother_duo:
try:
maternal_ov_allele = pd.read_csv("./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf", sep='\t',usecols=[1,9], names=["Start", "geno"], comment='#')
maternal_ov_allele['geno'] = maternal_ov_allele['geno'].str[:1].astype(int) + maternal_ov_allele['geno'].str[2:3].astype(int)
filtered_intervar = getParentsGeno(filtered_intervar, 'maternal', maternal_ov_allele)
except OSError:
print("Could not open/read the input file: ./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf")
sys.exit()
    # Rerank variants based on reported or predicted pathogenicity
filtered_intervar = rerankSmallVariant(filtered_intervar)
if args.type=='trio':
# Divide the dataset into recessive, dominant, de novo, compound het
## Recessive
recessive = filtered_intervar[(filtered_intervar['paternal'] == 1) & (filtered_intervar['maternal'] == 1) & (filtered_intervar['Otherinfo'] == 'hom')]
## Dominant
dominant_inherited = filtered_intervar[((filtered_intervar['paternal'] == 1) & (filtered_intervar['maternal'] == 0)) | ((filtered_intervar['maternal'] == 1) & (filtered_intervar['paternal'] == 0))]
## De novo
denovo = filtered_intervar[(filtered_intervar['paternal'] == 0) & (filtered_intervar['maternal'] == 0)]
#Compound het
filtered_intervar_compoundhet = filtered_intervar[(filtered_intervar['Otherinfo'] == 'het')]
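        # Keep het calls where neither parent is hom-alt and each variant is either inherited
        # from exactly one het parent or absent from both parents (a possible de novo hit)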
        filtered_intervar_compoundhet = filtered_intervar_compoundhet[
            (filtered_intervar_compoundhet['maternal'] != 2) &
            (filtered_intervar_compoundhet['paternal'] != 2) &
            (((filtered_intervar_compoundhet['paternal'] == 1) & (filtered_intervar_compoundhet['maternal'] == 0)) |
             ((filtered_intervar_compoundhet['maternal'] == 1) & (filtered_intervar_compoundhet['paternal'] == 0)) |
             ((filtered_intervar_compoundhet['maternal'] == 0) & (filtered_intervar_compoundhet['paternal'] == 0)))]
count = Counter(filtered_intervar_compoundhet['Ref_Gene'])
compoundhet_genes = [x for x, cnt in count.items() if cnt > 1]
compoundhet = filtered_intervar_compoundhet[filtered_intervar_compoundhet['Ref_Gene'].isin(compoundhet_genes)]
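        # Drop genes whose candidate variants were all inherited from the same single parent:
        # those hits are in cis and cannot form a compound heterozygote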
discard = []
for gene in compoundhet_genes:
            df = compoundhet[compoundhet['Ref_Gene'] == gene]
            row_count = len(df.index)
            col_list = ['paternal', 'maternal']
            res = df[col_list].sum(axis=0)
            if ((res['paternal'] == 0) and (res['maternal'] == row_count)) or ((res['maternal'] == 0) and (res['paternal'] == row_count)):
discard.append(gene)
compoundhet = compoundhet[~compoundhet['Ref_Gene'].isin(discard)]
# Print all the variants according to inheritance mode
# Recessive
pd.DataFrame(recessive).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_recessive_candidates.txt', index=False, sep='\t', header=True)
# Dominant
pd.DataFrame(dominant_inherited).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_dominant_inherited_smallVariants_candidates.txt', index=False, sep='\t', header=True)
# De novo
pd.DataFrame(denovo).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_denovo_candidates.txt', index=False, sep='\t', header=True)
        # Compound het (output filename assumed to follow the same pattern as the candidate files above)
        pd.DataFrame(compoundhet).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_compoundhet_candidates.txt', index=False, sep='\t', header=True)
import pandas as pd
import numpy as np
from datetime import datetime
from datetime import timedelta
import json
import os
import os.path
import pytz
import sys
from helpers import *
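# unix_date, hour_of_day, minute_of_day and day_of_week used below are assumed to come from helpers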
# global_dir = "/Volumes/dav/MD2K Processed Data/smoking-lvm-cleaned-data/"
global_dir = "../cleaned-data/"
python_version = int(sys.version[0])
def smoking_episode(participant_zip, participant_id):
# Inputs: zipfile, participant_id
# Output: add to csv (prints when done)
zip_namelist = participant_zip.namelist()
csv_marker = 'PUFFMARKER_SMOKING_EPISODE'
csv_matching = [s for s in zip_namelist if csv_marker in s]
csv_matching = [s for s in csv_matching if '.csv' in s]
if csv_matching == []:
print("No PUFFMARKER_SMOKING_EPISODE data for participant " + str(participant_id))
return
csv_file = participant_zip.open(csv_matching[0])
temp = csv_file.read()
    if not temp or temp == b'BZh9\x17rE8P\x90\x00\x00\x00\x00':
        print('Empty file for smoking episode')
else:
csv_file = participant_zip.open(csv_matching[0])
newfile = pd.read_csv(csv_file, header=None)
df = pd.DataFrame(np.array(newfile).reshape(-1, 2),
columns=['timestamp', 'event'])
df['participant_id'] = participant_id
df['date'] = df['timestamp'].apply(unix_date)
df['hour'] = df['timestamp'].apply(hour_of_day)
df['minute'] = df['timestamp'].apply(minute_of_day)
df['day_of_week'] = df['timestamp'].apply(day_of_week)
save_dir = global_dir
save_filename = 'puff-episode-backup.csv'
if os.path.isfile(save_dir + save_filename):
append_write = 'a' # append if already exists
header_binary = False
else:
append_write = 'w' # make a new file if not
header_binary = True
temp_csv_file = open(save_dir+save_filename, append_write)
df.to_csv(temp_csv_file, header=header_binary, index=False, line_terminator = '\n')
temp_csv_file.close()
print('Added to episode file!')
return None
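# A minimal usage sketch (hypothetical archive name; opening the zip is assumed to be handled by the caller):
#
#   import zipfile
#   with zipfile.ZipFile('participant_001.zip') as zf:
#       smoking_episode(zf, 1)
#       puff_probability(zf, 1)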
def puff_probability(participant_zip, participant_id):
# Inputs: zipfile, participant_id
# Output: add to csv (prints when done)
zip_namelist = participant_zip.namelist()
csv_marker = 'PUFF_PROBABILITY'
csv_matching = [s for s in zip_namelist if csv_marker in s]
csv_matching = [s for s in csv_matching if '.csv' in s]
if csv_matching == []:
print("No PUFF_PROBABILITY data for participant " + str(participant_id))
return
csv_file = participant_zip.open(csv_matching[0])
temp = csv_file.read()
    if not temp or temp == b'BZh9\x17rE8P\x90\x00\x00\x00\x00':
        print('Empty file for puff probability')
else:
csv_file = participant_zip.open(csv_matching[0])
newfile = | pd.read_csv(csv_file, header=None) | pandas.read_csv |